target-arm queue:

 * hw/gpio/pl061: Declare pullups/pulldowns as 8-bit types
  * docs/system/arm/virt: Document user-creatable SMMUv3
  * docs/system/security: Restrict "virtualization use case" to specific machines
  * target/arm: Add assert to arm_to_core_mmu_idx()
  * hw/arm/virt: remove deprecated virt-4.1 and virt-4.2 machine types
  * hvf: Refactorings and cleanups
 -----BEGIN PGP SIGNATURE-----
 
 iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmkFAKcZHHBldGVyLm1h
 eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3oSZD/0ekFlrMRFZCYg7ie9t/Cgz
 7OBZGjK+WfuKsD9odYesZzxJ+aPMBQHu6l/44cYaqf+NTRM2hI9ZeaV9e4fXPG0e
 fYImjYMLKPHj4UTam42uN0btl3poq+oaVPKqDPovy+9E09NctO4fmTl7Zys6pH/1
 EwznCk1x3+JLW0xPXXEvfTniB1nB+hvKA/n7NS0qe6n2ddenhQzG8DpdnGEGB+75
 whMwhE/UJ5Y8rP6/Nfc8XqzgU6fmEpPsDRHjDCULy/CiGCV6k8/C8J94UTf2SExh
 iiMLySUb2Rv6qIL2nJX2+xup79UB7umxxoIL0eeN1U/M1L7zMB64rlcU/cym2I40
 mAFuW2qzdsADnpRP8d4KTMJQmFxtZuKuxpkapvIFuusiKq5vBwTxfzyLWdM6nPI9
 7tbKImzLxC1mnOAT0QeZYhLrWMZgQi3tBcS852JAXpiW1eT7SWsl59bKNgCVzI7r
 malptTniE1G+F4VWlghApLympBhNMMaFBfY4XBQ+VxEu+JNhO+MQlJhcLVbqX+oY
 m2OQhPHRv2YUM2VGv40JuzaUE1cXHXNsC7s9hHsB/3UwIp3fXOsdGuq6KviHdcbP
 moQn3M8S/vdFB+1spkhVxS7xgIZJo9f2kaTe9VlpEY7/k5n36BTsxPN6Uae2gIVq
 w4qzOjXFEyeIxLLKQZqyZg==
 =9IV+
 -----END PGP SIGNATURE-----

Merge tag 'pull-target-arm-20251031' of https://gitlab.com/pm215/qemu into staging

target-arm queue:
 * hw/gpio/pl061: Declare pullups/pulldowns as 8-bit types
 * docs/system/arm/virt: Document user-creatable SMMUv3
 * docs/system/security: Restrict "virtualization use case" to specific machines
 * target/arm: Add assert to arm_to_core_mmu_idx()
 * hw/arm/virt: remove deprecated virt-4.1 and virt-4.2 machine types
 * hvf: Refactorings and cleanups

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmkFAKcZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3oSZD/0ekFlrMRFZCYg7ie9t/Cgz
# 7OBZGjK+WfuKsD9odYesZzxJ+aPMBQHu6l/44cYaqf+NTRM2hI9ZeaV9e4fXPG0e
# fYImjYMLKPHj4UTam42uN0btl3poq+oaVPKqDPovy+9E09NctO4fmTl7Zys6pH/1
# EwznCk1x3+JLW0xPXXEvfTniB1nB+hvKA/n7NS0qe6n2ddenhQzG8DpdnGEGB+75
# whMwhE/UJ5Y8rP6/Nfc8XqzgU6fmEpPsDRHjDCULy/CiGCV6k8/C8J94UTf2SExh
# iiMLySUb2Rv6qIL2nJX2+xup79UB7umxxoIL0eeN1U/M1L7zMB64rlcU/cym2I40
# mAFuW2qzdsADnpRP8d4KTMJQmFxtZuKuxpkapvIFuusiKq5vBwTxfzyLWdM6nPI9
# 7tbKImzLxC1mnOAT0QeZYhLrWMZgQi3tBcS852JAXpiW1eT7SWsl59bKNgCVzI7r
# malptTniE1G+F4VWlghApLympBhNMMaFBfY4XBQ+VxEu+JNhO+MQlJhcLVbqX+oY
# m2OQhPHRv2YUM2VGv40JuzaUE1cXHXNsC7s9hHsB/3UwIp3fXOsdGuq6KviHdcbP
# moQn3M8S/vdFB+1spkhVxS7xgIZJo9f2kaTe9VlpEY7/k5n36BTsxPN6Uae2gIVq
# w4qzOjXFEyeIxLLKQZqyZg==
# =9IV+
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 31 Oct 2025 07:32:07 PM CET
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [unknown]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [unknown]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [unknown]
# gpg:                 aka "Peter Maydell <peter@archaic.org.uk>" [unknown]
# gpg: WARNING: The key's User ID is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83  15CF 3C25 25ED 1436 0CDE

* tag 'pull-target-arm-20251031' of https://gitlab.com/pm215/qemu: (38 commits)
  accel/hvf: Trace prefetch abort
  target/arm/hvf/hvf: Document $pc adjustment in HVF & SMC
  target/arm: Share ARM_PSCI_CALL trace event between TCG and HVF
  target/arm: Re-use arm_is_psci_call() in HVF
  target/arm/hvf: Rename 'vgic' -> 'emu_reginfo' in trace events
  target/arm: Rename init_cpreg_list() -> arm_init_cpreg_list()
  accel/hvf: Restrict ARM specific fields of AccelCPUState
  target/arm: Call aarch64_add_pauth_properties() once in host_initfn()
  accel/hvf: Guard hv_vcpu_run() between cpu_exec_start/end() calls
  cpus: Trace cpu_exec_start() and cpu_exec_end() calls
  target/arm/hvf: Keep calling hv_vcpu_run() in loop
  target/arm/hvf: Factor hvf_handle_vmexit() out
  target/i386/hvf: Factor hvf_handle_vmexit() out
  target/arm/hvf: Factor hvf_handle_exception() out
  target/arm/hvf: switch hvf_arm_get_host_cpu_features to not create a vCPU
  target/arm/hvf: Simplify hvf_arm_get_host_cpu_features()
  target/arm/hvf: Hardcode Apple MIDR
  accel/hvf: Implement hvf_arch_vcpu_destroy()
  target/arm/hvf: Mention hvf_inject_interrupts() must run on vCPU thread
  accel/hvf: Mention hvf_arch_update_guest_debug() must run on vCPU
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
Richard Henderson 2025-11-01 10:52:48 +01:00
commit 53b41bb789
20 changed files with 511 additions and 399 deletions

View file

@ -81,7 +81,7 @@ hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
if (!cpu->vcpu_dirty) {
hvf_get_registers(cpu);
hvf_arch_get_registers(cpu);
cpu->vcpu_dirty = true;
}
}
@ -194,7 +194,7 @@ static void *hvf_cpu_thread_fn(void *arg)
do {
qemu_process_cpu_events(cpu);
if (cpu_can_run(cpu)) {
r = hvf_vcpu_exec(cpu);
r = hvf_arch_vcpu_exec(cpu);
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
}

View file

@ -249,6 +249,8 @@ void end_exclusive(void)
/* Wait for exclusive ops to finish, and begin cpu execution. */
void cpu_exec_start(CPUState *cpu)
{
trace_cpu_exec_start(cpu->cpu_index);
qatomic_set(&cpu->running, true);
/* Write cpu->running before reading pending_cpus. */
@ -319,6 +321,7 @@ void cpu_exec_end(CPUState *cpu)
}
}
}
trace_cpu_exec_end(cpu->cpu_index);
}
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,

View file

@ -37,7 +37,8 @@ The virt board supports:
- An RTC
- The fw_cfg device that allows a guest to obtain data from QEMU
- A PL061 GPIO controller
- An optional SMMUv3 IOMMU
- An optional machine-wide SMMUv3 IOMMU
- User-creatable SMMUv3 devices (see below for example)
- hotpluggable DIMMs
- hotpluggable NVDIMMs
- An MSI controller (GICv2M or ITS). GICv2M is selected by default along
@ -176,7 +177,7 @@ iommu
``none``
Don't create an IOMMU (the default)
``smmuv3``
Create an SMMUv3
Create a machine-wide SMMUv3.
default-bus-bypass-iommu
Set ``on``/``off`` to enable/disable `bypass_iommu
@ -219,6 +220,36 @@ x-oem-table-id
Set string (up to 8 bytes) to override the default value of field OEM Table ID
in ACPI table header.
SMMU configuration
""""""""""""""""""
Machine-wide SMMUv3 IOMMU
Setting the machine-specific option ``iommu=smmuv3`` causes QEMU to
create a single, machine-wide SMMUv3 instance that applies to all
devices in the PCIe topology.
For information about selectively bypassing devices, refer to
``docs/bypass-iommu.txt``.
User-creatable SMMUv3 devices
You can use the ``-device arm-smmuv3`` option to create multiple
user-defined SMMUv3 devices, each associated with a separate PCIe
root complex. This is only permitted if the machine-wide SMMUv3
(``iommu=smmuv3``) option is not used. Each ``arm-smmuv3`` device
uses the ``primary-bus`` sub-option to specify which PCIe root
complex it is associated with.
This model is useful when you want to mirror a host configuration where
each NUMA node typically has its own SMMU, allowing the VM topology to
align more closely with the host's hardware layout.
Example::
-device arm-smmuv3,primary-bus=pcie.0,id=smmuv3.0
...
-device pxb-pcie,id=pcie.1,numa_node=1
-device arm-smmuv3,primary-bus=pcie.1,id=smmuv3.1
Linux guest kernel configuration
""""""""""""""""""""""""""""""""

View file

@ -35,6 +35,32 @@ malicious:
Bugs affecting these entities are evaluated on whether they can cause damage in
real-world use cases and treated as security bugs if this is the case.
To be covered by this security support policy you must:
- use a virtualization accelerator like KVM or HVF
- use one of the machine types listed below
It may be possible to use other machine types with a virtualization
accelerator to provide improved performance with a trusted guest
workload, but any machine type not listed here should not be
considered to be providing guest isolation or security guarantees,
and falls under the "non-virtualization use case".
Supported machine types for the virtualization use case, by target architecture:
aarch64
``virt``
i386, x86_64
``microvm``, ``xenfv``, ``xenpv``, ``xenpvh``, ``pc``, ``q35``
s390x
``s390-ccw-virtio``
loongarch64:
``virt``
ppc64:
``pseries``
riscv32, riscv64:
``virt``
Non-virtualization Use Case
'''''''''''''''''''''''''''

View file

@ -1136,8 +1136,8 @@ static void create_gpio_devices(const VirtMachineState *vms, int gpio,
pl061_dev = qdev_new("pl061");
/* Pull lines down to 0 if not driven by the PL061 */
qdev_prop_set_uint32(pl061_dev, "pullups", 0);
qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff);
qdev_prop_set_uint8(pl061_dev, "pullups", 0);
qdev_prop_set_uint8(pl061_dev, "pulldowns", 0xff);
s = SYS_BUS_DEVICE(pl061_dev);
sysbus_realize_and_unref(s, &error_fatal);
memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0));
@ -2213,7 +2213,6 @@ static void machvirt_init(MachineState *machine)
int n, virt_max_cpus;
bool firmware_loaded;
bool aarch64 = true;
bool has_ged = !vmc->no_ged;
unsigned int smp_cpus = machine->smp.cpus;
unsigned int max_cpus = machine->smp.max_cpus;
@ -2366,11 +2365,6 @@ static void machvirt_init(MachineState *machine)
object_property_set_bool(cpuobj, "has_el2", false, NULL);
}
if (vmc->kvm_no_adjvtime &&
object_property_find(cpuobj, "kvm-no-adjvtime")) {
object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL);
}
if (vmc->no_kvm_steal_time &&
object_property_find(cpuobj, "kvm-steal-time")) {
object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL);
@ -2515,7 +2509,7 @@ static void machvirt_init(MachineState *machine)
create_pcie(vms);
create_cxl_host_reg_region(vms);
if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) {
if (aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) {
vms->acpi_dev = create_acpi_ged(vms);
vms->generic_error_notifier.notify = virt_generic_error_req;
notifier_list_add(&acpi_generic_error_notifiers,
@ -3701,24 +3695,3 @@ static void virt_machine_5_0_options(MachineClass *mc)
mc->auto_enable_numa_with_memdev = false;
}
DEFINE_VIRT_MACHINE(5, 0)
static void virt_machine_4_2_options(MachineClass *mc)
{
VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
virt_machine_5_0_options(mc);
compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len);
vmc->kvm_no_adjvtime = true;
}
DEFINE_VIRT_MACHINE(4, 2)
static void virt_machine_4_1_options(MachineClass *mc)
{
VirtMachineClass *vmc = VIRT_MACHINE_CLASS(OBJECT_CLASS(mc));
virt_machine_4_2_options(mc);
compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len);
vmc->no_ged = true;
mc->auto_enable_numa_with_memhp = false;
}
DEFINE_VIRT_MACHINE(4, 1)

View file

@ -79,8 +79,8 @@ struct PL061State {
qemu_irq out[N_GPIOS];
const unsigned char *id;
/* Properties, for non-Luminary PL061 */
uint32_t pullups;
uint32_t pulldowns;
uint8_t pullups;
uint8_t pulldowns;
};
static const VMStateDescription vmstate_pl061 = {
@ -547,14 +547,6 @@ static void pl061_realize(DeviceState *dev, Error **errp)
{
PL061State *s = PL061(dev);
if (s->pullups > 0xff) {
error_setg(errp, "pullups property must be between 0 and 0xff");
return;
}
if (s->pulldowns > 0xff) {
error_setg(errp, "pulldowns property must be between 0 and 0xff");
return;
}
if (s->pullups & s->pulldowns) {
error_setg(errp, "no bit may be set both in pullups and pulldowns");
return;
@ -562,8 +554,8 @@ static void pl061_realize(DeviceState *dev, Error **errp)
}
static const Property pl061_props[] = {
DEFINE_PROP_UINT32("pullups", PL061State, pullups, 0xff),
DEFINE_PROP_UINT32("pulldowns", PL061State, pulldowns, 0x0),
DEFINE_PROP_UINT8("pullups", PL061State, pullups, 0xff),
DEFINE_PROP_UINT8("pulldowns", PL061State, pulldowns, 0x0),
};
static void pl061_class_init(ObjectClass *klass, const void *data)

View file

@ -326,8 +326,8 @@ static void create_gpio_devices(const VMAppleMachineState *vms, int gpio,
pl061_dev = qdev_new("pl061");
/* Pull lines down to 0 if not driven by the PL061 */
qdev_prop_set_uint32(pl061_dev, "pullups", 0);
qdev_prop_set_uint32(pl061_dev, "pulldowns", 0xff);
qdev_prop_set_uint8(pl061_dev, "pullups", 0);
qdev_prop_set_uint8(pl061_dev, "pulldowns", 0xff);
s = SYS_BUS_DEVICE(pl061_dev);
sysbus_realize_and_unref(s, &error_fatal);
memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0));

View file

@ -123,8 +123,6 @@ struct VirtMachineClass {
MachineClass parent;
bool no_tcg_its;
bool no_highmem_compact;
bool no_ged; /* Machines < 4.2 have no support for ACPI GED device */
bool kvm_no_adjvtime;
bool no_kvm_steal_time;
bool acpi_expose_flash;
bool no_secure_gpio;

View file

@ -59,10 +59,12 @@ extern HVFState *hvf_state;
struct AccelCPUState {
hvf_vcpuid fd;
void *exit;
#ifdef __aarch64__
hv_vcpu_exit_t *exit;
bool vtimer_masked;
sigset_t unblock_ipi_mask;
bool guest_debug_enabled;
#endif
};
void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
@ -71,14 +73,22 @@ void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
const char *hvf_return_string(hv_return_t ret);
int hvf_arch_init(void);
hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range);
int hvf_arch_init_vcpu(CPUState *cpu);
void hvf_arch_vcpu_destroy(CPUState *cpu);
int hvf_vcpu_exec(CPUState *);
hvf_slot *hvf_find_overlap_slot(uint64_t, uint64_t);
int hvf_put_registers(CPUState *);
int hvf_get_registers(CPUState *);
void hvf_kick_vcpu_thread(CPUState *cpu);
/* Must be called by the owning thread */
int hvf_arch_init_vcpu(CPUState *cpu);
/* Must be called by the owning thread */
void hvf_arch_vcpu_destroy(CPUState *cpu);
/* Must be called by the owning thread */
int hvf_arch_vcpu_exec(CPUState *);
/* Must be called by the owning thread */
int hvf_arch_put_registers(CPUState *);
/* Must be called by the owning thread */
int hvf_arch_get_registers(CPUState *);
/* Must be called by the owning thread */
void hvf_arch_update_guest_debug(CPUState *cpu);
struct hvf_sw_breakpoint {
vaddr pc;
vaddr saved_insn;
@ -104,7 +114,6 @@ void hvf_arch_remove_all_hw_breakpoints(void);
* handled by calling down to hvf_arch_update_guest_debug.
*/
int hvf_update_guest_debug(CPUState *cpu);
void hvf_arch_update_guest_debug(CPUState *cpu);
/*
* Return whether the guest supports debugging.

View file

@ -2141,7 +2141,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
arm_cpu_register_gdb_regs_for_features(cpu);
arm_cpu_register_gdb_commands(cpu);
init_cpreg_list(cpu);
arm_init_cpreg_list(cpu);
#ifndef CONFIG_USER_ONLY
MachineState *ms = MACHINE(qdev_get_machine());

View file

@ -762,20 +762,20 @@ static void aarch64_a53_initfn(Object *obj)
static void aarch64_host_initfn(Object *obj)
{
#if defined(CONFIG_KVM)
ARMCPU *cpu = ARM_CPU(obj);
#if defined(CONFIG_KVM)
kvm_arm_set_cpu_features_from_host(cpu);
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
aarch64_add_sve_properties(obj);
aarch64_add_pauth_properties(obj);
}
#elif defined(CONFIG_HVF)
ARMCPU *cpu = ARM_CPU(obj);
hvf_arm_set_cpu_features_from_host(cpu);
aarch64_add_pauth_properties(obj);
#else
g_assert_not_reached();
#endif
if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
aarch64_add_pauth_properties(obj);
}
}
static void aarch64_max_initfn(Object *obj)

View file

@ -253,7 +253,7 @@ static void count_cpreg(gpointer key, gpointer value, gpointer opaque)
}
}
void init_cpreg_list(ARMCPU *cpu)
void arm_init_cpreg_list(ARMCPU *cpu)
{
/*
* Initialise the cpreg_tuples[] array based on the cp_regs hash.

View file

@ -34,6 +34,7 @@
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/gtimer.h"
#include "target/arm/trace.h"
#include "trace.h"
#include "migration/vmstate.h"
@ -150,6 +151,8 @@ void hvf_arm_init_debug(void)
max_hw_wps = hvf_arm_num_wrps(config);
hw_watchpoints =
g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps);
os_release(config);
}
#define SYSREG_OP0_SHIFT 20
@ -422,7 +425,7 @@ static const hv_sys_reg_t hvf_sreg_list[] = {
#undef DEF_SYSREG
int hvf_get_registers(CPUState *cpu)
int hvf_arch_get_registers(CPUState *cpu)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
@ -562,7 +565,7 @@ int hvf_get_registers(CPUState *cpu)
return 0;
}
int hvf_put_registers(CPUState *cpu)
int hvf_arch_put_registers(CPUState *cpu)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
@ -687,14 +690,16 @@ int hvf_put_registers(CPUState *cpu)
return 0;
}
/* Must be called by the owning thread */
static void flush_cpu_state(CPUState *cpu)
{
if (cpu->vcpu_dirty) {
hvf_put_registers(cpu);
hvf_arch_put_registers(cpu);
cpu->vcpu_dirty = false;
}
}
/* Must be called by the owning thread */
static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
hv_return_t r;
@ -707,6 +712,7 @@ static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
}
}
/* Must be called by the owning thread */
static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
uint64_t val = 0;
@ -738,26 +744,26 @@ static void clamp_id_aa64mmfr0_parange_to_ipa_size(ARMISARegisters *isar)
static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
ARMISARegisters host_isar = {};
const struct isar_regs {
int reg;
uint64_t *val;
static const struct isar_regs {
hv_feature_reg_t reg;
ARMIDRegisterIdx index;
} regs[] = {
{ HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] },
{ HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] },
{ HV_FEATURE_REG_ID_AA64PFR0_EL1, ID_AA64PFR0_EL1_IDX },
{ HV_FEATURE_REG_ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_IDX },
/* Add ID_AA64PFR2_EL1 here when HVF supports it */
{ HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] },
{ HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] },
{ HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] },
{ HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.idregs[ID_AA64ISAR1_EL1_IDX] },
{ HV_FEATURE_REG_ID_AA64DFR0_EL1, ID_AA64DFR0_EL1_IDX },
{ HV_FEATURE_REG_ID_AA64DFR1_EL1, ID_AA64DFR1_EL1_IDX },
{ HV_FEATURE_REG_ID_AA64ISAR0_EL1, ID_AA64ISAR0_EL1_IDX },
{ HV_FEATURE_REG_ID_AA64ISAR1_EL1, ID_AA64ISAR1_EL1_IDX },
/* Add ID_AA64ISAR2_EL1 here when HVF supports it */
{ HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.idregs[ID_AA64MMFR0_EL1_IDX] },
{ HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.idregs[ID_AA64MMFR1_EL1_IDX] },
{ HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.idregs[ID_AA64MMFR2_EL1_IDX] },
{ HV_FEATURE_REG_ID_AA64MMFR0_EL1, ID_AA64MMFR0_EL1_IDX },
{ HV_FEATURE_REG_ID_AA64MMFR1_EL1, ID_AA64MMFR1_EL1_IDX },
{ HV_FEATURE_REG_ID_AA64MMFR2_EL1, ID_AA64MMFR2_EL1_IDX },
/* Add ID_AA64MMFR3_EL1 here when HVF supports it */
};
hv_vcpu_t fd;
hv_return_t r = HV_SUCCESS;
hv_vcpu_exit_t *exit;
hv_vcpu_config_t config = hv_vcpu_config_create();
uint64_t t;
int i;
ahcf->dtb_compatible = "arm,armv8";
@ -767,17 +773,22 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
(1ULL << ARM_FEATURE_PMU) |
(1ULL << ARM_FEATURE_GENERIC_TIMER);
/* We set up a small vcpu to extract host registers */
if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
return false;
}
for (i = 0; i < ARRAY_SIZE(regs); i++) {
r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
r |= hv_vcpu_config_get_feature_reg(config, regs[i].reg,
&host_isar.idregs[regs[i].index]);
}
r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
r |= hv_vcpu_destroy(fd);
os_release(config);
/*
* Hardcode MIDR because Apple deliberately doesn't expose a divergent
* MIDR across systems.
*/
t = FIELD_DP64(0, MIDR_EL1, IMPLEMENTER, 0x61); /* Apple */
t = FIELD_DP64(t, MIDR_EL1, ARCHITECTURE, 0xf); /* v7 or later */
t = FIELD_DP64(t, MIDR_EL1, PARTNUM, 0);
t = FIELD_DP64(t, MIDR_EL1, VARIANT, 0);
t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
ahcf->midr = t;
clamp_id_aa64mmfr0_parange_to_ipa_size(&host_isar);
@ -863,6 +874,10 @@ void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
void hvf_arch_vcpu_destroy(CPUState *cpu)
{
hv_return_t ret;
ret = hv_vcpu_destroy(cpu->accel->fd);
assert_hvf_ok(ret);
}
hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
@ -961,8 +976,11 @@ int hvf_arch_init_vcpu(CPUState *cpu)
void hvf_kick_vcpu_thread(CPUState *cpu)
{
hv_return_t ret;
trace_hvf_kick_vcpu_thread(cpu->cpu_index, cpu->stop);
cpus_kick_thread(cpu);
hv_vcpus_exit(&cpu->accel->fd, 1);
ret = hv_vcpus_exit(&cpu->accel->fd, 1);
assert_hvf_ok(ret);
}
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
@ -1008,7 +1026,7 @@ static bool hvf_handle_psci_call(CPUState *cpu)
int target_el = 1;
int32_t ret = 0;
trace_hvf_psci_call(param[0], param[1], param[2], param[3],
trace_arm_psci_call(param[0], param[1], param[2], param[3],
arm_cpu_mp_affinity(arm_cpu));
switch (param[0]) {
@ -1132,7 +1150,8 @@ static uint32_t hvf_reg2cp_reg(uint32_t reg)
(reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK);
}
static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
static bool hvf_sysreg_read_cp(CPUState *cpu, const char *cpname,
uint32_t reg, uint64_t *val)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
@ -1155,7 +1174,7 @@ static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val)
} else {
*val = raw_read(env, ri);
}
trace_hvf_vgic_read(ri->name, *val);
trace_hvf_emu_reginfo_read(cpname, ri->name, *val);
return true;
}
@ -1244,7 +1263,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val)
case SYSREG_ICC_SRE_EL1:
case SYSREG_ICC_CTLR_EL1:
/* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
if (hvf_sysreg_read_cp(cpu, reg, val)) {
if (hvf_sysreg_read_cp(cpu, "GICv3", reg, val)) {
return 0;
}
break;
@ -1415,7 +1434,8 @@ static void pmswinc_write(CPUARMState *env, uint64_t value)
}
}
static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
static bool hvf_sysreg_write_cp(CPUState *cpu, const char *cpname,
uint32_t reg, uint64_t val)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
@ -1438,7 +1458,7 @@ static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val)
raw_write(env, ri, val);
}
trace_hvf_vgic_write(ri->name, val);
trace_hvf_emu_reginfo_write(cpname, ri->name, val);
return true;
}
@ -1564,7 +1584,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
case SYSREG_ICC_SGI1R_EL1:
case SYSREG_ICC_SRE_EL1:
/* Call the TCG sysreg handler. This is only safe for GICv3 regs. */
if (hvf_sysreg_write_cp(cpu, reg, val)) {
if (hvf_sysreg_write_cp(cpu, "GICv3", reg, val)) {
return 0;
}
break;
@ -1656,6 +1676,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
return 1;
}
/* Must be called by the owning thread */
static int hvf_inject_interrupts(CPUState *cpu)
{
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_FIQ)) {
@ -1756,6 +1777,7 @@ static void hvf_wfi(CPUState *cpu)
hvf_wait_for_ipi(cpu, &ts);
}
/* Must be called by the owning thread */
static void hvf_sync_vtimer(CPUState *cpu)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
@ -1777,66 +1799,21 @@ static void hvf_sync_vtimer(CPUState *cpu)
if (!irq_state) {
/* Timer no longer asserting, we can unmask it */
hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
r = hv_vcpu_set_vtimer_mask(cpu->accel->fd, false);
assert_hvf_ok(r);
cpu->accel->vtimer_masked = false;
}
}
int hvf_vcpu_exec(CPUState *cpu)
static int hvf_handle_exception(CPUState *cpu, hv_vcpu_exit_exception_t *excp)
{
ARMCPU *arm_cpu = ARM_CPU(cpu);
CPUARMState *env = &arm_cpu->env;
int ret;
hv_vcpu_exit_t *hvf_exit = cpu->accel->exit;
hv_return_t r;
bool advance_pc = false;
if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
hvf_inject_interrupts(cpu)) {
return EXCP_INTERRUPT;
}
if (cpu->halted) {
return EXCP_HLT;
}
flush_cpu_state(cpu);
bql_unlock();
r = hv_vcpu_run(cpu->accel->fd);
bql_lock();
switch (r) {
case HV_SUCCESS:
break;
case HV_ILLEGAL_GUEST_STATE:
trace_hvf_illegal_guest_state();
/* fall through */
default:
g_assert_not_reached();
}
/* handle VMEXIT */
uint64_t exit_reason = hvf_exit->reason;
uint64_t syndrome = hvf_exit->exception.syndrome;
CPUARMState *env = cpu_env(cpu);
ARMCPU *arm_cpu = env_archcpu(env);
uint64_t syndrome = excp->syndrome;
uint32_t ec = syn_get_ec(syndrome);
ret = 0;
switch (exit_reason) {
case HV_EXIT_REASON_EXCEPTION:
/* This is the main one, handle below. */
break;
case HV_EXIT_REASON_VTIMER_ACTIVATED:
qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
cpu->accel->vtimer_masked = true;
return 0;
case HV_EXIT_REASON_CANCELED:
/* we got kicked, no exit to process */
return 0;
default:
g_assert_not_reached();
}
hvf_sync_vtimer(cpu);
bool advance_pc = false;
hv_return_t r;
int ret = 0;
switch (ec) {
case EC_SOFTWARESTEP: {
@ -1875,7 +1852,7 @@ int hvf_vcpu_exec(CPUState *cpu)
cpu_synchronize_state(cpu);
CPUWatchpoint *wp =
find_hw_watchpoint(cpu, hvf_exit->exception.virtual_address);
find_hw_watchpoint(cpu, excp->virtual_address);
if (!wp) {
error_report("EXCP_DEBUG but unknown hw watchpoint");
}
@ -1893,8 +1870,8 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t cm = (syndrome >> 8) & 0x1;
uint64_t val = 0;
trace_hvf_data_abort(hvf_exit->exception.virtual_address,
hvf_exit->exception.physical_address, isv,
trace_hvf_data_abort(excp->virtual_address,
excp->physical_address, isv,
iswrite, s1ptw, len, srt);
if (cm) {
@ -1908,11 +1885,11 @@ int hvf_vcpu_exec(CPUState *cpu)
if (iswrite) {
val = hvf_get_reg(cpu, srt);
address_space_write(&address_space_memory,
hvf_exit->exception.physical_address,
excp->physical_address,
MEMTXATTRS_UNSPECIFIED, &val, len);
} else {
address_space_read(&address_space_memory,
hvf_exit->exception.physical_address,
excp->physical_address,
MEMTXATTRS_UNSPECIFIED, &val, len);
if (sse) {
val = sextract64(val, 0, len * 8);
@ -1958,7 +1935,8 @@ int hvf_vcpu_exec(CPUState *cpu)
break;
case EC_AA64_HVC:
cpu_synchronize_state(cpu);
if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
if (arm_is_psci_call(arm_cpu, EXCP_HVC)) {
/* Do NOT advance $pc for HVC */
if (!hvf_handle_psci_call(cpu)) {
trace_hvf_unknown_hvc(env->pc, env->xregs[0]);
/* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
@ -1971,7 +1949,8 @@ int hvf_vcpu_exec(CPUState *cpu)
break;
case EC_AA64_SMC:
cpu_synchronize_state(cpu);
if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
if (arm_is_psci_call(arm_cpu, EXCP_SMC)) {
/* Secure Monitor Call exception, we need to advance $pc */
advance_pc = true;
if (!hvf_handle_psci_call(cpu)) {
@ -1984,6 +1963,17 @@ int hvf_vcpu_exec(CPUState *cpu)
hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized(), 1);
}
break;
case EC_INSNABORT: {
uint32_t set = (syndrome >> 12) & 3;
bool fnv = (syndrome >> 10) & 1;
bool ea = (syndrome >> 9) & 1;
bool s1ptw = (syndrome >> 7) & 1;
uint32_t ifsc = (syndrome >> 0) & 0x3f;
trace_hvf_insn_abort(env->pc, set, fnv, ea, s1ptw, ifsc);
/* fall through */
}
default:
cpu_synchronize_state(cpu);
trace_hvf_exit(syndrome, ec, env->pc);
@ -2010,6 +2000,67 @@ int hvf_vcpu_exec(CPUState *cpu)
return ret;
}
static int hvf_handle_vmexit(CPUState *cpu, hv_vcpu_exit_t *exit)
{
ARMCPU *arm_cpu = env_archcpu(cpu_env(cpu));
int ret = 0;
switch (exit->reason) {
case HV_EXIT_REASON_EXCEPTION:
hvf_sync_vtimer(cpu);
ret = hvf_handle_exception(cpu, &exit->exception);
break;
case HV_EXIT_REASON_VTIMER_ACTIVATED:
qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
cpu->accel->vtimer_masked = true;
break;
case HV_EXIT_REASON_CANCELED:
/* we got kicked, no exit to process */
break;
default:
g_assert_not_reached();
}
return ret;
}
int hvf_arch_vcpu_exec(CPUState *cpu)
{
int ret;
hv_return_t r;
if (cpu->halted) {
return EXCP_HLT;
}
flush_cpu_state(cpu);
do {
if (!(cpu->singlestep_enabled & SSTEP_NOIRQ) &&
hvf_inject_interrupts(cpu)) {
return EXCP_INTERRUPT;
}
bql_unlock();
cpu_exec_start(cpu);
r = hv_vcpu_run(cpu->accel->fd);
cpu_exec_end(cpu);
bql_lock();
switch (r) {
case HV_SUCCESS:
ret = hvf_handle_vmexit(cpu, cpu->accel->exit);
break;
case HV_ILLEGAL_GUEST_STATE:
trace_hvf_illegal_guest_state();
/* fall through */
default:
g_assert_not_reached();
}
} while (ret == 0);
return ret;
}
static const VMStateDescription vmstate_hvf_vtimer = {
.name = "hvf-vtimer",
.version_id = 1,
@ -2110,6 +2161,7 @@ void hvf_arch_remove_all_hw_breakpoints(void)
* Update the vCPU with the gdbstub's view of debug registers. This view
* consists of all hardware breakpoints and watchpoints inserted so far while
* debugging the guest.
* Must be called by the owning thread.
*/
static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
{
@ -2148,6 +2200,7 @@ static void hvf_put_gdbstub_debug_registers(CPUState *cpu)
/*
* Update the vCPU with the guest's view of debug registers. This view is kept
* in the environment at all times.
* Must be called by the owning thread.
*/
static void hvf_put_guest_debug_registers(CPUState *cpu)
{
@ -2180,6 +2233,7 @@ static inline bool hvf_arm_hw_debug_active(CPUState *cpu)
return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
}
/* Must be called by the owning thread */
static void hvf_arch_set_traps(CPUState *cpu)
{
bool should_enable_traps = false;

View file

@ -3,12 +3,14 @@ hvf_unhandled_sysreg_write(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1
hvf_inject_fiq(void) "injecting FIQ"
hvf_inject_irq(void) "injecting IRQ"
hvf_data_abort(uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]"
hvf_insn_abort(uint64_t pc, uint32_t set, bool fnv, bool ea, bool s1ptw, uint32_t ifsc) "insn abort: [pc=0x%"PRIx64" set=%d fnv=%d ea=%d s1ptw=%d ifsc=%d]"
hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64
hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")"
hvf_unknown_hvc(uint64_t pc, uint64_t x0) "pc=0x%"PRIx64" unknown HVC! 0x%016"PRIx64
hvf_unknown_smc(uint64_t x0) "unknown SMC! 0x%016"PRIx64
hvf_exit(uint64_t syndrome, uint32_t ec, uint64_t pc) "exit: 0x%"PRIx64" [ec=0x%x pc=0x%"PRIx64"]"
hvf_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpuid=0x%x"
hvf_vgic_write(const char *name, uint64_t val) "vgic write to %s [val=0x%016"PRIx64"]"
hvf_vgic_read(const char *name, uint64_t val) "vgic read from %s [val=0x%016"PRIx64"]"
hvf_emu_reginfo_write(const char *cpname, const char *regname, uint64_t val) "[%s] write to %s [val=0x%016"PRIx64"]"
hvf_emu_reginfo_read(const char *cpname, const char *regname, uint64_t val) "[%s] read from %s [val=0x%016"PRIx64"]"
hvf_illegal_guest_state(void) "HV_ILLEGAL_GUEST_STATE"
hvf_kick_vcpu_thread(unsigned cpuidx, bool stop) "cpu:%u stop:%u"

View file

@ -377,7 +377,7 @@ void arm_cpu_register(const ARMCPUInfo *info);
void arm_do_plugin_vcpu_discon_cb(CPUState *cs, uint64_t from);
void register_cp_regs_for_features(ARMCPU *cpu);
void init_cpreg_list(ARMCPU *cpu);
void arm_init_cpreg_list(ARMCPU *cpu);
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu);
void arm_translate_init(void);
@ -969,7 +969,9 @@ bool arm_cpu_tlb_fill_align(CPUState *cs, CPUTLBEntryFull *out, vaddr addr,
static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
int coreidx = mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
assert(coreidx < NB_MMU_MODES);
return coreidx;
}
static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)

View file

@ -25,6 +25,7 @@
#include "internals.h"
#include "arm-powerctl.h"
#include "target/arm/multiprocessing.h"
#include "target/arm/trace.h"
bool arm_is_psci_call(ARMCPU *cpu, int excp_type)
{
@ -79,6 +80,8 @@ void arm_handle_psci_call(ARMCPU *cpu)
*/
param[i] = is_a64(env) ? env->xregs[i] : env->regs[i];
}
trace_arm_psci_call(param[0], param[1], param[2], param[3],
arm_cpu_mp_affinity(cpu));
if ((param[0] & QEMU_PSCI_0_2_64BIT) && !is_a64(env)) {
ret = QEMU_PSCI_RET_NOT_SUPPORTED;

View file

@ -23,3 +23,6 @@ arm_powerctl_set_cpu_on(uint64_t mp_aff, unsigned target_el, const char *mode, u
arm_powerctl_set_cpu_on_and_reset(uint64_t mp_aff) "cpu %" PRIu64
arm_powerctl_set_cpu_off(uint64_t mp_aff) "cpu %" PRIu64
arm_powerctl_reset_cpu(uint64_t mp_aff) "cpu %" PRIu64
# tcg/psci.c and hvf/hvf.c
arm_psci_call(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3, uint32_t cpuid) "PSCI Call x0=0x%016"PRIx64" x1=0x%016"PRIx64" x2=0x%016"PRIx64" x3=0x%016"PRIx64" cpuid=0x%x"

View file

@ -721,44 +721,16 @@ void hvf_simulate_wrmsr(CPUState *cs)
printf("write msr %llx\n", RCX(cs));*/
}
int hvf_vcpu_exec(CPUState *cpu)
static int hvf_handle_vmexit(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
int ret = 0;
uint64_t rip = 0;
if (hvf_process_events(cpu)) {
return EXCP_HLT;
}
do {
if (cpu->vcpu_dirty) {
hvf_put_registers(cpu);
cpu->vcpu_dirty = false;
}
if (hvf_inject_interrupts(cpu)) {
return EXCP_INTERRUPT;
}
vmx_update_tpr(cpu);
bql_unlock();
if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
bql_lock();
return EXCP_HLT;
}
hv_return_t r = hv_vcpu_run_until(cpu->accel->fd, HV_DEADLINE_FOREVER);
assert_hvf_ok(r);
/* handle VMEXIT */
X86CPU *x86_cpu = env_archcpu(cpu_env(cpu));
uint64_t exit_reason = rvmcs(cpu->accel->fd, VMCS_EXIT_REASON);
uint64_t exit_qual = rvmcs(cpu->accel->fd, VMCS_EXIT_QUALIFICATION);
uint32_t ins_len = (uint32_t)rvmcs(cpu->accel->fd,
VMCS_EXIT_INSTRUCTION_LENGTH);
uint64_t idtvec_info = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
int ret = 0;
hvf_store_events(cpu, ins_len, idtvec_info);
rip = rreg(cpu->accel->fd, HV_X86_RIP);
@ -769,14 +741,13 @@ int hvf_vcpu_exec(CPUState *cpu)
update_apic_tpr(cpu);
current_cpu = cpu;
ret = 0;
switch (exit_reason) {
case EXIT_REASON_HLT: {
macvm_set_rip(cpu, rip + ins_len);
if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK))
&& !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)
&& (env->eflags & IF_MASK))
&& !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)
&& !(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu->halted = 1;
ret = EXCP_HLT;
break;
@ -819,9 +790,11 @@ int hvf_vcpu_exec(CPUState *cpu)
uint32_t string = (exit_qual & 16) != 0;
uint32_t port = exit_qual >> 16;
/*uint32_t rep = (exit_qual & 0x20) != 0;*/
struct x86_decode decode;
if (!string && in) {
uint64_t val = 0;
hvf_load_regs(cpu);
hvf_handle_io(env_cpu(env), port, &val, 0, size, 1);
if (size == 1) {
@ -842,7 +815,6 @@ int hvf_vcpu_exec(CPUState *cpu)
macvm_set_rip(cpu, rip + ins_len);
break;
}
struct x86_decode decode;
hvf_load_regs(cpu);
decode_instruction(env, &decode);
@ -962,9 +934,11 @@ int hvf_vcpu_exec(CPUState *cpu)
case EXIT_REASON_TASK_SWITCH: {
uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
x86_segment_selector sel = {.sel = exit_qual & 0xffff};
vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
vinfo & VMCS_INTR_VALID, vinfo & VECTORING_INFO_VECTOR_MASK, vinfo
& VMCS_INTR_T_MASK);
vinfo & VMCS_INTR_VALID,
vinfo & VECTORING_INFO_VECTOR_MASK,
vinfo & VMCS_INTR_T_MASK);
break;
}
case EXIT_REASON_TRIPLE_FAULT: {
@ -986,6 +960,46 @@ int hvf_vcpu_exec(CPUState *cpu)
default:
error_report("%llx: unhandled exit %llx", rip, exit_reason);
}
return ret;
}
int hvf_arch_vcpu_exec(CPUState *cpu)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
int ret = 0;
uint64_t rip = 0;
if (hvf_process_events(cpu)) {
return EXCP_HLT;
}
do {
if (cpu->vcpu_dirty) {
hvf_arch_put_registers(cpu);
cpu->vcpu_dirty = false;
}
if (hvf_inject_interrupts(cpu)) {
return EXCP_INTERRUPT;
}
vmx_update_tpr(cpu);
bql_unlock();
if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
bql_lock();
return EXCP_HLT;
}
cpu_exec_start(cpu);
hv_return_t r = hv_vcpu_run_until(cpu->accel->fd, HV_DEADLINE_FOREVER);
assert_hvf_ok(r);
cpu_exec_end(cpu);
ret = hvf_handle_vmexit(cpu);
} while (ret == 0);
return ret;

View file

@ -236,7 +236,7 @@ void hvf_get_msrs(CPUState *cs)
env->tsc = rdtscp() + rvmcs(cs->accel->fd, VMCS_TSC_OFFSET);
}
int hvf_put_registers(CPUState *cs)
int hvf_arch_put_registers(CPUState *cs)
{
X86CPU *x86cpu = X86_CPU(cs);
CPUX86State *env = &x86cpu->env;
@ -280,7 +280,7 @@ int hvf_put_registers(CPUState *cs)
return 0;
}
int hvf_get_registers(CPUState *cs)
int hvf_arch_get_registers(CPUState *cs)
{
X86CPU *x86cpu = X86_CPU(cs);
CPUX86State *env = &x86cpu->env;

View file

@ -29,6 +29,8 @@
breakpoint_insert(int cpu_index, uint64_t pc, int flags) "cpu=%d pc=0x%" PRIx64 " flags=0x%x"
breakpoint_remove(int cpu_index, uint64_t pc, int flags) "cpu=%d pc=0x%" PRIx64 " flags=0x%x"
breakpoint_singlestep(int cpu_index, int enabled) "cpu=%d enable=%d"
cpu_exec_start(int cpu_index) "cpu=%d"
cpu_exec_end(int cpu_index) "cpu=%d"
# job.c
job_state_transition(void *job, int ret, const char *legal, const char *s0, const char *s1) "job %p (ret: %d) attempting %s transition (%s-->%s)"