virtio,pci,pc: features, fixes, tests

SPCR acpi table can now be disabled
 vhost-vdpa can now report hashing capability to guest
 PPTT acpi table now tells guest vCPUs are identical
 vhost-user-blk now shuts down faster
 loongarch64 now supports bios-tables-test
 intel_iommu now supports ATS
 cxl now supports DCD Fabric Management Command Set
 arm now supports acpi pci hotplug
 
 fixes, cleanups
 
 Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 
 iQFDBAABCgAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmh1+7APHG1zdEByZWRo
 YXQuY29tAAoJECgfDbjSjVRpcZ8H/2udpCZ49vjPB8IwQAGdFTw2TWVdxUQFHexQ
 pOsCGyFBNAXqD1bmb8lwWyYVJ08WELyL6xWsQ5tfVPiXpKYYHPHl4rNr/SPoyNcv
 joY++tagudmOki2DU7nfJ+rPIIuigOTUHbv4TZciwcHle6f65s0iKXhR1sL0cj4i
 TS6iJlApSuJInrBBUxuxSUomXk79mFTNKRiXj1k58LRw6JOUEgYvtIW8i+mOUcTg
 h1dZphxEQr/oG+a2pM8GOVJ1AFaBPSfgEnRM4kTX9QuTIDCeMAKUBo/mwOk6PV7z
 ZhSrDPLrea27XKGL++EJm0fFJ/AsHF1dTks2+c0rDrSK+UV87Zc=
 =sktm
 -----END PGP SIGNATURE-----

Merge tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu into staging

virtio,pci,pc: features, fixes, tests

SPCR acpi table can now be disabled
vhost-vdpa can now report hashing capability to guest
PPTT acpi table now tells guest vCPUs are identical
vhost-user-blk now shuts down faster
loongarch64 now supports bios-tables-test
intel_iommu now supports ATS
cxl now supports DCD Fabric Management Command Set
arm now supports acpi pci hotplug

fixes, cleanups

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

 # -----BEGIN PGP SIGNATURE-----
 #
 # iQFDBAABCgAtFiEEXQn9CHHI+FuUyooNKB8NuNKNVGkFAmh1+7APHG1zdEByZWRo
 # YXQuY29tAAoJECgfDbjSjVRpcZ8H/2udpCZ49vjPB8IwQAGdFTw2TWVdxUQFHexQ
 # pOsCGyFBNAXqD1bmb8lwWyYVJ08WELyL6xWsQ5tfVPiXpKYYHPHl4rNr/SPoyNcv
 # joY++tagudmOki2DU7nfJ+rPIIuigOTUHbv4TZciwcHle6f65s0iKXhR1sL0cj4i
 # TS6iJlApSuJInrBBUxuxSUomXk79mFTNKRiXj1k58LRw6JOUEgYvtIW8i+mOUcTg
 # h1dZphxEQr/oG+a2pM8GOVJ1AFaBPSfgEnRM4kTX9QuTIDCeMAKUBo/mwOk6PV7z
 # ZhSrDPLrea27XKGL++EJm0fFJ/AsHF1dTks2+c0rDrSK+UV87Zc=
 # =sktm
 # -----END PGP SIGNATURE-----
 # gpg: Signature made Tue 15 Jul 2025 02:56:48 EDT
 # gpg:                using RSA key 5D09FD0871C8F85B94CA8A0D281F0DB8D28D5469
 # gpg:                issuer "mst@redhat.com"
 # gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>" [full]
 # gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>" [full]
 # Primary key fingerprint: 0270 606B 6F3C DF3D 0B17  0970 C350 3912 AFBE 8E67
 #      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA  8A0D 281F 0DB8 D28D 5469

* tag 'for_upstream' of https://git.kernel.org/pub/scm/virt/kvm/mst/qemu: (97 commits)
  hw/cxl: mailbox-utils: 0x5605 - FMAPI Initiate DC Release
  hw/cxl: mailbox-utils: 0x5604 - FMAPI Initiate DC Add
  hw/cxl: Create helper function to create DC Event Records from extents
  hw/cxl: mailbox-utils: 0x5603 - FMAPI Get DC Region Extent Lists
  hw/cxl: mailbox-utils: 0x5602 - FMAPI Set DC Region Config
  hw/mem: cxl_type3: Add DC Region bitmap lock
  hw/cxl: Move definition for dynamic_capacity_uuid and enum for DC event types to header
  hw/cxl: mailbox-utils: 0x5601 - FMAPI Get Host Region Config
  hw/mem: cxl_type3: Add dsmas_flags to CXLDCRegion struct
  hw/cxl: mailbox-utils: 0x5600 - FMAPI Get DCD Info
  hw/cxl: fix DC extent capacity tracking
  tests: virt: Update expected ACPI tables for virt test
  hw/acpi/aml-build: Build a root node in the PPTT table
  hw/acpi/aml-build: Set identical implementation flag for PPTT processor nodes
  tests: virt: Allow changes to PPTT test table
  qtest/bios-tables-test: Generate reference blob for DSDT.acpipcihp
  qtest/bios-tables-test: Generate reference blob for DSDT.hpoffacpiindex
  tests/qtest/bios-tables-test: Add aarch64 ACPI PCI hotplug test
  tests/qtest/bios-tables-test: Prepare for addition of acpi pci hp tests
  hw/arm/virt: Let virt support pci hotplug/unplug GED event
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>

Conflicts:
  net/vhost-vdpa.c
  vhost_vdpa_set_steering_ebpf() was removed, resolve the context
  conflict.
This commit is contained in:
Stefan Hajnoczi 2025-07-16 07:00:47 -04:00
commit e452053097
139 changed files with 2460 additions and 1034 deletions

View file

@ -22,7 +22,6 @@
#include "system/accel-ops.h"
#include "system/cpus.h"
#include "exec/cpu-common.h"
#include "exec/tswap.h"
#include "exec/replay-core.h"
#include "exec/log.h"
#include "hw/core/cpu.h"
@ -85,9 +84,3 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...)
#endif
abort();
}
#undef target_big_endian
bool target_big_endian(void)
{
return TARGET_BIG_ENDIAN;
}

View file

@ -4,7 +4,7 @@
const VMStateDescription vmstate_acpi_pcihp_pci_status;
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s,
MemoryRegion *address_space_io, uint16_t io_base)
{
}

View file

@ -160,7 +160,7 @@ void crs_replace_with_free_ranges(GPtrArray *ranges,
*/
static void crs_range_merge(GPtrArray *range)
{
GPtrArray *tmp = g_ptr_array_new_with_free_func(crs_range_free);
g_autoptr(GPtrArray) tmp = g_ptr_array_new_with_free_func(crs_range_free);
CrsRangeEntry *entry;
uint64_t range_base, range_limit;
int i;
@ -191,7 +191,6 @@ static void crs_range_merge(GPtrArray *range)
entry = g_ptr_array_index(tmp, i);
crs_range_insert(range, entry->base, entry->limit);
}
g_ptr_array_free(tmp, true);
}
static void
@ -2153,12 +2152,25 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
int64_t socket_id = -1, cluster_id = -1, core_id = -1;
uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
uint32_t pptt_start = table_data->len;
uint32_t root_offset;
int n;
AcpiTable table = { .sig = "PPTT", .rev = 2,
.oem_id = oem_id, .oem_table_id = oem_table_id };
acpi_table_begin(&table, table_data);
/*
* Build a root node for all the processor nodes. Otherwise when
* building a multi-socket system each socket tree is separated
* and will be hard for the OS like Linux to know whether the
* system is homogeneous.
*/
root_offset = table_data->len - pptt_start;
build_processor_hierarchy_node(table_data,
(1 << 0) | /* Physical package */
(1 << 4), /* Identical Implementation */
0, 0, NULL, 0);
/*
* This works with the assumption that cpus[n].props.*_id has been
* sorted from top to down levels in mc->possible_cpu_arch_ids().
@ -2173,8 +2185,9 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
core_id = -1;
socket_offset = table_data->len - pptt_start;
build_processor_hierarchy_node(table_data,
(1 << 0), /* Physical package */
0, socket_id, NULL, 0);
(1 << 0) | /* Physical package */
(1 << 4), /* Identical Implementation */
root_offset, socket_id, NULL, 0);
}
if (mc->smp_props.clusters_supported && mc->smp_props.has_clusters) {
@ -2184,7 +2197,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
core_id = -1;
cluster_offset = table_data->len - pptt_start;
build_processor_hierarchy_node(table_data,
(0 << 0), /* Not a physical package */
(0 << 0) | /* Not a physical package */
(1 << 4), /* Identical Implementation */
socket_offset, cluster_id, NULL, 0);
}
} else {
@ -2202,7 +2216,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
core_id = cpus->cpus[n].props.core_id;
core_offset = table_data->len - pptt_start;
build_processor_hierarchy_node(table_data,
(0 << 0), /* Not a physical package */
(0 << 0) | /* Not a physical package */
(1 << 4), /* Identical Implementation */
cluster_offset, core_id, NULL, 0);
}

View file

@ -22,8 +22,6 @@
#include "hw/acpi/bios-linker-loader.h"
#include "hw/nvram/fw_cfg.h"
#include "qemu/bswap.h"
/*
* Linker/loader is a paravirtualized interface that passes commands to guest.
* The commands can be used to request guest to

View file

@ -12,10 +12,13 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/pcihp.h"
#include "hw/acpi/generic_event_device.h"
#include "hw/pci/pci.h"
#include "hw/irq.h"
#include "hw/mem/pc-dimm.h"
#include "hw/mem/nvdimm.h"
#include "hw/pci/pci_device.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
@ -26,6 +29,7 @@ static const uint32_t ged_supported_events[] = {
ACPI_GED_PWR_DOWN_EVT,
ACPI_GED_NVDIMM_HOTPLUG_EVT,
ACPI_GED_CPU_HOTPLUG_EVT,
ACPI_GED_PCI_HOTPLUG_EVT,
};
/*
@ -121,6 +125,12 @@ void build_ged_aml(Aml *table, const char *name, HotplugHandler *hotplug_dev,
aml_notify(aml_name("\\_SB.NVDR"),
aml_int(0x80)));
break;
case ACPI_GED_PCI_HOTPLUG_EVT:
aml_append(if_ctx,
aml_acquire(aml_name("\\_SB.PCI0.BLCK"), 0xFFFF));
aml_append(if_ctx, aml_call0("\\_SB.PCI0.PCNT"));
aml_append(if_ctx, aml_release(aml_name("\\_SB.PCI0.BLCK")));
break;
default:
/*
* Please make sure all the events in ged_supported_events[]
@ -227,6 +237,14 @@ static const MemoryRegionOps ged_regs_ops = {
},
};
static void acpi_ged_device_pre_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_pre_plug_cb(hotplug_dev, dev, errp);
}
}
static void acpi_ged_device_plug_cb(HotplugHandler *hotplug_dev,
DeviceState *dev, Error **errp)
{
@ -240,6 +258,8 @@ static void acpi_ged_device_plug_cb(HotplugHandler *hotplug_dev,
}
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
acpi_cpu_plug_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_plug_cb(hotplug_dev, &s->pcihp_state, dev, errp);
} else {
error_setg(errp, "virt: device plug request for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
@ -256,6 +276,9 @@ static void acpi_ged_unplug_request_cb(HotplugHandler *hotplug_dev,
acpi_memory_unplug_request_cb(hotplug_dev, &s->memhp_state, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
acpi_cpu_unplug_request_cb(hotplug_dev, &s->cpuhp_state, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_unplug_request_cb(hotplug_dev, &s->pcihp_state,
dev, errp);
} else {
error_setg(errp, "acpi: device unplug request for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
@ -271,6 +294,8 @@ static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev,
acpi_memory_unplug_cb(&s->memhp_state, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) {
acpi_cpu_unplug_cb(&s->cpuhp_state, dev, errp);
} else if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
acpi_pcihp_device_unplug_cb(hotplug_dev, &s->pcihp_state, dev, errp);
} else {
error_setg(errp, "acpi: device unplug for unsupported device"
" type: %s", object_get_typename(OBJECT(dev)));
@ -299,6 +324,8 @@ static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
sel = ACPI_GED_NVDIMM_HOTPLUG_EVT;
} else if (ev & ACPI_CPU_HOTPLUG_STATUS) {
sel = ACPI_GED_CPU_HOTPLUG_EVT;
} else if (ev & ACPI_PCI_HOTPLUG_STATUS) {
sel = ACPI_GED_PCI_HOTPLUG_EVT;
} else {
/* Unknown event. Return without generating interrupt. */
warn_report("GED: Unsupported event %d. No irq injected", ev);
@ -318,6 +345,10 @@ static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev)
static const Property acpi_ged_properties[] = {
DEFINE_PROP_UINT32("ged-event", AcpiGedState, ged_event_bitmap, 0),
DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, AcpiGedState,
pcihp_state.use_acpi_hotplug_bridge, 0),
DEFINE_PROP_LINK("bus", AcpiGedState, pcihp_state.root,
TYPE_PCI_BUS, PCIBus *),
};
static const VMStateDescription vmstate_memhp_state = {
@ -386,6 +417,25 @@ static const VMStateDescription vmstate_ghes_state = {
}
};
static bool pcihp_needed(void *opaque)
{
AcpiGedState *s = opaque;
return s->pcihp_state.use_acpi_hotplug_bridge;
}
static const VMStateDescription vmstate_pcihp_state = {
.name = "acpi-ged/pcihp",
.version_id = 1,
.minimum_version_id = 1,
.needed = pcihp_needed,
.fields = (const VMStateField[]) {
VMSTATE_PCI_HOTPLUG(pcihp_state,
AcpiGedState,
NULL, NULL),
VMSTATE_END_OF_LIST()
}
};
static const VMStateDescription vmstate_acpi_ged = {
.name = "acpi-ged",
.version_id = 1,
@ -398,6 +448,7 @@ static const VMStateDescription vmstate_acpi_ged = {
&vmstate_memhp_state,
&vmstate_cpuhp_state,
&vmstate_ghes_state,
&vmstate_pcihp_state,
NULL
}
};
@ -406,9 +457,13 @@ static void acpi_ged_realize(DeviceState *dev, Error **errp)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
AcpiGedState *s = ACPI_GED(dev);
AcpiPciHpState *pcihp_state = &s->pcihp_state;
uint32_t ged_events;
int i;
if (pcihp_state->use_acpi_hotplug_bridge) {
s->ged_event_bitmap |= ACPI_GED_PCI_HOTPLUG_EVT;
}
ged_events = ctpop32(s->ged_event_bitmap);
for (i = 0; i < ARRAY_SIZE(ged_supported_events) && ged_events; i++) {
@ -428,6 +483,13 @@ static void acpi_ged_realize(DeviceState *dev, Error **errp)
cpu_hotplug_hw_init(&s->container_cpuhp, OBJECT(dev),
&s->cpuhp_state, 0);
break;
case ACPI_GED_PCI_HOTPLUG_EVT:
memory_region_init(&s->container_pcihp, OBJECT(dev),
ACPI_PCIHP_REGION_NAME, ACPI_PCIHP_SIZE);
sysbus_init_mmio(sbd, &s->container_pcihp);
acpi_pcihp_init(OBJECT(s), &s->pcihp_state,
&s->container_pcihp, 0);
qbus_set_hotplug_handler(BUS(s->pcihp_state.root), OBJECT(dev));
}
ged_events--;
}
@ -469,20 +531,34 @@ static void acpi_ged_initfn(Object *obj)
sysbus_init_mmio(sbd, &ged_st->regs);
}
static void ged_reset_hold(Object *obj, ResetType type)
{
AcpiGedState *s = ACPI_GED(obj);
if (s->pcihp_state.use_acpi_hotplug_bridge) {
acpi_pcihp_reset(&s->pcihp_state);
}
}
static void acpi_ged_class_init(ObjectClass *class, const void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(class);
AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(class);
ResettableClass *rc = RESETTABLE_CLASS(class);
AcpiGedClass *gedc = ACPI_GED_CLASS(class);
dc->desc = "ACPI Generic Event Device";
device_class_set_props(dc, acpi_ged_properties);
dc->vmsd = &vmstate_acpi_ged;
dc->realize = acpi_ged_realize;
hc->pre_plug = acpi_ged_device_pre_plug_cb;
hc->plug = acpi_ged_device_plug_cb;
hc->unplug_request = acpi_ged_unplug_request_cb;
hc->unplug = acpi_ged_unplug_cb;
resettable_class_set_parent_phases(rc, NULL, ged_reset_hold, NULL,
&gedc->parent_phases);
adevc->ospm_status = acpi_ged_ospm_status;
adevc->send_event = acpi_ged_send_event;
@ -494,6 +570,7 @@ static const TypeInfo acpi_ged_info = {
.instance_size = sizeof(AcpiGedState),
.instance_init = acpi_ged_initfn,
.class_init = acpi_ged_class_init,
.class_size = sizeof(AcpiGedClass),
.interfaces = (const InterfaceInfo[]) {
{ TYPE_HOTPLUG_HANDLER },
{ TYPE_ACPI_DEVICE_IF },

View file

@ -322,9 +322,10 @@ void ich9_pm_init(PCIDevice *lpc_pci, ICH9LPCPMRegs *pm, qemu_irq sci_irq)
}
if (pm->acpi_pci_hotplug.use_acpi_hotplug_bridge) {
object_property_set_link(OBJECT(lpc_pci), "bus",
OBJECT(pci_get_bus(lpc_pci)), &error_abort);
acpi_pcihp_init(OBJECT(lpc_pci),
&pm->acpi_pci_hotplug,
pci_get_bus(lpc_pci),
pci_address_space_io(lpc_pci),
ACPI_PCIHP_ADDR_ICH9);
@ -428,6 +429,10 @@ void ich9_pm_add_properties(Object *obj, ICH9LPCPMRegs *pm)
object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE,
&pm->pm_io_base, OBJ_PROP_FLAG_READ);
object_property_add_link(obj, "bus", TYPE_PCI_BUS,
(Object **)&pm->acpi_pci_hotplug.root,
object_property_allow_set_link,
OBJ_PROP_LINK_STRONG);
object_property_add(obj, ACPI_PM_PROP_GPE0_BLK, "uint32",
ich9_pm_get_gpe0_blk,
NULL, NULL, pm);

View file

@ -35,3 +35,57 @@ void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope)
}
}
}
Aml *build_pci_bridge_edsm(void)
{
Aml *method, *ifctx;
Aml *zero = aml_int(0);
Aml *func = aml_arg(2);
Aml *ret = aml_local(0);
Aml *aidx = aml_local(1);
Aml *params = aml_arg(4);
method = aml_method("EDSM", 5, AML_SERIALIZED);
/* get supported functions */
ifctx = aml_if(aml_equal(func, zero));
{
/* 1: have supported functions */
/* 7: support for function 7 */
const uint8_t caps = 1 | BIT(7);
build_append_pci_dsm_func0_common(ifctx, ret);
aml_append(ifctx, aml_store(aml_int(caps), aml_index(ret, zero)));
aml_append(ifctx, aml_return(ret));
}
aml_append(method, ifctx);
/* handle specific functions requests */
/*
* PCI Firmware Specification 3.1
* 4.6.7. _DSM for Naming a PCI or PCI Express Device Under
* Operating Systems
*/
ifctx = aml_if(aml_equal(func, aml_int(7)));
{
Aml *pkg = aml_package(2);
aml_append(pkg, zero);
/* optional, if not impl. should return null string */
aml_append(pkg, aml_string("%s", ""));
aml_append(ifctx, aml_store(pkg, ret));
/*
* IASL is fine when initializing Package with computational data,
* however it makes guest unhappy /it fails to process such AML/.
* So use runtime assignment to set acpi-index after initializer
* to make OSPM happy.
*/
aml_append(ifctx,
aml_store(aml_derefof(aml_index(params, aml_int(0))), aidx));
aml_append(ifctx, aml_store(aidx, aml_index(ret, zero)));
aml_append(ifctx, aml_return(ret));
}
aml_append(method, ifctx);
return method;
}

View file

@ -301,3 +301,53 @@ void build_srat_generic_affinity_structures(GArray *table_data)
object_child_foreach_recursive(object_get_root(), build_acpi_generic_port,
table_data);
}
Aml *build_pci_host_bridge_osc_method(bool enable_native_pcie_hotplug)
{
Aml *if_ctx;
Aml *if_ctx2;
Aml *else_ctx;
Aml *method;
Aml *a_cwd1 = aml_name("CDW1");
Aml *a_ctrl = aml_local(0);
method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));
if_ctx = aml_if(aml_equal(
aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766")));
aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl));
/*
* Always allow native PME, AER (no dependencies)
* Allow SHPC (PCI bridges can have SHPC controller)
* Disable PCIe Native Hot-plug if ACPI PCI Hot-plug is enabled.
*/
aml_append(if_ctx, aml_and(a_ctrl,
aml_int(0x1E | (enable_native_pcie_hotplug ? 0x1 : 0x0)), a_ctrl));
if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1))));
/* Unknown revision */
aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1));
aml_append(if_ctx, if_ctx2);
if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl)));
/* Capabilities bits were masked */
aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1));
aml_append(if_ctx, if_ctx2);
/* Update DWORD3 in the buffer */
aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3")));
aml_append(method, if_ctx);
else_ctx = aml_else();
/* Unrecognized UUID */
aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1));
aml_append(method, else_ctx);
aml_append(method, aml_return(aml_arg(3)));
return method;
}

View file

@ -26,7 +26,8 @@
#include "qemu/osdep.h"
#include "hw/acpi/pcihp.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/acpi_aml_interface.h"
#include "hw/pci-host/i440fx.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bridge.h"
@ -39,9 +40,9 @@
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qom/qom-qobject.h"
#include "qobject/qnum.h"
#include "trace.h"
#define ACPI_PCIHP_SIZE 0x0018
#define PCI_UP_BASE 0x0000
#define PCI_DOWN_BASE 0x0004
#define PCI_EJ_BASE 0x0008
@ -97,10 +98,10 @@ static void *acpi_set_bsel(PCIBus *bus, void *opaque)
return info;
}
static void acpi_set_pci_info(bool has_bridge_hotplug)
static void acpi_set_pci_info(AcpiPciHpState *s)
{
static bool bsel_is_set;
Object *host = acpi_get_i386_pci_host();
bool has_bridge_hotplug = s->use_acpi_hotplug_bridge;
PCIBus *bus;
BSELInfo info = { .bsel_alloc = ACPI_PCIHP_BSEL_DEFAULT,
.has_bridge_hotplug = has_bridge_hotplug };
@ -110,11 +111,8 @@ static void acpi_set_pci_info(bool has_bridge_hotplug)
}
bsel_is_set = true;
if (!host) {
return;
}
bus = PCI_HOST_BRIDGE(host)->bus;
bus = s->root;
if (bus) {
/* Scan all PCI buses. Set property to enable acpi based hotplug. */
pci_for_each_bus_depth_first(bus, acpi_set_bsel, NULL, &info);
@ -264,7 +262,7 @@ static void acpi_pcihp_update(AcpiPciHpState *s)
void acpi_pcihp_reset(AcpiPciHpState *s)
{
acpi_set_pci_info(s->use_acpi_hotplug_bridge);
acpi_set_pci_info(s);
acpi_pcihp_update(s);
}
@ -495,13 +493,13 @@ static const MemoryRegionOps acpi_pcihp_io_ops = {
},
};
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
void acpi_pcihp_init(Object *owner, AcpiPciHpState *s,
MemoryRegion *io, uint16_t io_base)
{
s->io_len = ACPI_PCIHP_SIZE;
s->io_base = io_base;
s->root = root_bus;
assert(s->root);
memory_region_init_io(&s->io, owner, &acpi_pcihp_io_ops, s,
"acpi-pci-hotplug", s->io_len);
@ -513,6 +511,425 @@ void acpi_pcihp_init(Object *owner, AcpiPciHpState *s, PCIBus *root_bus,
OBJ_PROP_FLAG_READ);
}
void build_append_pci_dsm_func0_common(Aml *ctx, Aml *retvar)
{
Aml *UUID, *ifctx1;
uint8_t byte_list[1] = { 0 }; /* nothing supported yet */
aml_append(ctx, aml_store(aml_buffer(1, byte_list), retvar));
/*
* PCI Firmware Specification 3.1
* 4.6. _DSM Definitions for PCI
*/
UUID = aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D");
ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(0), UUID)));
{
/* call is for unsupported UUID, bail out */
aml_append(ifctx1, aml_return(retvar));
}
aml_append(ctx, ifctx1);
ifctx1 = aml_if(aml_lless(aml_arg(1), aml_int(2)));
{
/* call is for unsupported REV, bail out */
aml_append(ifctx1, aml_return(retvar));
}
aml_append(ctx, ifctx1);
}
static Aml *aml_pci_pdsm(void)
{
Aml *method, *ifctx, *ifctx1;
Aml *ret = aml_local(0);
Aml *caps = aml_local(1);
Aml *acpi_index = aml_local(2);
Aml *zero = aml_int(0);
Aml *one = aml_int(1);
Aml *not_supp = aml_int(0xFFFFFFFF);
Aml *func = aml_arg(2);
Aml *params = aml_arg(4);
Aml *bnum = aml_derefof(aml_index(params, aml_int(0)));
Aml *sunum = aml_derefof(aml_index(params, aml_int(1)));
method = aml_method("PDSM", 5, AML_SERIALIZED);
/* get supported functions */
ifctx = aml_if(aml_equal(func, zero));
{
build_append_pci_dsm_func0_common(ifctx, ret);
aml_append(ifctx, aml_store(zero, caps));
aml_append(ifctx,
aml_store(aml_call2("AIDX", bnum, sunum), acpi_index));
/*
* advertise function 7 if device has acpi-index
* acpi_index values:
* 0: not present (default value)
* FFFFFFFF: not supported (old QEMU without PIDX reg)
* other: device's acpi-index
*/
ifctx1 = aml_if(aml_lnot(
aml_or(aml_equal(acpi_index, zero),
aml_equal(acpi_index, not_supp), NULL)
));
{
/* have supported functions */
aml_append(ifctx1, aml_or(caps, one, caps));
/* support for function 7 */
aml_append(ifctx1,
aml_or(caps, aml_shiftleft(one, aml_int(7)), caps));
}
aml_append(ifctx, ifctx1);
aml_append(ifctx, aml_store(caps, aml_index(ret, zero)));
aml_append(ifctx, aml_return(ret));
}
aml_append(method, ifctx);
/* handle specific functions requests */
/*
* PCI Firmware Specification 3.1
* 4.6.7. _DSM for Naming a PCI or PCI Express Device Under
* Operating Systems
*/
ifctx = aml_if(aml_equal(func, aml_int(7)));
{
Aml *pkg = aml_package(2);
aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index));
aml_append(ifctx, aml_store(pkg, ret));
/*
* Windows calls func=7 without checking if it's available,
* as workaround Microsoft has suggested to return invalid for func7
* Package, so return 2 elements package but only initialize elements
* when acpi_index is supported and leave them uninitialized, which
* leads elements to being Uninitialized ObjectType and should trip
* Windows into discarding result as an unexpected and prevent setting
* bogus 'PCI Label' on the device.
*/
ifctx1 = aml_if(aml_lnot(aml_lor(
aml_equal(acpi_index, zero), aml_equal(acpi_index, not_supp)
)));
{
aml_append(ifctx1, aml_store(acpi_index, aml_index(ret, zero)));
/*
* optional, if not impl. should return null string
*/
aml_append(ifctx1, aml_store(aml_string("%s", ""),
aml_index(ret, one)));
}
aml_append(ifctx, ifctx1);
aml_append(ifctx, aml_return(ret));
}
aml_append(method, ifctx);
return method;
}
void build_acpi_pci_hotplug(Aml *table, AmlRegionSpace rs, uint64_t pcihp_addr)
{
Aml *scope;
Aml *field;
Aml *method;
scope = aml_scope("_SB.PCI0");
aml_append(scope,
aml_operation_region("PCST", rs, aml_int(pcihp_addr), 0x08));
field = aml_field("PCST", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("PCIU", 32));
aml_append(field, aml_named_field("PCID", 32));
aml_append(scope, field);
aml_append(scope,
aml_operation_region("SEJ", rs,
aml_int(pcihp_addr + ACPI_PCIHP_SEJ_BASE), 0x04));
field = aml_field("SEJ", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("B0EJ", 32));
aml_append(scope, field);
aml_append(scope,
aml_operation_region("BNMR", rs,
aml_int(pcihp_addr + ACPI_PCIHP_BNMR_BASE), 0x08));
field = aml_field("BNMR", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
aml_append(field, aml_named_field("BNUM", 32));
aml_append(field, aml_named_field("PIDX", 32));
aml_append(scope, field);
aml_append(scope, aml_mutex("BLCK", 0));
method = aml_method("PCEJ", 2, AML_NOTSERIALIZED);
aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF));
aml_append(method, aml_store(aml_arg(0), aml_name("BNUM")));
aml_append(method,
aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("B0EJ")));
aml_append(method, aml_release(aml_name("BLCK")));
aml_append(method, aml_return(aml_int(0)));
aml_append(scope, method);
method = aml_method("AIDX", 2, AML_NOTSERIALIZED);
aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF));
aml_append(method, aml_store(aml_arg(0), aml_name("BNUM")));
aml_append(method,
aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("PIDX")));
aml_append(method, aml_store(aml_name("PIDX"), aml_local(0)));
aml_append(method, aml_release(aml_name("BLCK")));
aml_append(method, aml_return(aml_local(0)));
aml_append(scope, method);
aml_append(scope, aml_pci_pdsm());
aml_append(table, scope);
}
/* Reserve PCIHP resources */
void build_append_pcihp_resources(Aml *scope /* \\_SB.PCI0 */,
uint64_t io_addr, uint64_t io_len)
{
Aml *dev, *crs;
dev = aml_device("PHPR");
aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06")));
aml_append(dev,
aml_name_decl("_UID", aml_string("PCI Hotplug resources")));
/* device present, functioning, decoding, not shown in UI */
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
crs = aml_resource_template();
aml_append(crs, aml_io(AML_DECODE16, io_addr, io_addr, 1, io_len));
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
}
bool build_append_notification_callback(Aml *parent_scope, const PCIBus *bus)
{
Aml *method;
PCIBus *sec;
QObject *bsel;
int nr_notifiers = 0;
GQueue *pcnt_bus_list = g_queue_new();
QLIST_FOREACH(sec, &bus->child, sibling) {
Aml *br_scope = aml_scope("S%.02X", sec->parent_dev->devfn);
if (pci_bus_is_root(sec)) {
continue;
}
nr_notifiers = nr_notifiers +
build_append_notification_callback(br_scope, sec);
/*
* add new child scope to parent
* and keep track of bus that have PCNT,
* bus list is used later to call children PCNTs from this level PCNT
*/
if (nr_notifiers) {
g_queue_push_tail(pcnt_bus_list, sec);
aml_append(parent_scope, br_scope);
}
}
/*
* Append PCNT method to notify about events on local and child buses.
* ps: hostbridge might not have hotplug (bsel) enabled but might have
* child bridges that do have bsel.
*/
method = aml_method("PCNT", 0, AML_NOTSERIALIZED);
/* If bus supports hotplug select it and notify about local events */
bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
if (bsel) {
uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel));
aml_append(method, aml_store(aml_int(bsel_val), aml_name("BNUM")));
aml_append(method, aml_call2("DVNT", aml_name("PCIU"),
aml_int(1))); /* Device Check */
aml_append(method, aml_call2("DVNT", aml_name("PCID"),
aml_int(3))); /* Eject Request */
nr_notifiers++;
}
/* Notify about child bus events in any case */
while ((sec = g_queue_pop_head(pcnt_bus_list))) {
aml_append(method, aml_name("^S%.02X.PCNT", sec->parent_dev->devfn));
}
aml_append(parent_scope, method);
qobject_unref(bsel);
g_queue_free(pcnt_bus_list);
return !!nr_notifiers;
}
static Aml *aml_pci_device_dsm(void)
{
Aml *method;
method = aml_method("_DSM", 4, AML_SERIALIZED);
{
Aml *params = aml_local(0);
Aml *pkg = aml_package(2);
aml_append(pkg, aml_int(0));
aml_append(pkg, aml_int(0));
aml_append(method, aml_store(pkg, params));
aml_append(method,
aml_store(aml_name("BSEL"), aml_index(params, aml_int(0))));
aml_append(method,
aml_store(aml_name("ASUN"), aml_index(params, aml_int(1))));
aml_append(method,
aml_return(aml_call5("PDSM", aml_arg(0), aml_arg(1),
aml_arg(2), aml_arg(3), params))
);
}
return method;
}
static Aml *aml_pci_static_endpoint_dsm(PCIDevice *pdev)
{
Aml *method;
g_assert(pdev->acpi_index != 0);
method = aml_method("_DSM", 4, AML_SERIALIZED);
{
Aml *params = aml_local(0);
Aml *pkg = aml_package(1);
aml_append(pkg, aml_int(pdev->acpi_index));
aml_append(method, aml_store(pkg, params));
aml_append(method,
aml_return(aml_call5("EDSM", aml_arg(0), aml_arg(1),
aml_arg(2), aml_arg(3), params))
);
}
return method;
}
static void build_append_pcihp_notify_entry(Aml *method, int slot)
{
Aml *if_ctx;
int32_t devfn = PCI_DEVFN(slot, 0);
if_ctx = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot), NULL));
aml_append(if_ctx, aml_notify(aml_name("S%.02X", devfn), aml_arg(1)));
aml_append(method, if_ctx);
}
static bool is_devfn_ignored_generic(const int devfn, const PCIBus *bus)
{
const PCIDevice *pdev = bus->devices[devfn];
if (PCI_FUNC(devfn)) {
if (IS_PCI_BRIDGE(pdev)) {
/*
* Ignore only hotplugged PCI bridges on !0 functions, but
* allow describing cold plugged bridges on all functions
*/
if (DEVICE(pdev)->hotplugged) {
return true;
}
}
}
return false;
}
static bool is_devfn_ignored_hotplug(const int devfn, const PCIBus *bus)
{
PCIDevice *pdev = bus->devices[devfn];
if (pdev) {
return is_devfn_ignored_generic(devfn, bus) ||
!DEVICE_GET_CLASS(pdev)->hotpluggable ||
/* Cold plugged bridges aren't themselves hot-pluggable */
(IS_PCI_BRIDGE(pdev) && !DEVICE(pdev)->hotplugged);
} else { /* non populated slots */
/*
* hotplug is supported only for non-multifunction device
* so generate device description only for function 0
*/
if (PCI_FUNC(devfn) ||
(pci_bus_is_express(bus) && PCI_SLOT(devfn) > 0)) {
return true;
}
}
return false;
}
void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus)
{
int devfn;
Aml *dev, *notify_method = NULL, *method;
QObject *bsel = object_property_get_qobject(OBJECT(bus),
ACPI_PCIHP_PROP_BSEL, NULL);
uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel));
qobject_unref(bsel);
aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val)));
notify_method = aml_method("DVNT", 2, AML_NOTSERIALIZED);
for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
int slot = PCI_SLOT(devfn);
int adr = slot << 16 | PCI_FUNC(devfn);
if (is_devfn_ignored_hotplug(devfn, bus)) {
continue;
}
if (bus->devices[devfn]) {
dev = aml_scope("S%.02X", devfn);
} else {
dev = aml_device("S%.02X", devfn);
aml_append(dev, aml_name_decl("_ADR", aml_int(adr)));
}
/*
* Can't declare _SUN here for every device as it changes 'slot'
* enumeration order in linux kernel, so use another variable for it
*/
aml_append(dev, aml_name_decl("ASUN", aml_int(slot)));
aml_append(dev, aml_pci_device_dsm());
aml_append(dev, aml_name_decl("_SUN", aml_int(slot)));
/* add _EJ0 to make slot hotpluggable */
method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
aml_append(method,
aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN"))
);
aml_append(dev, method);
build_append_pcihp_notify_entry(notify_method, slot);
/* device descriptor has been composed, add it into parent context */
aml_append(parent_scope, dev);
}
aml_append(parent_scope, notify_method);
}
/*
 * Append an ACPI device description for every populated, non-ignored
 * devfn of @bus into @parent_scope.
 */
void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus)
{
    int devfn;

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        PCIDevice *pdev = bus->devices[devfn];
        /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */
        int adr = PCI_SLOT(devfn) << 16 | PCI_FUNC(devfn);
        Aml *dev;
        bool needs_static_dsm;

        if (!pdev || is_devfn_ignored_generic(devfn, bus)) {
            continue;
        }

        /* start to compose PCI device descriptor */
        dev = aml_device("S%.02X", devfn);
        aml_append(dev, aml_name_decl("_ADR", aml_int(adr)));
        call_dev_aml_func(DEVICE(pdev), dev);

        /* add _DSM if device has acpi-index set */
        needs_static_dsm = pdev->acpi_index &&
            !object_property_get_bool(OBJECT(pdev), "hotpluggable",
                                      &error_abort);
        if (needs_static_dsm) {
            aml_append(dev, aml_pci_static_endpoint_dsm(pdev));
        }

        /* device descriptor has been composed, add it into parent context */
        aml_append(parent_scope, dev);
    }
}
const VMStateDescription vmstate_acpi_pcihp_pci_status = {
.name = "acpi_pcihp_pci_status",
.version_id = 1,

View file

@ -567,7 +567,8 @@ static void piix4_acpi_system_hot_add_init(MemoryRegion *parent,
if (s->acpi_pci_hotplug.use_acpi_hotplug_bridge ||
s->acpi_pci_hotplug.use_acpi_root_pci_hotplug) {
acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, bus, parent,
object_property_set_link(OBJECT(s), "bus", OBJECT(bus), &error_abort);
acpi_pcihp_init(OBJECT(s), &s->acpi_pci_hotplug, parent,
ACPI_PCIHP_ADDR_PIIX4);
qbus_set_hotplug_handler(BUS(pci_get_bus(PCI_DEVICE(s))), OBJECT(s));
}
@ -611,6 +612,8 @@ static const Property piix4_pm_properties[] = {
acpi_pci_hotplug.use_acpi_hotplug_bridge, true),
DEFINE_PROP_BOOL(ACPI_PM_PROP_ACPI_PCI_ROOTHP, PIIX4PMState,
acpi_pci_hotplug.use_acpi_root_pci_hotplug, true),
DEFINE_PROP_LINK("bus", PIIX4PMState, acpi_pci_hotplug.root,
TYPE_PCI_BUS, PCIBus *),
DEFINE_PROP_BOOL("memory-hotplug-support", PIIX4PMState,
acpi_memory_hotplug.is_enabled, true),
DEFINE_PROP_BOOL("smm-compat", PIIX4PMState, smm_compat, false),

View file

@ -34,6 +34,8 @@ config ARM_VIRT
select ACPI_HW_REDUCED
select ACPI_APEI
select ACPI_VIOT
select ACPI_PCIHP
select ACPI_PCI_BRIDGE
select VIRTIO_MEM_SUPPORTED
select ACPI_CXL
select ACPI_HMAT

View file

@ -20,7 +20,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/bswap.h"
#include "qemu/module.h"
#include "qemu/units.h"
#include "hw/boards.h"

View file

@ -15,6 +15,7 @@
#include "hw/arm/boot.h"
#include "hw/arm/linux-boot-if.h"
#include "cpu.h"
#include "exec/tswap.h"
#include "exec/target_page.h"
#include "system/kvm.h"
#include "system/tcg.h"
@ -29,6 +30,7 @@
#include "qemu/config-file.h"
#include "qemu/option.h"
#include "qemu/units.h"
#include "qemu/bswap.h"
/* Kernel boot protocol is specified in the kernel docs
* Documentation/arm/Booting and Documentation/arm64/booting.txt

View file

@ -24,7 +24,7 @@
#include "hw/qdev-clock.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/bswap.h"
#include "exec/tswap.h"
#include "qemu/units.h"
#include "system/system.h"
#include "target/arm/cpu-qom.h"

View file

@ -34,6 +34,7 @@
#include "hw/core/cpu.h"
#include "hw/acpi/acpi-defs.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/pcihp.h"
#include "hw/nvram/fw_cfg_acpi.h"
#include "hw/acpi/bios-linker-loader.h"
#include "hw/acpi/aml-build.h"
@ -144,12 +145,21 @@ static void acpi_dsdt_add_pci(Aml *scope, const MemMapEntry *memmap,
int ecam_id = VIRT_ECAM_ID(vms->highmem_ecam);
bool cxl_present = false;
PCIBus *bus = vms->bus;
bool acpi_pcihp = false;
if (vms->acpi_dev) {
acpi_pcihp = object_property_get_bool(OBJECT(vms->acpi_dev),
ACPI_PM_PROP_ACPI_PCIHP_BRIDGE,
NULL);
}
struct GPEXConfig cfg = {
.mmio32 = memmap[VIRT_PCIE_MMIO],
.pio = memmap[VIRT_PCIE_PIO],
.ecam = memmap[ecam_id],
.irq = irq,
.bus = vms->bus,
.pci_native_hotplug = !acpi_pcihp,
};
if (vms->highmem_mmio) {
@ -897,6 +907,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
const int *irqmap = vms->irqmap;
AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = vms->oem_id,
.oem_table_id = vms->oem_table_id };
Aml *pci0_scope;
acpi_table_begin(&table, table_data);
dsdt = init_aml_allocator();
@ -950,6 +961,33 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
aml_append(dsdt, scope);
pci0_scope = aml_scope("\\_SB.PCI0");
aml_append(pci0_scope, build_pci_bridge_edsm());
build_append_pci_bus_devices(pci0_scope, vms->bus);
if (object_property_find(OBJECT(vms->bus), ACPI_PCIHP_PROP_BSEL)) {
build_append_pcihp_slots(pci0_scope, vms->bus);
}
if (vms->acpi_dev) {
bool acpi_pcihp;
acpi_pcihp = object_property_get_bool(OBJECT(vms->acpi_dev),
ACPI_PM_PROP_ACPI_PCIHP_BRIDGE,
NULL);
if (acpi_pcihp) {
build_acpi_pci_hotplug(dsdt, AML_SYSTEM_MEMORY,
memmap[VIRT_ACPI_PCIHP].base);
build_append_pcihp_resources(pci0_scope,
memmap[VIRT_ACPI_PCIHP].base,
memmap[VIRT_ACPI_PCIHP].size);
build_append_notification_callback(pci0_scope, vms->bus);
}
}
aml_append(dsdt, pci0_scope);
/* copy AML table into ACPI tables blob */
g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len);
@ -1023,7 +1061,10 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
}
acpi_add_table(table_offsets, tables_blob);
spcr_setup(tables_blob, tables->linker, vms);
if (ms->acpi_spcr_enabled) {
spcr_setup(tables_blob, tables->linker, vms);
}
acpi_add_table(table_offsets, tables_blob);
build_dbg2(tables_blob, tables->linker, vms);

View file

@ -76,6 +76,7 @@
#include "standard-headers/linux/input.h"
#include "hw/arm/smmuv3.h"
#include "hw/acpi/acpi.h"
#include "hw/acpi/pcihp.h"
#include "target/arm/cpu-qom.h"
#include "target/arm/internals.h"
#include "target/arm/multiprocessing.h"
@ -186,6 +187,7 @@ static const MemMapEntry base_memmap[] = {
[VIRT_NVDIMM_ACPI] = { 0x09090000, NVDIMM_ACPI_IO_LEN},
[VIRT_PVTIME] = { 0x090a0000, 0x00010000 },
[VIRT_SECURE_GPIO] = { 0x090b0000, 0x00001000 },
[VIRT_ACPI_PCIHP] = { 0x090c0000, ACPI_PCIHP_SIZE },
[VIRT_MMIO] = { 0x0a000000, 0x00000200 },
/* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */
[VIRT_PLATFORM_BUS] = { 0x0c000000, 0x02000000 },
@ -686,8 +688,10 @@ static inline DeviceState *create_acpi_ged(VirtMachineState *vms)
{
DeviceState *dev;
MachineState *ms = MACHINE(vms);
SysBusDevice *sbdev;
int irq = vms->irqmap[VIRT_ACPI_GED];
uint32_t event = ACPI_GED_PWR_DOWN_EVT;
bool acpi_pcihp;
if (ms->ram_slots) {
event |= ACPI_GED_MEM_HOTPLUG_EVT;
@ -699,11 +703,26 @@ static inline DeviceState *create_acpi_ged(VirtMachineState *vms)
dev = qdev_new(TYPE_ACPI_GED);
qdev_prop_set_uint32(dev, "ged-event", event);
sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
object_property_set_link(OBJECT(dev), "bus", OBJECT(vms->bus), &error_abort);
sbdev = SYS_BUS_DEVICE(dev);
sysbus_realize_and_unref(sbdev, &error_fatal);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_ACPI_GED].base);
sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, vms->memmap[VIRT_PCDIMM_ACPI].base);
sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(vms->gic, irq));
sysbus_mmio_map_name(sbdev, TYPE_ACPI_GED, vms->memmap[VIRT_ACPI_GED].base);
sysbus_mmio_map_name(sbdev, ACPI_MEMHP_REGION_NAME,
vms->memmap[VIRT_PCDIMM_ACPI].base);
acpi_pcihp = object_property_get_bool(OBJECT(dev),
ACPI_PM_PROP_ACPI_PCIHP_BRIDGE, NULL);
if (acpi_pcihp) {
int pcihp_region_index;
pcihp_region_index = sysbus_mmio_map_name(sbdev, ACPI_PCIHP_REGION_NAME,
vms->memmap[VIRT_ACPI_PCIHP].base);
assert(pcihp_region_index >= 0);
}
sysbus_connect_irq(sbdev, 0, qdev_get_gpio_in(vms->gic, irq));
return dev;
}

View file

@ -33,7 +33,6 @@
#include "qemu/osdep.h"
#include "system/block-backend.h"
#include "qapi/qapi-types-block.h"
#include "qemu/bswap.h"
#include "hw/block/block.h"
#include "trace.h"

View file

@ -13,7 +13,9 @@ system_ss.add(when: 'CONFIG_SSI_M25P80', if_true: files('m25p80_sfdp.c'))
system_ss.add(when: 'CONFIG_SWIM', if_true: files('swim.c'))
system_ss.add(when: 'CONFIG_XEN_BUS', if_true: files('xen-block.c'))
specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c', 'virtio-blk-common.c'))
specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c', 'virtio-blk-common.c'))
specific_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk.c'))
system_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio-blk-common.c'))
specific_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk.c'))
system_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('virtio-blk-common.c'))
subdir('dataplane')

View file

@ -210,6 +210,7 @@ static int vhost_user_blk_stop(VirtIODevice *vdev)
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
int ret;
bool force_stop = false;
if (!s->started_vu) {
return 0;
@ -220,7 +221,11 @@ static int vhost_user_blk_stop(VirtIODevice *vdev)
return 0;
}
ret = vhost_dev_stop(&s->dev, vdev, true);
force_stop = s->skip_get_vring_base_on_force_shutdown &&
qemu_force_shutdown_requested();
ret = force_stop ? vhost_dev_force_stop(&s->dev, vdev, true) :
vhost_dev_stop(&s->dev, vdev, true);
if (k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false) < 0) {
error_report("vhost guest notifier cleanup failed: %d", ret);
@ -584,6 +589,8 @@ static const Property vhost_user_blk_properties[] = {
VIRTIO_BLK_F_DISCARD, true),
DEFINE_PROP_BIT64("write-zeroes", VHostUserBlk, parent_obj.host_features,
VIRTIO_BLK_F_WRITE_ZEROES, true),
DEFINE_PROP_BOOL("skip-get-vring-base-on-force-shutdown", VHostUserBlk,
skip_get_vring_base_on_force_shutdown, false),
};
static void vhost_user_blk_class_init(ObjectClass *klass, const void *data)

View file

@ -29,7 +29,6 @@
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "system/address-spaces.h"
#include "exec/tswap.h"
#include "system/dma.h"
#include "system/runstate.h"
#include "trace.h"

View file

@ -24,7 +24,7 @@
#include "exec/cputlb.h"
#include "system/memory.h"
#include "exec/tb-flush.h"
#include "exec/tswap.h"
#include "qemu/target-info.h"
#include "hw/qdev-core.h"
#include "hw/qdev-properties.h"
#include "hw/core/sysemu-cpu-ops.h"

View file

@ -19,7 +19,7 @@
#include "qapi/qobject-input-visitor.h"
#include "qapi/type-helpers.h"
#include "qemu/uuid.h"
#include "qemu/target-info.h"
#include "qemu/target-info-qapi.h"
#include "qom/qom-qobject.h"
#include "system/hostmem.h"
#include "system/hw_accel.h"
@ -37,8 +37,7 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp)
MachineState *ms = MACHINE(qdev_get_machine());
MachineClass *mc = MACHINE_GET_CLASS(ms);
CpuInfoFastList *head = NULL, **tail = &head;
SysEmuTarget target = qapi_enum_parse(&SysEmuTarget_lookup, target_name(),
-1, &error_abort);
SysEmuTarget target = target_arch();
CPUState *cpu;
CPU_FOREACH(cpu) {
@ -139,8 +138,7 @@ QemuTargetInfo *qmp_query_target(Error **errp)
{
QemuTargetInfo *info = g_malloc0(sizeof(*info));
info->arch = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1,
&error_abort);
info->arch = target_arch();
return info;
}

View file

@ -577,6 +577,20 @@ static void machine_set_nvdimm(Object *obj, bool value, Error **errp)
ms->nvdimms_state->is_enabled = value;
}
/* Property getter for "spcr": report whether the ACPI SPCR table is enabled. */
static bool machine_get_spcr(Object *obj, Error **errp)
{
    return MACHINE(obj)->acpi_spcr_enabled;
}
/* Property setter for "spcr": enable/disable generation of the ACPI SPCR table. */
static void machine_set_spcr(Object *obj, bool value, Error **errp)
{
    MACHINE(obj)->acpi_spcr_enabled = value;
}
static bool machine_get_hmat(Object *obj, Error **errp)
{
MachineState *ms = MACHINE(obj);
@ -1281,6 +1295,14 @@ static void machine_initfn(Object *obj)
"Table (HMAT)");
}
/* SPCR */
ms->acpi_spcr_enabled = true;
object_property_add_bool(obj, "spcr", machine_get_spcr, machine_set_spcr);
object_property_set_description(obj, "spcr",
"Set on/off to enable/disable "
"ACPI Serial Port Console Redirection "
"Table (spcr)");
/* default to mc->default_cpus */
ms->smp.cpus = mc->default_cpus;
ms->smp.max_cpus = mc->default_cpus;

View file

@ -2,6 +2,7 @@
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qapi/qapi-types-misc.h"
#include "qapi/qapi-visit-common.h"
#include "qobject/qlist.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
@ -180,7 +181,8 @@ const PropertyInfo qdev_prop_bit = {
static uint64_t qdev_get_prop_mask64(const Property *prop)
{
assert(prop->info == &qdev_prop_bit64);
assert(prop->info == &qdev_prop_bit64 ||
prop->info == &qdev_prop_on_off_auto_bit64);
return 0x1ull << prop->bitnr;
}
@ -225,6 +227,69 @@ const PropertyInfo qdev_prop_bit64 = {
.set_default_value = set_default_value_bool,
};
/*
 * Visitor getter for a single bit of an OnOffAutoBit64 property.
 *
 * Decodes the (on_bits, auto_bits) pair at this property's bit position
 * into an OnOffAuto enum value: auto bit set -> AUTO, else on bit set ->
 * ON, else OFF, and emits it through @v.
 */
static void prop_get_on_off_auto_bit64(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    Property *prop = opaque;
    OnOffAutoBit64 *p = object_field_prop_ptr(obj, prop);
    OnOffAuto value;
    uint64_t mask = qdev_get_prop_mask64(prop);

    /* "auto" takes precedence over "on"; both clear means "off" */
    if (p->auto_bits & mask) {
        value = ON_OFF_AUTO_AUTO;
    } else if (p->on_bits & mask) {
        value = ON_OFF_AUTO_ON;
    } else {
        value = ON_OFF_AUTO_OFF;
    }

    visit_type_OnOffAuto(v, name, &value, errp);
}
/*
 * Visitor setter for a single bit of an OnOffAutoBit64 property.
 *
 * Parses an OnOffAuto value from @v and encodes it into the
 * (on_bits, auto_bits) pair at this property's bit position.  Leaves the
 * bits untouched if the visit fails.
 */
static void prop_set_on_off_auto_bit64(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    Property *prop = opaque;
    OnOffAutoBit64 *p = object_field_prop_ptr(obj, prop);
    OnOffAuto value;
    uint64_t mask = qdev_get_prop_mask64(prop);

    if (!visit_type_OnOffAuto(v, name, &value, errp)) {
        return;
    }

    switch (value) {
    case ON_OFF_AUTO_AUTO:
        /* AUTO: auto bit set, on bit cleared */
        p->on_bits &= ~mask;
        p->auto_bits |= mask;
        break;

    case ON_OFF_AUTO_ON:
        /* ON: on bit set, auto bit cleared */
        p->on_bits |= mask;
        p->auto_bits &= ~mask;
        break;

    case ON_OFF_AUTO_OFF:
        /* OFF: both bits cleared */
        p->on_bits &= ~mask;
        p->auto_bits &= ~mask;
        break;

    case ON_OFF_AUTO__MAX:
        g_assert_not_reached();
    }
}
/*
 * PropertyInfo for one on/off/auto flag packed into an OnOffAutoBit64
 * bit-pair; the bit position comes from Property::bitnr (see
 * qdev_get_prop_mask64()).
 */
const PropertyInfo qdev_prop_on_off_auto_bit64 = {
    .type = "OnOffAuto",
    .description = "on/off/auto",
    .enum_table = &OnOffAuto_lookup,
    .get = prop_get_on_off_auto_bit64,
    .set = prop_set_on_off_auto_bit64,
    .set_default_value = qdev_propinfo_set_default_value_enum,
};
/* --- bool --- */
static void get_bool(Object *obj, Visitor *v, const char *name, void *opaque,

View file

@ -151,6 +151,17 @@ void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr)
sysbus_mmio_map_common(dev, n, addr, false, 0);
}
/*
 * Map the MMIO region of @dev whose MemoryRegion name equals @name at
 * guest-physical address @addr.
 *
 * Returns the index of the mapped region, or -1 if no region of @dev has
 * that name.
 */
int sysbus_mmio_map_name(SysBusDevice *dev, const char *name, hwaddr addr)
{
    int idx;

    for (idx = 0; idx < dev->num_mmio; idx++) {
        if (strcmp(dev->mmio[idx].memory->name, name) != 0) {
            continue;
        }
        sysbus_mmio_map(dev, idx, addr);
        return idx;
    }

    return -1;
}
void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr,
int priority)
{

View file

@ -8,8 +8,6 @@
*/
#include "qemu/osdep.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
@ -260,3 +258,41 @@ void cxl_event_irq_assert(CXLType3Dev *ct3d)
}
}
}
/*
 * Queue one Dynamic Capacity event record per extent in @extents into the
 * device's Dynamic Capacity event log, asserting the event IRQ when the
 * insert reports it is needed.
 *
 * All records in one call share a single @type (e.g. add/release capacity)
 * and every record except the last has the "More" flag (bit 0) set, so the
 * host can tell the extents belong to one operation.
 */
void cxl_create_dc_event_records_for_extents(CXLType3Dev *ct3d,
                                             CXLDCEventType type,
                                             CXLDCExtentRaw extents[],
                                             uint32_t ext_count)
{
    CXLEventDynamicCapacity event_rec = {};
    int i;

    /* Common header; the per-extent fields are filled in the loop below */
    cxl_assign_event_header(&event_rec.hdr,
                            &dynamic_capacity_uuid,
                            (1 << CXL_EVENT_TYPE_INFO),
                            sizeof(event_rec),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    event_rec.type = type;
    event_rec.validity_flags = 1;
    event_rec.host_id = 0;
    event_rec.updated_region_id = 0;
    /* how many more extents the device could still track */
    event_rec.extents_avail = CXL_NUM_EXTENTS_SUPPORTED -
                              ct3d->dc.total_extent_count;

    for (i = 0; i < ext_count; i++) {
        memcpy(&event_rec.dynamic_capacity_extent,
               &extents[i],
               sizeof(CXLDCExtentRaw));
        event_rec.flags = 0;
        if (i < ext_count - 1) {
            /* Set "More" flag */
            event_rec.flags |= BIT(0);
        }

        if (cxl_event_insert(&ct3d->cxl_dstate,
                             CXL_EVENT_TYPE_DYNAMIC_CAP,
                             (CXLEventRecordRaw *)&event_rec)) {
            cxl_event_irq_assert(ct3d);
        }
    }
}

View file

@ -18,15 +18,16 @@
#include "hw/pci/pci.h"
#include "hw/pci-bridge/cxl_upstream_port.h"
#include "qemu/cutils.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "qemu/uuid.h"
#include "system/hostmem.h"
#include "qemu/range.h"
#include "qapi/qapi-types-cxl.h"
#define CXL_CAPACITY_MULTIPLIER (256 * MiB)
#define CXL_DC_EVENT_LOG_SIZE 8
#define CXL_NUM_EXTENTS_SUPPORTED 512
#define CXL_NUM_TAGS_SUPPORTED 0
#define CXL_ALERTS_LIFE_USED_WARN_THRESH (1 << 0)
#define CXL_ALERTS_OVER_TEMP_WARN_THRESH (1 << 1)
@ -117,6 +118,13 @@ enum {
#define GET_PHYSICAL_PORT_STATE 0x1
TUNNEL = 0x53,
#define MANAGEMENT_COMMAND 0x0
FMAPI_DCD_MGMT = 0x56,
#define GET_DCD_INFO 0x0
#define GET_HOST_DC_REGION_CONFIG 0x1
#define SET_DC_REGION_CONFIG 0x2
#define GET_DC_REGION_EXTENT_LIST 0x3
#define INITIATE_DC_ADD 0x4
#define INITIATE_DC_RELEASE 0x5
};
/* CCI Message Format CXL r3.1 Figure 7-19 */
@ -2750,7 +2758,7 @@ static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
uint16_t out_pl_len, size;
CXLDCExtent *ent;
if (start_extent_id > ct3d->dc.total_extent_count) {
if (start_extent_id > ct3d->dc.nr_extents_accepted) {
return CXL_MBOX_INVALID_INPUT;
}
@ -2761,7 +2769,7 @@ static CXLRetCode cmd_dcd_get_dyn_cap_ext_list(const struct cxl_cmd *cmd,
out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
stl_le_p(&out->count, record_count);
stl_le_p(&out->total_extents, ct3d->dc.total_extent_count);
stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted);
stl_le_p(&out->generation_num, ct3d->dc.ext_list_gen_seq);
if (record_count > 0) {
@ -2883,16 +2891,20 @@ void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
QTAILQ_INSERT_TAIL(list, group, node);
}
void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list)
{
CXLDCExtent *ent, *ent_next;
CXLDCExtentGroup *group = QTAILQ_FIRST(list);
uint32_t extents_deleted = 0;
QTAILQ_REMOVE(list, group, node);
QTAILQ_FOREACH_SAFE(ent, &group->list, node, ent_next) {
cxl_remove_extent_from_extent_list(&group->list, ent);
extents_deleted++;
}
g_free(group);
return extents_deleted;
}
/*
@ -3011,7 +3023,7 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
CXLUpdateDCExtentListInPl *in = (void *)payload_in;
CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
CXLDCExtentList *extent_list = &ct3d->dc.extents;
uint32_t i;
uint32_t i, num;
uint64_t dpa, len;
CXLRetCode ret;
@ -3020,7 +3032,8 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
}
if (in->num_entries_updated == 0) {
cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
ct3d->dc.total_extent_count -= num;
return CXL_MBOX_SUCCESS;
}
@ -3051,10 +3064,12 @@ static CXLRetCode cmd_dcd_add_dyn_cap_rsp(const struct cxl_cmd *cmd,
cxl_insert_extent_to_extent_list(extent_list, dpa, len, NULL, 0);
ct3d->dc.total_extent_count += 1;
ct3d->dc.nr_extents_accepted += 1;
ct3_set_region_block_backed(ct3d, dpa, len);
}
/* Remove the first extent group in the pending list */
cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
num = cxl_extent_group_list_delete_front(&ct3d->dc.extents_pending);
ct3d->dc.total_extent_count -= num;
return CXL_MBOX_SUCCESS;
}
@ -3160,7 +3175,7 @@ free_and_exit:
}
*updated_list_size = 0;
} else {
*updated_list_size = ct3d->dc.total_extent_count + cnt_delta;
*updated_list_size = ct3d->dc.nr_extents_accepted + cnt_delta;
}
return ret;
@ -3222,11 +3237,495 @@ static CXLRetCode cmd_dcd_release_dyn_cap(const struct cxl_cmd *cmd,
ct3_set_region_block_backed(ct3d, ent->start_dpa, ent->len);
cxl_remove_extent_from_extent_list(&updated_list, ent);
}
ct3d->dc.total_extent_count = updated_list_size;
ct3d->dc.total_extent_count += (updated_list_size -
ct3d->dc.nr_extents_accepted);
ct3d->dc.nr_extents_accepted = updated_list_size;
return CXL_MBOX_SUCCESS;
}
/* CXL r3.2 section 7.6.7.6.1: Get DCD Info (Opcode 5600h) */
static CXLRetCode cmd_fm_get_dcd_info(const struct cxl_cmd *cmd,
                                      uint8_t *payload_in,
                                      size_t len_in,
                                      uint8_t *payload_out,
                                      size_t *len_out,
                                      CXLCCI *cci)
{
    /* Output payload; all multi-byte fields are little-endian on the wire */
    struct {
        uint8_t num_hosts;
        uint8_t num_regions_supported;
        uint8_t rsvd1[2];
        uint16_t supported_add_sel_policy_bitmask;
        uint8_t rsvd2[2];
        uint16_t supported_removal_policy_bitmask;
        uint8_t sanitize_on_release_bitmask;
        uint8_t rsvd3;
        uint64_t total_dynamic_capacity;
        uint64_t region_blk_size_bitmasks[8];
    } QEMU_PACKED *out = (void *)payload_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCRegion *region;
    int i;

    /* Only a single directly-attached host is modelled */
    out->num_hosts = 1;
    out->num_regions_supported = ct3d->dc.num_regions;
    stw_le_p(&out->supported_add_sel_policy_bitmask,
             BIT(CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE));
    stw_le_p(&out->supported_removal_policy_bitmask,
             BIT(CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE));
    out->sanitize_on_release_bitmask = 0;
    stq_le_p(&out->total_dynamic_capacity,
             ct3d->dc.total_capacity / CXL_CAPACITY_MULTIPLIER);
    for (i = 0; i < ct3d->dc.num_regions; i++) {
        region = &ct3d->dc.regions[i];
        /*
         * Use stq_le_p() instead of a raw memcpy() so the bitmask is
         * little-endian in the payload on big-endian hosts too, consistent
         * with the other multi-byte fields above.
         */
        stq_le_p(&out->region_blk_size_bitmasks[i],
                 region->supported_blk_size_bitmask);
    }

    *len_out = sizeof(*out);
    return CXL_MBOX_SUCCESS;
}
/*
 * Translate a DC region's boolean attributes into the DSMAS flags byte
 * written into the Get Host DC Region Configuration payload.
 */
static void build_dsmas_flags(uint8_t *flags, CXLDCRegion *region)
{
    uint8_t result = 0;

    if (region->nonvolatile) {
        result |= BIT(CXL_DSMAS_FLAGS_NONVOLATILE);
    }
    if (region->sharable) {
        result |= BIT(CXL_DSMAS_FLAGS_SHARABLE);
    }
    if (region->hw_managed_coherency) {
        result |= BIT(CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY);
    }
    if (region->ic_specific_dc_management) {
        result |= BIT(CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT);
    }
    if (region->rdonly) {
        result |= BIT(CXL_DSMAS_FLAGS_RDONLY);
    }

    *flags = result;
}
/*
 * CXL r3.2 section 7.6.7.6.2:
 * Get Host DC Region Configuration (Opcode 5601h)
 *
 * Returns up to @in->region_cnt region configuration records starting at
 * region @in->start_rid, followed by a trailing extent/tag availability
 * block.  Fails with Invalid Input if the start region id is out of range.
 */
static CXLRetCode cmd_fm_get_host_dc_region_config(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    /* input payload: host id, number of regions requested, first region id */
    struct {
        uint16_t host_id;
        uint8_t region_cnt;
        uint8_t start_rid;
    } QEMU_PACKED *in = (void *)payload_in;
    /* output payload: header plus one record per returned region */
    struct {
        uint16_t host_id;
        uint8_t num_regions;
        uint8_t regions_returned;
        struct {
            uint64_t base;
            uint64_t decode_len;
            uint64_t region_len;
            uint64_t block_size;
            uint8_t flags;
            uint8_t rsvd1[3];
            uint8_t sanitize;
            uint8_t rsvd2[3];
        } QEMU_PACKED records[];
    } QEMU_PACKED *out = (void *)payload_out;
    /* trailing availability block appended after the variable records */
    struct {
        uint32_t num_extents_supported;
        uint32_t num_extents_available;
        uint32_t num_tags_supported;
        uint32_t num_tags_available;
    } QEMU_PACKED *extra_out;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    uint16_t record_count, out_pl_len, i;

    if (in->start_rid >= ct3d->dc.num_regions) {
        return CXL_MBOX_INVALID_INPUT;
    }
    /* clamp to the regions that actually exist past start_rid */
    record_count = MIN(ct3d->dc.num_regions - in->start_rid, in->region_cnt);

    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);
    /* the availability block sits immediately after the last record */
    extra_out = (void *)out + out_pl_len;
    out_pl_len += sizeof(*extra_out);
    assert(out_pl_len <= CXL_MAILBOX_MAX_PAYLOAD_SIZE);

    stw_le_p(&out->host_id, 0);
    out->num_regions = ct3d->dc.num_regions;
    out->regions_returned = record_count;

    for (i = 0; i < record_count; i++) {
        stq_le_p(&out->records[i].base,
                 ct3d->dc.regions[in->start_rid + i].base);
        /* decode_len is reported in units of CXL_CAPACITY_MULTIPLIER */
        stq_le_p(&out->records[i].decode_len,
                 ct3d->dc.regions[in->start_rid + i].decode_len /
                 CXL_CAPACITY_MULTIPLIER);
        stq_le_p(&out->records[i].region_len,
                 ct3d->dc.regions[in->start_rid + i].len);
        stq_le_p(&out->records[i].block_size,
                 ct3d->dc.regions[in->start_rid + i].block_size);
        build_dsmas_flags(&out->records[i].flags,
                          &ct3d->dc.regions[in->start_rid + i]);
        /* Sanitize is bit 0 of flags. */
        out->records[i].sanitize =
            ct3d->dc.regions[in->start_rid + i].flags & BIT(0);
    }

    stl_le_p(&extra_out->num_extents_supported, CXL_NUM_EXTENTS_SUPPORTED);
    stl_le_p(&extra_out->num_extents_available, CXL_NUM_EXTENTS_SUPPORTED -
             ct3d->dc.total_extent_count);
    stl_le_p(&extra_out->num_tags_supported, CXL_NUM_TAGS_SUPPORTED);
    stl_le_p(&extra_out->num_tags_available, CXL_NUM_TAGS_SUPPORTED);

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
/* CXL r3.2 section 7.6.7.6.3: Set Host DC Region Configuration (Opcode 5602) */
static CXLRetCode cmd_fm_set_dc_region_config(const struct cxl_cmd *cmd,
                                              uint8_t *payload_in,
                                              size_t len_in,
                                              uint8_t *payload_out,
                                              size_t *len_out,
                                              CXLCCI *cci)
{
    struct {
        uint8_t reg_id;
        uint8_t rsvd[3];
        uint64_t block_sz;
        uint8_t flags;
        uint8_t rsvd2[3];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLEventDynamicCapacity dcEvent = {};
    CXLDCRegion *region;

    /*
     * Validate the FM-supplied region index BEFORE indexing the regions
     * array: previously the sanitize-bit compare below dereferenced
     * ct3d->dc.regions[in->reg_id] first, which read out of bounds for
     * reg_id >= DCD_MAX_NUM_REGION.
     */
    if (in->reg_id >= DCD_MAX_NUM_REGION) {
        return CXL_MBOX_UNSUPPORTED;
    }
    region = &ct3d->dc.regions[in->reg_id];

    /*
     * CXL r3.2 7.6.7.6.3: Set DC Region Configuration
     * This command shall fail with Unsupported when the Sanitize on Release
     * field does not match the regions configuration... and the device
     * does not support reconfiguration of the Sanitize on Release setting.
     *
     * Currently not reconfigurable, so always fail if sanitize bit (bit 0)
     * doesn't match.
     */
    if ((in->flags & 0x1) != (region->flags & 0x1)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Check that no extents are in the region being reconfigured */
    if (!bitmap_empty(region->blk_bitmap, region->len / region->block_size)) {
        return CXL_MBOX_UNSUPPORTED;
    }

    /* Check that new block size is supported */
    if (!is_power_of_2(in->block_sz) ||
        !(in->block_sz & region->supported_blk_size_bitmask)) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* Return success if new block size == current block size */
    if (in->block_sz == region->block_size) {
        return CXL_MBOX_SUCCESS;
    }

    /* Free bitmap and create new one for new block size. */
    qemu_mutex_lock(&region->bitmap_lock);
    g_free(region->blk_bitmap);
    region->blk_bitmap = bitmap_new(region->len / in->block_sz);
    qemu_mutex_unlock(&region->bitmap_lock);
    region->block_size = in->block_sz;

    /* Create event record and insert into event log */
    cxl_assign_event_header(&dcEvent.hdr,
                            &dynamic_capacity_uuid,
                            (1 << CXL_EVENT_TYPE_INFO),
                            sizeof(dcEvent),
                            cxl_device_get_timestamp(&ct3d->cxl_dstate));
    dcEvent.type = DC_EVENT_REGION_CONFIG_UPDATED;
    dcEvent.validity_flags = 1;
    dcEvent.host_id = 0;
    dcEvent.updated_region_id = in->reg_id;
    if (cxl_event_insert(&ct3d->cxl_dstate,
                         CXL_EVENT_TYPE_DYNAMIC_CAP,
                         (CXLEventRecordRaw *)&dcEvent)) {
        cxl_event_irq_assert(ct3d);
    }
    return CXL_MBOX_SUCCESS;
}
/*
 * CXL r3.2 section 7.6.7.6.4: Get DC Region Extent Lists (Opcode 5603h)
 *
 * Returns up to @in->extent_cnt accepted extents starting at index
 * @in->start_extent_id, bounded by both the accepted-extent count and the
 * space left in the mailbox payload.  Only host id 0 is modelled.
 */
static CXLRetCode cmd_fm_get_dc_region_extent_list(const struct cxl_cmd *cmd,
                                                   uint8_t *payload_in,
                                                   size_t len_in,
                                                   uint8_t *payload_out,
                                                   size_t *len_out,
                                                   CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t rsvd[2];
        uint32_t extent_cnt;
        uint32_t start_extent_id;
    } QEMU_PACKED *in = (void *)payload_in;
    struct {
        uint16_t host_id;
        uint8_t rsvd[2];
        uint32_t start_extent_id;
        uint32_t extents_returned;
        uint32_t total_extents;
        uint32_t list_generation_num;
        uint8_t rsvd2[4];
        CXLDCExtentRaw records[];
    } QEMU_PACKED *out = (void *)payload_out;
    QEMU_BUILD_BUG_ON(sizeof(*in) != 0xc);
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    CXLDCExtent *ent;
    CXLDCExtentRaw *out_rec;
    uint16_t record_count = 0, record_done = 0, i = 0;
    uint16_t out_pl_len, max_size;

    /* only the single modelled host (id 0) has an extent list */
    if (in->host_id != 0) {
        return CXL_MBOX_INVALID_INPUT;
    }

    if (in->start_extent_id > ct3d->dc.nr_extents_accepted) {
        return CXL_MBOX_INVALID_INPUT;
    }

    /* cap by remaining extents, then by what fits in the payload */
    record_count = MIN(in->extent_cnt,
                       ct3d->dc.nr_extents_accepted - in->start_extent_id);
    max_size = CXL_MAILBOX_MAX_PAYLOAD_SIZE - sizeof(*out);
    record_count = MIN(record_count, max_size / sizeof(out->records[0]));
    out_pl_len = sizeof(*out) + record_count * sizeof(out->records[0]);

    stw_le_p(&out->host_id, in->host_id);
    stl_le_p(&out->start_extent_id, in->start_extent_id);
    stl_le_p(&out->extents_returned, record_count);
    stl_le_p(&out->total_extents, ct3d->dc.nr_extents_accepted);
    stl_le_p(&out->list_generation_num, ct3d->dc.ext_list_gen_seq);

    if (record_count > 0) {
        /* walk the accepted-extent list, skipping to start_extent_id */
        QTAILQ_FOREACH(ent, &ct3d->dc.extents, node) {
            if (i++ < in->start_extent_id) {
                continue;
            }
            out_rec = &out->records[record_done];
            stq_le_p(&out_rec->start_dpa, ent->start_dpa);
            stq_le_p(&out_rec->len, ent->len);
            memcpy(&out_rec->tag, ent->tag, 0x10);
            stw_le_p(&out_rec->shared_seq, ent->shared_seq);

            record_done++;
            if (record_done == record_count) {
                break;
            }
        }
    }

    *len_out = out_pl_len;
    return CXL_MBOX_SUCCESS;
}
/*
 * Helper function to convert CXLDCExtentRaw to CXLUpdateDCExtentListInPl
 * in order to reuse cxl_detect_malformed_extent_list() function which accepts
 * CXLUpdateDCExtentListInPl as a parameter.
 */
static void convert_raw_extents(CXLDCExtentRaw raw_extents[],
                                CXLUpdateDCExtentListInPl *extent_list,
                                int count)
{
    int idx;

    extent_list->num_entries_updated = count;
    for (idx = 0; idx < count; idx++) {
        CXLDCExtentRaw *src = &raw_extents[idx];

        /* only the DPA range matters for malformed-list detection */
        extent_list->updated_entries[idx].start_dpa = src->start_dpa;
        extent_list->updated_entries[idx].len = src->len;
    }
}
/* CXL r3.2 Section 7.6.7.6.5: Initiate Dynamic Capacity Add (Opcode 5604h) */
static CXLRetCode cmd_fm_initiate_dc_add(const struct cxl_cmd *cmd,
                                         uint8_t *payload_in,
                                         size_t len_in,
                                         uint8_t *payload_out,
                                         size_t *len_out,
                                         CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t selection_policy;
        uint8_t reg_num;
        uint64_t length;
        uint8_t tag[0x10];
        uint32_t ext_count;
        CXLDCExtentRaw extents[];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    int i, rc;

    switch (in->selection_policy) {
    case CXL_EXTENT_SELECTION_POLICY_PRESCRIPTIVE: {
        /*
         * Adding extents exceeds device's extent tracking ability.
         * Compare via subtraction so a huge FM-supplied ext_count cannot
         * wrap the 32-bit addition and slip past the limit.
         */
        if (ct3d->dc.total_extent_count > CXL_NUM_EXTENTS_SUPPORTED ||
            in->ext_count > CXL_NUM_EXTENTS_SUPPORTED -
                            ct3d->dc.total_extent_count) {
            return CXL_MBOX_RESOURCES_EXHAUSTED;
        }

        g_autofree CXLUpdateDCExtentListInPl *list =
            g_malloc0(sizeof(*list) +
                      in->ext_count * sizeof(*list->updated_entries));

        convert_raw_extents(in->extents, list, in->ext_count);
        rc = cxl_detect_malformed_extent_list(ct3d, list);

        for (i = 0; i < in->ext_count; i++) {
            CXLDCExtentRaw *ext = &in->extents[i];

            /* Check requested extents do not overlap with pending ones. */
            if (cxl_extent_groups_overlaps_dpa_range(&ct3d->dc.extents_pending,
                                                     ext->start_dpa,
                                                     ext->len)) {
                return CXL_MBOX_INVALID_EXTENT_LIST;
            }
            /* Check requested extents do not overlap with existing ones. */
            if (cxl_extents_overlaps_dpa_range(&ct3d->dc.extents,
                                               ext->start_dpa,
                                               ext->len)) {
                return CXL_MBOX_INVALID_EXTENT_LIST;
            }
        }

        if (rc) {
            return rc;
        }

        /* Collect all requested extents into one pending extent group */
        CXLDCExtentGroup *group = NULL;
        for (i = 0; i < in->ext_count; i++) {
            CXLDCExtentRaw *ext = &in->extents[i];

            group = cxl_insert_extent_to_extent_group(group, ext->start_dpa,
                                                      ext->len, ext->tag,
                                                      ext->shared_seq);
        }

        cxl_extent_group_list_insert_tail(&ct3d->dc.extents_pending, group);
        /* pending extents count against the device's tracking limit */
        ct3d->dc.total_extent_count += in->ext_count;
        /* notify the host via the Dynamic Capacity event log */
        cxl_create_dc_event_records_for_extents(ct3d,
                                                DC_EVENT_ADD_CAPACITY,
                                                in->extents,
                                                in->ext_count);

        return CXL_MBOX_SUCCESS;
    }
    default: {
        qemu_log_mask(LOG_UNIMP,
                      "CXL extent selection policy not supported.\n");
        return CXL_MBOX_INVALID_INPUT;
    }
    }
}
/* low nibble of flags selects the removal policy */
#define CXL_EXTENT_REMOVAL_POLICY_MASK 0x0F
/* bit 4 of flags requests forced removal (ignore pending extents) */
#define CXL_FORCED_REMOVAL_MASK (1 << 4)

/*
 * CXL r3.2 Section 7.6.7.6.6:
 * Initiate Dynamic Capacity Release (Opcode 5605h)
 *
 * Validates the requested extents and, on success, queues release-capacity
 * event records so the host can respond with Release Dynamic Capacity.
 * The device state itself is only updated when the host responds.
 */
static CXLRetCode cmd_fm_initiate_dc_release(const struct cxl_cmd *cmd,
                                             uint8_t *payload_in,
                                             size_t len_in,
                                             uint8_t *payload_out,
                                             size_t *len_out,
                                             CXLCCI *cci)
{
    struct {
        uint16_t host_id;
        uint8_t flags;
        uint8_t reg_num;
        uint64_t length;
        uint8_t tag[0x10];
        uint32_t ext_count;
        CXLDCExtentRaw extents[];
    } QEMU_PACKED *in = (void *)payload_in;
    CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
    int i, rc;

    switch (in->flags & CXL_EXTENT_REMOVAL_POLICY_MASK) {
    case CXL_EXTENT_REMOVAL_POLICY_PRESCRIPTIVE: {
        CXLDCExtentList updated_list;
        uint32_t updated_list_size;
        g_autofree CXLUpdateDCExtentListInPl *list =
            g_malloc0(sizeof(*list) +
                      in->ext_count * sizeof(*list->updated_entries));

        convert_raw_extents(in->extents, list, in->ext_count);
        rc = cxl_detect_malformed_extent_list(ct3d, list);
        if (rc) {
            return rc;
        }

        /*
         * Fail with Invalid PA if an extent is pending and Forced Removal
         * flag not set.
         */
        if (!(in->flags & CXL_FORCED_REMOVAL_MASK)) {
            for (i = 0; i < in->ext_count; i++) {
                CXLDCExtentRaw ext = in->extents[i];

                /*
                 * Check requested extents don't overlap with pending
                 * extents.
                 */
                if (cxl_extent_groups_overlaps_dpa_range(
                        &ct3d->dc.extents_pending,
                        ext.start_dpa,
                        ext.len)) {
                    return CXL_MBOX_INVALID_PA;
                }
            }
        }

        /*
         * Dry run only: validates the release is possible without mutating
         * device state.
         * NOTE(review): the entries the dry run places on updated_list do
         * not appear to be freed on this path — verify against
         * cxl_dc_extent_release_dry_run() for a possible leak.
         */
        rc = cxl_dc_extent_release_dry_run(ct3d,
                                           list,
                                           &updated_list,
                                           &updated_list_size);
        if (rc) {
            return rc;
        }

        cxl_create_dc_event_records_for_extents(ct3d,
                                                DC_EVENT_RELEASE_CAPACITY,
                                                in->extents,
                                                in->ext_count);
        return CXL_MBOX_SUCCESS;
    }
    default: {
        qemu_log_mask(LOG_UNIMP,
                      "CXL extent removal policy not supported.\n");
        return CXL_MBOX_INVALID_INPUT;
    }
    }
}
static const struct cxl_cmd cxl_cmd_set[256][256] = {
[INFOSTAT][BACKGROUND_OPERATION_ABORT] = { "BACKGROUND_OPERATION_ABORT",
cmd_infostat_bg_op_abort, 0, 0 },
@ -3340,6 +3839,36 @@ static const struct cxl_cmd cxl_cmd_set_sw[256][256] = {
cmd_tunnel_management_cmd, ~0, 0 },
};
static const struct cxl_cmd cxl_cmd_set_fm_dcd[256][256] = {
[FMAPI_DCD_MGMT][GET_DCD_INFO] = { "GET_DCD_INFO",
cmd_fm_get_dcd_info, 0, 0 },
[FMAPI_DCD_MGMT][GET_HOST_DC_REGION_CONFIG] = { "GET_HOST_DC_REGION_CONFIG",
cmd_fm_get_host_dc_region_config, 4, 0 },
[FMAPI_DCD_MGMT][SET_DC_REGION_CONFIG] = { "SET_DC_REGION_CONFIG",
cmd_fm_set_dc_region_config, 16,
(CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
[FMAPI_DCD_MGMT][GET_DC_REGION_EXTENT_LIST] = { "GET_DC_REGION_EXTENT_LIST",
cmd_fm_get_dc_region_extent_list, 12, 0 },
[FMAPI_DCD_MGMT][INITIATE_DC_ADD] = { "INIT_DC_ADD",
cmd_fm_initiate_dc_add, ~0,
(CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
[FMAPI_DCD_MGMT][INITIATE_DC_RELEASE] = { "INIT_DC_RELEASE",
cmd_fm_initiate_dc_release, ~0,
(CXL_MBOX_CONFIG_CHANGE_COLD_RESET |
CXL_MBOX_CONFIG_CHANGE_CONV_RESET |
CXL_MBOX_CONFIG_CHANGE_CXL_RESET |
CXL_MBOX_IMMEDIATE_CONFIG_CHANGE |
CXL_MBOX_IMMEDIATE_DATA_CHANGE) },
};
/*
* While the command is executing in the background, the device should
* update the percentage complete in the Background Command Status Register
@ -3614,7 +4143,12 @@ void cxl_initialize_t3_fm_owned_ld_mctpcci(CXLCCI *cci, DeviceState *d,
DeviceState *intf,
size_t payload_max)
{
CXLType3Dev *ct3d = CXL_TYPE3(d);
cxl_copy_cci_commands(cci, cxl_cmd_set_t3_fm_owned_ld_mctp);
if (ct3d->dc.num_regions) {
cxl_copy_cci_commands(cci, cxl_cmd_set_fm_dcd);
}
cci->d = d;
cci->intf = intf;
cxl_init_cci(cci, payload_max);

View file

@ -12,6 +12,7 @@
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/units.h"
#include "qemu/bswap.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/loader.h"

View file

@ -22,6 +22,7 @@
#include "vga-access.h"
#include "hw/qdev-properties.h"
#include "vga_regs.h"
#include "qemu/bswap.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/error-report.h"

View file

@ -26,7 +26,7 @@
#include "qemu/units.h"
#include "system/reset.h"
#include "qapi/error.h"
#include "exec/tswap.h"
#include "qemu/target-info.h"
#include "hw/display/vga.h"
#include "hw/i386/x86.h"
#include "hw/pci/pci.h"

View file

@ -338,405 +338,6 @@ build_facs(GArray *table_data)
g_array_append_vals(table_data, reserved, 40); /* Reserved */
}
/*
 * Build a per-slot _DSM method that packs the slot's BSEL and ASUN values
 * into a two-element package and forwards all four _DSM arguments plus that
 * package to the shared PDSM helper method.
 */
Aml *aml_pci_device_dsm(void)
{
    Aml *dsm = aml_method("_DSM", 4, AML_SERIALIZED);
    Aml *args = aml_local(0);
    Aml *addr_pkg = aml_package(2);

    /* placeholder elements, overwritten at runtime below */
    aml_append(addr_pkg, aml_int(0));
    aml_append(addr_pkg, aml_int(0));
    aml_append(dsm, aml_store(addr_pkg, args));
    aml_append(dsm,
               aml_store(aml_name("BSEL"), aml_index(args, aml_int(0))));
    aml_append(dsm,
               aml_store(aml_name("ASUN"), aml_index(args, aml_int(1))));
    aml_append(dsm,
               aml_return(aml_call5("PDSM", aml_arg(0), aml_arg(1),
                                    aml_arg(2), aml_arg(3), args)));
    return dsm;
}
/*
 * Emit the _DSM prologue shared by EDSM/PDSM: initialize @retvar to a
 * one-byte buffer of 0 (no functions supported) and bail out early for an
 * unknown UUID or a revision below 2.
 */
static void build_append_pci_dsm_func0_common(Aml *ctx, Aml *retvar)
{
    uint8_t no_caps[1] = { 0 }; /* nothing supported yet */
    Aml *bail;

    aml_append(ctx, aml_store(aml_buffer(1, no_caps), retvar));

    /*
     * PCI Firmware Specification 3.1
     * 4.6. _DSM Definitions for PCI
     */
    bail = aml_if(aml_lnot(aml_equal(aml_arg(0),
               aml_touuid("E5C937D0-3553-4D7A-9117-EA4D19C3434D"))));
    /* call is for unsupported UUID, bail out */
    aml_append(bail, aml_return(retvar));
    aml_append(ctx, bail);

    bail = aml_if(aml_lless(aml_arg(1), aml_int(2)));
    /* call is for unsupported REV, bail out */
    aml_append(bail, aml_return(retvar));
    aml_append(ctx, bail);
}
/*
 * Build the shared EDSM helper method used by cold-plugged endpoints'
 * static _DSM wrappers (see aml_pci_static_endpoint_dsm): func 0 reports
 * support for function 7, func 7 returns the acpi-index passed in Arg4.
 */
static Aml *aml_pci_edsm(void)
{
    Aml *method, *ifctx;
    Aml *zero = aml_int(0);
    Aml *func = aml_arg(2);
    Aml *ret = aml_local(0);
    Aml *aidx = aml_local(1);
    Aml *params = aml_arg(4);

    method = aml_method("EDSM", 5, AML_SERIALIZED);

    /* get supported functions */
    ifctx = aml_if(aml_equal(func, zero));
    {
        /* 1: have supported functions */
        /* 7: support for function 7 */
        const uint8_t caps = 1 | BIT(7);
        build_append_pci_dsm_func0_common(ifctx, ret);
        aml_append(ifctx, aml_store(aml_int(caps), aml_index(ret, zero)));
        aml_append(ifctx, aml_return(ret));
    }
    aml_append(method, ifctx);

    /* handle specific functions requests */
    /*
     * PCI Firmware Specification 3.1
     * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under
     *        Operating Systems
     */
    ifctx = aml_if(aml_equal(func, aml_int(7)));
    {
        Aml *pkg = aml_package(2);
        /* element 0: acpi-index placeholder, overwritten at runtime below */
        aml_append(pkg, zero);
        /* optional, if not impl. should return null string */
        aml_append(pkg, aml_string("%s", ""));
        aml_append(ifctx, aml_store(pkg, ret));

        /*
         * IASL is fine when initializing Package with computational data,
         * however it makes guest unhappy /it fails to process such AML/.
         * So use runtime assignment to set acpi-index after initializer
         * to make OSPM happy.
         */
        aml_append(ifctx,
            aml_store(aml_derefof(aml_index(params, aml_int(0))), aidx));
        aml_append(ifctx, aml_store(aidx, aml_index(ret, zero)));
        aml_append(ifctx, aml_return(ret));
    }
    aml_append(method, ifctx);

    return method;
}
/*
 * Build a _DSM for a cold-plugged endpoint with a fixed acpi-index: wrap
 * the index in a one-element package and delegate to the shared EDSM
 * method. Caller guarantees pdev->acpi_index is set (asserted).
 */
static Aml *aml_pci_static_endpoint_dsm(PCIDevice *pdev)
{
    Aml *dsm, *args, *idx_pkg;

    g_assert(pdev->acpi_index != 0);

    dsm = aml_method("_DSM", 4, AML_SERIALIZED);
    args = aml_local(0);
    idx_pkg = aml_package(1);
    aml_append(idx_pkg, aml_int(pdev->acpi_index));
    aml_append(dsm, aml_store(idx_pkg, args));
    aml_append(dsm,
               aml_return(aml_call5("EDSM", aml_arg(0), aml_arg(1),
                                    aml_arg(2), aml_arg(3), args)));
    return dsm;
}
/*
 * Append to @method: "if bit <slot> is set in Arg0, Notify the slot's
 * S<devfn> device object with the event code in Arg1".
 */
static void build_append_pcihp_notify_entry(Aml *method, int slot)
{
    int32_t devfn = PCI_DEVFN(slot, 0);
    Aml *slot_hit = aml_if(aml_and(aml_arg(0), aml_int(0x1U << slot), NULL));

    aml_append(slot_hit, aml_notify(aml_name("S%.02X", devfn), aml_arg(1)));
    aml_append(method, slot_hit);
}
/*
 * Decide whether @devfn should be omitted from the static ACPI
 * description: only hotplugged PCI bridges on non-zero functions are
 * skipped; cold-plugged bridges are described on all functions.
 */
static bool is_devfn_ignored_generic(const int devfn, const PCIBus *bus)
{
    const PCIDevice *pdev = bus->devices[devfn];

    /* same short-circuit order as the nested-if form it replaces */
    return PCI_FUNC(devfn) &&
           IS_PCI_BRIDGE(pdev) &&
           DEVICE(pdev)->hotplugged;
}
/*
 * Decide whether @devfn should be omitted from the hotplug (DVNT/_EJ0)
 * description. Populated slots are skipped if generically ignored, not
 * hotpluggable, or a cold-plugged bridge; empty slots are described only
 * at function 0 (and only slot 0 on PCIe buses).
 */
static bool is_devfn_ignored_hotplug(const int devfn, const PCIBus *bus)
{
    PCIDevice *pdev = bus->devices[devfn];

    if (!pdev) { /* non populated slots */
        /*
         * hotplug is supported only for non-multifunction device
         * so generate device description only for function 0
         */
        return PCI_FUNC(devfn) ||
               (pci_bus_is_express(bus) && PCI_SLOT(devfn) > 0);
    }

    if (is_devfn_ignored_generic(devfn, bus)) {
        return true;
    }
    if (!DEVICE_GET_CLASS(pdev)->hotpluggable) {
        return true;
    }
    /* Cold plugged bridges aren't themselves hot-pluggable */
    return IS_PCI_BRIDGE(pdev) && !DEVICE(pdev)->hotplugged;
}
/*
 * Describe the hot-pluggable slots of @bus under @parent_scope: declare
 * the bus's BSEL value, a DVNT notification dispatcher, and per-slot
 * device objects with _SUN/_DSM and an _EJ0 that ejects via PCEJ.
 */
void build_append_pcihp_slots(Aml *parent_scope, PCIBus *bus)
{
    int devfn;
    Aml *dev, *notify_method = NULL, *method;
    /* bus is expected to have the bsel property set by the hotplug code */
    QObject *bsel = object_property_get_qobject(OBJECT(bus),
                                                ACPI_PCIHP_PROP_BSEL, NULL);
    uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel));

    qobject_unref(bsel);
    aml_append(parent_scope, aml_name_decl("BSEL", aml_int(bsel_val)));
    /* DVNT(slot mask, event) dispatches Notify to matching slot objects */
    notify_method = aml_method("DVNT", 2, AML_NOTSERIALIZED);

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        int slot = PCI_SLOT(devfn);
        int adr = slot << 16 | PCI_FUNC(devfn);

        if (is_devfn_ignored_hotplug(devfn, bus)) {
            continue;
        }

        if (bus->devices[devfn]) {
            /* populated slot: extend the device object declared elsewhere */
            dev = aml_scope("S%.02X", devfn);
        } else {
            /* empty slot: declare a fresh device object with its address */
            dev = aml_device("S%.02X", devfn);
            aml_append(dev, aml_name_decl("_ADR", aml_int(adr)));
        }

        /*
         * Can't declare _SUN here for every device as it changes 'slot'
         * enumeration order in linux kernel, so use another variable for it
         */
        aml_append(dev, aml_name_decl("ASUN", aml_int(slot)));
        aml_append(dev, aml_pci_device_dsm());
        aml_append(dev, aml_name_decl("_SUN", aml_int(slot)));

        /* add _EJ0 to make slot hotpluggable */
        method = aml_method("_EJ0", 1, AML_NOTSERIALIZED);
        aml_append(method,
            aml_call2("PCEJ", aml_name("BSEL"), aml_name("_SUN"))
        );
        aml_append(dev, method);

        build_append_pcihp_notify_entry(notify_method, slot);

        /* device descriptor has been composed, add it into parent context */
        aml_append(parent_scope, dev);
    }
    aml_append(parent_scope, notify_method);
}
/*
 * Describe the cold-plugged, populated devices of @bus under
 * @parent_scope: one S<devfn> device object per occupied slot with its
 * _ADR, any device-specific AML, and a static _DSM when the device has
 * an acpi-index and is not hot-pluggable.
 */
void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus)
{
    int devfn;

    for (devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
        PCIDevice *pdev = bus->devices[devfn];
        /* ACPI spec: 1.0b: Table 6-2 _ADR Object Bus Types, PCI type */
        int adr = PCI_SLOT(devfn) << 16 | PCI_FUNC(devfn);
        Aml *dev;

        if (!pdev || is_devfn_ignored_generic(devfn, bus)) {
            continue;
        }

        /* start to compose PCI device descriptor */
        dev = aml_device("S%.02X", devfn);
        aml_append(dev, aml_name_decl("_ADR", aml_int(adr)));

        call_dev_aml_func(DEVICE(pdev), dev);

        /* add _DSM if device has acpi-index set */
        if (pdev->acpi_index &&
            !object_property_get_bool(OBJECT(pdev), "hotpluggable",
                                      &error_abort)) {
            aml_append(dev, aml_pci_static_endpoint_dsm(pdev));
        }

        /* device descriptor has been composed, add it into parent context */
        aml_append(parent_scope, dev);
    }
}
/*
 * Recursively append a PCNT ("PCI notify") method for @bus and its child
 * bridges under @parent_scope. Returns true if this level or any child
 * emitted notifications (i.e. the generated PCNT is non-trivial).
 */
static bool build_append_notification_callback(Aml *parent_scope,
                                               const PCIBus *bus)
{
    Aml *method;
    PCIBus *sec;
    QObject *bsel;
    int nr_notifiers = 0;
    GQueue *pcnt_bus_list = g_queue_new();

    QLIST_FOREACH(sec, &bus->child, sibling) {
        /*
         * NOTE(review): br_scope is allocated before the root-bus check and
         * is only appended when nr_notifiers != 0; presumably unappended Aml
         * objects are reclaimed by the AML allocator pool — verify.
         */
        Aml *br_scope = aml_scope("S%.02X", sec->parent_dev->devfn);
        if (pci_bus_is_root(sec)) {
            continue;
        }
        nr_notifiers = nr_notifiers +
                       build_append_notification_callback(br_scope, sec);
        /*
         * add new child scope to parent
         * and keep track of bus that have PCNT,
         * bus list is used later to call children PCNTs from this level PCNT
         * NOTE(review): this gates on the cumulative count, so a child with
         * no notifiers is still queued once any earlier child had one —
         * confirm this is intended.
         */
        if (nr_notifiers) {
            g_queue_push_tail(pcnt_bus_list, sec);
            aml_append(parent_scope, br_scope);
        }
    }

    /*
     * Append PCNT method to notify about events on local and child buses.
     * ps: hostbridge might not have hotplug (bsel) enabled but might have
     * child bridges that do have bsel.
     */
    method = aml_method("PCNT", 0, AML_NOTSERIALIZED);

    /* If bus supports hotplug select it and notify about local events */
    bsel = object_property_get_qobject(OBJECT(bus), ACPI_PCIHP_PROP_BSEL, NULL);
    if (bsel) {
        uint64_t bsel_val = qnum_get_uint(qobject_to(QNum, bsel));

        aml_append(method, aml_store(aml_int(bsel_val), aml_name("BNUM")));
        aml_append(method, aml_call2("DVNT", aml_name("PCIU"),
                                     aml_int(1))); /* Device Check */
        aml_append(method, aml_call2("DVNT", aml_name("PCID"),
                                     aml_int(3))); /* Eject Request */
        nr_notifiers++;
    }

    /* Notify about child bus events in any case */
    while ((sec = g_queue_pop_head(pcnt_bus_list))) {
        aml_append(method, aml_name("^S%.02X.PCNT", sec->parent_dev->devfn));
    }

    aml_append(parent_scope, method);
    /* bsel may be NULL here; qobject_unref(NULL) is a no-op */
    qobject_unref(bsel);
    g_queue_free(pcnt_bus_list);
    return !!nr_notifiers;
}
/*
 * Build the shared PDSM helper backing hotpluggable slots' _DSM wrappers
 * (see aml_pci_device_dsm): queries the device's acpi-index at runtime via
 * the AIDX method using the BSEL/ASUN values packed into Arg4.
 */
static Aml *aml_pci_pdsm(void)
{
    Aml *method, *ifctx, *ifctx1;
    Aml *ret = aml_local(0);
    Aml *caps = aml_local(1);
    Aml *acpi_index = aml_local(2);
    Aml *zero = aml_int(0);
    Aml *one = aml_int(1);
    Aml *not_supp = aml_int(0xFFFFFFFF);
    Aml *func = aml_arg(2);
    Aml *params = aml_arg(4);
    Aml *bnum = aml_derefof(aml_index(params, aml_int(0)));
    Aml *sunum = aml_derefof(aml_index(params, aml_int(1)));

    method = aml_method("PDSM", 5, AML_SERIALIZED);

    /* get supported functions */
    ifctx = aml_if(aml_equal(func, zero));
    {
        build_append_pci_dsm_func0_common(ifctx, ret);

        aml_append(ifctx, aml_store(zero, caps));
        aml_append(ifctx,
            aml_store(aml_call2("AIDX", bnum, sunum), acpi_index));
        /*
         * advertise function 7 if device has acpi-index
         * acpi_index values:
         *            0: not present (default value)
         *     FFFFFFFF: not supported (old QEMU without PIDX reg)
         *        other: device's acpi-index
         */
        ifctx1 = aml_if(aml_lnot(
                     aml_or(aml_equal(acpi_index, zero),
                            aml_equal(acpi_index, not_supp), NULL)
                 ));
        {
            /* have supported functions */
            aml_append(ifctx1, aml_or(caps, one, caps));
            /* support for function 7 */
            aml_append(ifctx1,
                aml_or(caps, aml_shiftleft(one, aml_int(7)), caps));
        }
        aml_append(ifctx, ifctx1);

        aml_append(ifctx, aml_store(caps, aml_index(ret, zero)));
        aml_append(ifctx, aml_return(ret));
    }
    aml_append(method, ifctx);

    /* handle specific functions requests */
    /*
     * PCI Firmware Specification 3.1
     * 4.6.7. _DSM for Naming a PCI or PCI Express Device Under
     *        Operating Systems
     */
    ifctx = aml_if(aml_equal(func, aml_int(7)));
    {
        Aml *pkg = aml_package(2);

        aml_append(ifctx, aml_store(aml_call2("AIDX", bnum, sunum), acpi_index));
        aml_append(ifctx, aml_store(pkg, ret));
        /*
         * Windows calls func=7 without checking if it's available,
         * as workaround Microsoft has suggested to return invalid for func7
         * Package, so return 2 elements package but only initialize elements
         * when acpi_index is supported and leave them uninitialized, which
         * leads elements to being Uninitialized ObjectType and should trip
         * Windows into discarding result as an unexpected and prevent setting
         * bogus 'PCI Label' on the device.
         */
        ifctx1 = aml_if(aml_lnot(aml_lor(
                     aml_equal(acpi_index, zero), aml_equal(acpi_index, not_supp)
                 )));
        {
            aml_append(ifctx1, aml_store(acpi_index, aml_index(ret, zero)));
            /*
             * optional, if not impl. should return null string
             */
            aml_append(ifctx1, aml_store(aml_string("%s", ""),
                                         aml_index(ret, one)));
        }
        aml_append(ifctx, ifctx1);

        aml_append(ifctx, aml_return(ret));
    }
    aml_append(method, ifctx);

    return method;
}
/*
* build_prt - Define interrupt routing rules
*
@ -1227,112 +828,6 @@ static Aml *build_q35_dram_controller(const AcpiMcfgInfo *mcfg)
return dev;
}
/*
 * Emit the _SB.PCI0 ACPI PCI hotplug machinery: I/O operation regions over
 * the hotplug register block at @pcihp_addr, the PCEJ eject and AIDX
 * acpi-index query methods (serialized via the BLCK mutex), and the shared
 * PDSM method. Register layout: see docs/specs/acpi_pci_hotplug.rst.
 */
static void build_x86_acpi_pci_hotplug(Aml *table, uint64_t pcihp_addr)
{
    Aml *scope;
    Aml *field;
    Aml *method;

    scope =  aml_scope("_SB.PCI0");

    /* PCIU/PCID: up/down (plug/unplug) slot event bitmaps */
    aml_append(scope,
        aml_operation_region("PCST", AML_SYSTEM_IO, aml_int(pcihp_addr), 0x08));
    field = aml_field("PCST", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
    aml_append(field, aml_named_field("PCIU", 32));
    aml_append(field, aml_named_field("PCID", 32));
    aml_append(scope, field);

    /* B0EJ: write a slot bit to eject that device */
    aml_append(scope,
        aml_operation_region("SEJ", AML_SYSTEM_IO,
                             aml_int(pcihp_addr + ACPI_PCIHP_SEJ_BASE), 0x04));
    field = aml_field("SEJ", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
    aml_append(field, aml_named_field("B0EJ", 32));
    aml_append(scope, field);

    /* BNUM: bus selector; PIDX: acpi-index probe register */
    aml_append(scope,
        aml_operation_region("BNMR", AML_SYSTEM_IO,
                             aml_int(pcihp_addr + ACPI_PCIHP_BNMR_BASE), 0x08));
    field = aml_field("BNMR", AML_DWORD_ACC, AML_NOLOCK, AML_WRITE_AS_ZEROS);
    aml_append(field, aml_named_field("BNUM", 32));
    aml_append(field, aml_named_field("PIDX", 32));
    aml_append(scope, field);

    /* mutex guarding the BNUM-select + register-access sequences below */
    aml_append(scope, aml_mutex("BLCK", 0));

    /* PCEJ(bus, slot): select bus, write slot bit to B0EJ to eject */
    method = aml_method("PCEJ", 2, AML_NOTSERIALIZED);
    aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF));
    aml_append(method, aml_store(aml_arg(0), aml_name("BNUM")));
    aml_append(method,
        aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("B0EJ")));
    aml_append(method, aml_release(aml_name("BLCK")));
    aml_append(method, aml_return(aml_int(0)));
    aml_append(scope, method);

    /* AIDX(bus, slot): select bus/slot via PIDX, read back acpi-index */
    method = aml_method("AIDX", 2, AML_NOTSERIALIZED);
    aml_append(method, aml_acquire(aml_name("BLCK"), 0xFFFF));
    aml_append(method, aml_store(aml_arg(0), aml_name("BNUM")));
    aml_append(method,
        aml_store(aml_shiftleft(aml_int(1), aml_arg(1)), aml_name("PIDX")));
    aml_append(method, aml_store(aml_name("PIDX"), aml_local(0)));
    aml_append(method, aml_release(aml_name("BLCK")));
    aml_append(method, aml_return(aml_local(0)));
    aml_append(scope, method);

    aml_append(scope, aml_pci_pdsm());

    aml_append(table, scope);
}
/*
 * Build the host bridge _OSC method negotiating PCIe control bits with the
 * OSPM (ACPI spec 6.2.11). PCIe native hotplug is granted only when
 * @enable_native_pcie_hotplug is set (i.e. ACPI hotplug is disabled).
 */
static Aml *build_q35_osc_method(bool enable_native_pcie_hotplug)
{
    Aml *if_ctx;
    Aml *if_ctx2;
    Aml *else_ctx;
    Aml *method;
    Aml *a_cwd1 = aml_name("CDW1");     /* status/errors DWORD */
    Aml *a_ctrl = aml_local(0);         /* working copy of control DWORD */

    method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
    aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));

    /* PCI Host Bridge _OSC UUID */
    if_ctx = aml_if(aml_equal(
        aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766")));
    aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
    aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
    aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl));

    /*
     * Always allow native PME, AER (no dependencies)
     * Allow SHPC (PCI bridges can have SHPC controller)
     * Disable PCIe Native Hot-plug if ACPI PCI Hot-plug is enabled.
     */
    aml_append(if_ctx, aml_and(a_ctrl,
        aml_int(0x1E | (enable_native_pcie_hotplug ? 0x1 : 0x0)), a_ctrl));

    if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1))));
    /* Unknown revision */
    aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1));
    aml_append(if_ctx, if_ctx2);

    if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl)));
    /* Capabilities bits were masked */
    aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1));
    aml_append(if_ctx, if_ctx2);

    /* Update DWORD3 in the buffer */
    aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3")));
    aml_append(method, if_ctx);

    else_ctx = aml_else();
    /* Unrecognized UUID */
    aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1));
    aml_append(method, else_ctx);

    /* _OSC returns the (possibly modified) Arg3 buffer */
    aml_append(method, aml_return(aml_arg(3)));
    return method;
}
static void build_acpi0017(Aml *table)
{
Aml *dev, *scope, *method;
@ -1389,12 +884,12 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
dev = aml_device("PCI0");
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid)));
aml_append(dev, aml_pci_edsm());
aml_append(dev, build_pci_bridge_edsm());
aml_append(sb_scope, dev);
aml_append(dsdt, sb_scope);
if (pm->pcihp_bridge_en || pm->pcihp_root_en) {
build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base);
build_acpi_pci_hotplug(dsdt, AML_SYSTEM_IO, pm->pcihp_io_base);
}
build_piix4_pci0_int(dsdt);
} else if (q35) {
@ -1403,8 +898,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08")));
aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03")));
aml_append(dev, aml_name_decl("_UID", aml_int(pcmc->pci_root_uid)));
aml_append(dev, build_q35_osc_method(!pm->pcihp_bridge_en));
aml_append(dev, aml_pci_edsm());
aml_append(dev, build_pci_host_bridge_osc_method(!pm->pcihp_bridge_en));
aml_append(dev, build_pci_bridge_edsm());
aml_append(sb_scope, dev);
if (mcfg_valid) {
aml_append(sb_scope, build_q35_dram_controller(&mcfg));
@ -1438,7 +933,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(dsdt, sb_scope);
if (pm->pcihp_bridge_en) {
build_x86_acpi_pci_hotplug(dsdt, pm->pcihp_io_base);
build_acpi_pci_hotplug(dsdt, AML_SYSTEM_IO, pm->pcihp_io_base);
}
build_q35_pci0_int(dsdt);
}
@ -1525,7 +1020,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03")));
/* Expander bridges do not have ACPI PCI Hot-plug enabled */
aml_append(dev, build_q35_osc_method(true));
aml_append(dev, build_pci_host_bridge_osc_method(true));
} else {
aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A03")));
}
@ -1654,19 +1149,8 @@ build_dsdt(GArray *table_data, BIOSLinker *linker,
/* reserve PCIHP resources */
if (pm->pcihp_io_len && (pm->pcihp_bridge_en || pm->pcihp_root_en)) {
dev = aml_device("PHPR");
aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06")));
aml_append(dev,
aml_name_decl("_UID", aml_string("PCI Hotplug resources")));
/* device present, functioning, decoding, not shown in UI */
aml_append(dev, aml_name_decl("_STA", aml_int(0xB)));
crs = aml_resource_template();
aml_append(crs,
aml_io(AML_DECODE16, pm->pcihp_io_base, pm->pcihp_io_base, 1,
pm->pcihp_io_len)
);
aml_append(dev, aml_name_decl("_CRS", crs));
aml_append(scope, dev);
build_append_pcihp_resources(scope,
pm->pcihp_io_base, pm->pcihp_io_len);
}
aml_append(dsdt, scope);

View file

@ -5,10 +5,6 @@
extern const struct AcpiGenericAddress x86_nvdimm_acpi_dsmio;
/* PCI Hot-plug registers' base. See docs/specs/acpi_pci_hotplug.rst */
#define ACPI_PCIHP_SEJ_BASE 0x8
#define ACPI_PCIHP_BNMR_BASE 0x10
void acpi_setup(void);
Object *acpi_get_i386_pci_host(void);

View file

@ -140,7 +140,7 @@ static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val)
{
uint64_t romask = ldq_le_p(&s->romask[addr]);
uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
uint32_t oldval = ldq_le_p(&s->mmior[addr]);
uint64_t oldval = ldq_le_p(&s->mmior[addr]);
stq_le_p(&s->mmior[addr],
((oldval & romask) | (val & ~romask)) & ~(val & w1cmask));
}
@ -508,7 +508,7 @@ static void amdvi_inval_inttable(AMDVIState *s, uint64_t *cmd)
static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
{
uint16_t devid = extract64(cmd[0], 0, 16);
uint16_t devid = cpu_to_le16(extract64(cmd[0], 0, 16));
if (extract64(cmd[1], 1, 1) || extract64(cmd[1], 3, 1) ||
extract64(cmd[1], 6, 6)) {
amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4),
@ -521,7 +521,7 @@ static void iommu_inval_iotlb(AMDVIState *s, uint64_t *cmd)
&devid);
} else {
amdvi_iotlb_remove_page(s, cpu_to_le64(extract64(cmd[1], 12, 52)) << 12,
cpu_to_le16(extract64(cmd[1], 0, 16)));
devid);
}
trace_amdvi_iotlb_inval();
}
@ -665,8 +665,8 @@ static inline void amdvi_handle_devtab_write(AMDVIState *s)
uint64_t val = amdvi_readq(s, AMDVI_MMIO_DEVICE_TABLE);
s->devtab = (val & AMDVI_MMIO_DEVTAB_BASE_MASK);
/* set device table length */
s->devtab_len = ((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1 *
/* set device table length (i.e. number of entries table can hold) */
s->devtab_len = (((val & AMDVI_MMIO_DEVTAB_SIZE_MASK) + 1) *
(AMDVI_MMIO_DEVTAB_SIZE_UNIT /
AMDVI_MMIO_DEVTAB_ENTRY_SIZE));
}
@ -848,9 +848,10 @@ static inline uint64_t amdvi_get_perms(uint64_t entry)
static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid,
uint64_t *dte)
{
if ((dte[0] & AMDVI_DTE_LOWER_QUAD_RESERVED)
|| (dte[1] & AMDVI_DTE_MIDDLE_QUAD_RESERVED)
|| (dte[2] & AMDVI_DTE_UPPER_QUAD_RESERVED) || dte[3]) {
if ((dte[0] & AMDVI_DTE_QUAD0_RESERVED) ||
(dte[1] & AMDVI_DTE_QUAD1_RESERVED) ||
(dte[2] & AMDVI_DTE_QUAD2_RESERVED) ||
(dte[3] & AMDVI_DTE_QUAD3_RESERVED)) {
amdvi_log_illegaldevtab_error(s, devid,
s->devtab +
devid * AMDVI_DEVTAB_ENTRY_SIZE, 0);

View file

@ -25,6 +25,8 @@
#include "hw/i386/x86-iommu.h"
#include "qom/object.h"
#define GENMASK64(h, l) (((~0ULL) >> (63 - (h) + (l))) << (l))
/* Capability registers */
#define AMDVI_CAPAB_BAR_LOW 0x04
#define AMDVI_CAPAB_BAR_HIGH 0x08
@ -66,34 +68,34 @@
#define AMDVI_MMIO_SIZE 0x4000
#define AMDVI_MMIO_DEVTAB_SIZE_MASK ((1ULL << 12) - 1)
#define AMDVI_MMIO_DEVTAB_BASE_MASK (((1ULL << 52) - 1) & ~ \
AMDVI_MMIO_DEVTAB_SIZE_MASK)
#define AMDVI_MMIO_DEVTAB_SIZE_MASK GENMASK64(8, 0)
#define AMDVI_MMIO_DEVTAB_BASE_MASK GENMASK64(51, 12)
#define AMDVI_MMIO_DEVTAB_ENTRY_SIZE 32
#define AMDVI_MMIO_DEVTAB_SIZE_UNIT 4096
/* some of this are similar but just for readability */
#define AMDVI_MMIO_CMDBUF_SIZE_BYTE (AMDVI_MMIO_COMMAND_BASE + 7)
#define AMDVI_MMIO_CMDBUF_SIZE_MASK 0x0f
#define AMDVI_MMIO_CMDBUF_BASE_MASK AMDVI_MMIO_DEVTAB_BASE_MASK
#define AMDVI_MMIO_CMDBUF_HEAD_MASK (((1ULL << 19) - 1) & ~0x0f)
#define AMDVI_MMIO_CMDBUF_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
#define AMDVI_MMIO_CMDBUF_BASE_MASK GENMASK64(51, 12)
#define AMDVI_MMIO_CMDBUF_HEAD_MASK GENMASK64(18, 4)
#define AMDVI_MMIO_CMDBUF_TAIL_MASK GENMASK64(18, 4)
#define AMDVI_MMIO_EVTLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7)
#define AMDVI_MMIO_EVTLOG_SIZE_MASK AMDVI_MMIO_CMDBUF_SIZE_MASK
#define AMDVI_MMIO_EVTLOG_BASE_MASK AMDVI_MMIO_CMDBUF_BASE_MASK
#define AMDVI_MMIO_EVTLOG_HEAD_MASK (((1ULL << 19) - 1) & ~0x0f)
#define AMDVI_MMIO_EVTLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
#define AMDVI_MMIO_EVTLOG_SIZE_MASK 0x0f
#define AMDVI_MMIO_EVTLOG_BASE_MASK GENMASK64(51, 12)
#define AMDVI_MMIO_EVTLOG_HEAD_MASK GENMASK64(18, 4)
#define AMDVI_MMIO_EVTLOG_TAIL_MASK GENMASK64(18, 4)
#define AMDVI_MMIO_PPRLOG_SIZE_BYTE (AMDVI_MMIO_EVENT_BASE + 7)
#define AMDVI_MMIO_PPRLOG_HEAD_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
#define AMDVI_MMIO_PPRLOG_TAIL_MASK AMDVI_MMIO_EVTLOG_HEAD_MASK
#define AMDVI_MMIO_PPRLOG_BASE_MASK AMDVI_MMIO_EVTLOG_BASE_MASK
#define AMDVI_MMIO_PPRLOG_SIZE_MASK AMDVI_MMIO_EVTLOG_SIZE_MASK
#define AMDVI_MMIO_PPRLOG_SIZE_BYTE (AMDVI_MMIO_PPR_BASE + 7)
#define AMDVI_MMIO_PPRLOG_SIZE_MASK 0x0f
#define AMDVI_MMIO_PPRLOG_BASE_MASK GENMASK64(51, 12)
#define AMDVI_MMIO_PPRLOG_HEAD_MASK GENMASK64(18, 4)
#define AMDVI_MMIO_PPRLOG_TAIL_MASK GENMASK64(18, 4)
#define AMDVI_MMIO_EXCL_ENABLED_MASK (1ULL << 0)
#define AMDVI_MMIO_EXCL_ALLOW_MASK (1ULL << 1)
#define AMDVI_MMIO_EXCL_LIMIT_MASK AMDVI_MMIO_DEVTAB_BASE_MASK
#define AMDVI_MMIO_EXCL_LIMIT_MASK GENMASK64(51, 12)
#define AMDVI_MMIO_EXCL_LIMIT_LOW 0xfff
/* mmio control register flags */
@ -130,14 +132,14 @@
#define AMDVI_DEV_TRANSLATION_VALID (1ULL << 1)
#define AMDVI_DEV_MODE_MASK 0x7
#define AMDVI_DEV_MODE_RSHIFT 9
#define AMDVI_DEV_PT_ROOT_MASK 0xffffffffff000
#define AMDVI_DEV_PT_ROOT_MASK GENMASK64(51, 12)
#define AMDVI_DEV_PT_ROOT_RSHIFT 12
#define AMDVI_DEV_PERM_SHIFT 61
#define AMDVI_DEV_PERM_READ (1ULL << 61)
#define AMDVI_DEV_PERM_WRITE (1ULL << 62)
/* Device table entry bits 64:127 */
#define AMDVI_DEV_DOMID_ID_MASK ((1ULL << 16) - 1)
#define AMDVI_DEV_DOMID_ID_MASK GENMASK64(15, 0)
/* Event codes and flags, as stored in the info field */
#define AMDVI_EVENT_ILLEGAL_DEVTAB_ENTRY (0x1U << 12)
@ -162,9 +164,10 @@
#define AMDVI_FEATURE_PC (1ULL << 9) /* Perf counters */
/* reserved DTE bits */
#define AMDVI_DTE_LOWER_QUAD_RESERVED 0x80300000000000fc
#define AMDVI_DTE_MIDDLE_QUAD_RESERVED 0x0000000000000100
#define AMDVI_DTE_UPPER_QUAD_RESERVED 0x08f0000000000000
#define AMDVI_DTE_QUAD0_RESERVED (GENMASK64(6, 2) | GENMASK64(63, 63))
#define AMDVI_DTE_QUAD1_RESERVED 0
#define AMDVI_DTE_QUAD2_RESERVED GENMASK64(53, 52)
#define AMDVI_DTE_QUAD3_RESERVED (GENMASK64(14, 0) | GENMASK64(53, 48))
/* AMDVI paging mode */
#define AMDVI_GATS_MODE (2ULL << 12)
@ -194,19 +197,15 @@
#define AMDVI_PAGE_SIZE (1ULL << AMDVI_PAGE_SHIFT)
#define AMDVI_PAGE_SHIFT_4K 12
#define AMDVI_PAGE_MASK_4K (~((1ULL << AMDVI_PAGE_SHIFT_4K) - 1))
#define AMDVI_PAGE_MASK_4K GENMASK64(63, 12)
#define AMDVI_MAX_VA_ADDR (48UL << 5)
#define AMDVI_MAX_PH_ADDR (40UL << 8)
#define AMDVI_MAX_GVA_ADDR (48UL << 15)
#define AMDVI_MAX_GVA_ADDR (2UL << 5)
#define AMDVI_MAX_PH_ADDR (40UL << 8)
#define AMDVI_MAX_VA_ADDR (48UL << 15)
/* Completion Wait data size */
#define AMDVI_COMPLETION_DATA_SIZE 8
#define AMDVI_COMMAND_SIZE 16
/* Completion Wait data size */
#define AMDVI_COMPLETION_DATA_SIZE 8
#define AMDVI_COMMAND_SIZE 16
#define AMDVI_INT_ADDR_FIRST 0xfee00000
@ -228,7 +227,7 @@
#define AMDVI_IR_INTCTL_PASS 1
#define AMDVI_IR_INTCTL_REMAP 2
#define AMDVI_IR_PHYS_ADDR_MASK (((1ULL << 45) - 1) << 6)
#define AMDVI_IR_PHYS_ADDR_MASK GENMASK64(51, 6)
/* MSI data 10:0 bits (section 2.2.5.1 Fig 14) */
#define AMDVI_IRTE_OFFSET 0x7ff

View file

@ -1987,9 +1987,9 @@ static int vtd_iova_to_flpte(IntelIOMMUState *s, VTDContextEntry *ce,
uint32_t pasid)
{
dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce, pasid);
uint32_t level = vtd_get_iova_level(s, ce, pasid);
uint32_t offset;
uint64_t flpte, flag_ad = VTD_FL_A;
*flpte_level = vtd_get_iova_level(s, ce, pasid);
if (!vtd_iova_fl_check_canonical(s, iova, ce, pasid)) {
error_report_once("%s: detected non canonical IOVA (iova=0x%" PRIx64 ","
@ -1998,11 +1998,11 @@ static int vtd_iova_to_flpte(IntelIOMMUState *s, VTDContextEntry *ce,
}
while (true) {
offset = vtd_iova_level_offset(iova, level);
offset = vtd_iova_level_offset(iova, *flpte_level);
flpte = vtd_get_pte(addr, offset);
if (flpte == (uint64_t)-1) {
if (level == vtd_get_iova_level(s, ce, pasid)) {
if (*flpte_level == vtd_get_iova_level(s, ce, pasid)) {
/* Invalid programming of pasid-entry */
return -VTD_FR_PASID_ENTRY_FSPTPTR_INV;
} else {
@ -2028,15 +2028,15 @@ static int vtd_iova_to_flpte(IntelIOMMUState *s, VTDContextEntry *ce,
if (is_write && !(flpte & VTD_FL_RW)) {
return -VTD_FR_SM_WRITE;
}
if (vtd_flpte_nonzero_rsvd(flpte, level)) {
if (vtd_flpte_nonzero_rsvd(flpte, *flpte_level)) {
error_report_once("%s: detected flpte reserved non-zero "
"iova=0x%" PRIx64 ", level=0x%" PRIx32
"flpte=0x%" PRIx64 ", pasid=0x%" PRIX32 ")",
__func__, iova, level, flpte, pasid);
__func__, iova, *flpte_level, flpte, pasid);
return -VTD_FR_FS_PAGING_ENTRY_RSVD;
}
if (vtd_is_last_pte(flpte, level) && is_write) {
if (vtd_is_last_pte(flpte, *flpte_level) && is_write) {
flag_ad |= VTD_FL_D;
}
@ -2044,14 +2044,13 @@ static int vtd_iova_to_flpte(IntelIOMMUState *s, VTDContextEntry *ce,
return -VTD_FR_FS_BIT_UPDATE_FAILED;
}
if (vtd_is_last_pte(flpte, level)) {
if (vtd_is_last_pte(flpte, *flpte_level)) {
*flptep = flpte;
*flpte_level = level;
return 0;
}
addr = vtd_get_pte_addr(flpte, aw_bits);
level--;
(*flpte_level)--;
}
}
@ -2092,7 +2091,8 @@ static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
uint8_t bus_num = pci_bus_num(bus);
VTDContextCacheEntry *cc_entry;
uint64_t pte, page_mask;
uint32_t level, pasid = vtd_as->pasid;
uint32_t level = UINT32_MAX;
uint32_t pasid = vtd_as->pasid;
uint16_t source_id = PCI_BUILD_BDF(bus_num, devfn);
int ret_fr;
bool is_fpd_set = false;
@ -2251,14 +2251,19 @@ out:
entry->iova = addr & page_mask;
entry->translated_addr = vtd_get_pte_addr(pte, s->aw_bits) & page_mask;
entry->addr_mask = ~page_mask;
entry->perm = access_flags;
entry->perm = (is_write ? access_flags : (access_flags & (~IOMMU_WO)));
return true;
error:
vtd_iommu_unlock(s);
entry->iova = 0;
entry->translated_addr = 0;
entry->addr_mask = 0;
/*
* Set the mask for ATS (the range must be present even when the
* translation fails : PCIe rev 5 10.2.3.5)
*/
entry->addr_mask = (level != UINT32_MAX) ?
(~vtd_pt_level_page_mask(level)) : (~VTD_PAGE_MASK_4K);
entry->perm = IOMMU_NONE;
return false;
}
@ -2503,6 +2508,7 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
.translated_addr = 0,
.addr_mask = size - 1,
.perm = IOMMU_NONE,
.pasid = vtd_as->pasid,
},
};
memory_region_notify_iommu(&vtd_as->iommu, 0, event);
@ -3090,6 +3096,7 @@ static void do_invalidate_device_tlb(VTDAddressSpace *vtd_dev_as,
event.entry.iova = addr;
event.entry.perm = IOMMU_NONE;
event.entry.translated_addr = 0;
event.entry.pasid = vtd_dev_as->pasid;
memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event);
}
@ -3672,6 +3679,7 @@ static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
IOMMUTLBEntry iotlb = {
/* We'll fill in the rest later. */
.target_as = &address_space_memory,
.pasid = vtd_as->pasid,
};
bool success;
@ -4587,7 +4595,7 @@ static void vtd_cap_init(IntelIOMMUState *s)
}
if (s->pasid) {
s->ecap |= VTD_ECAP_PASID;
s->ecap |= VTD_ECAP_PASID | VTD_ECAP_PSS;
}
}
@ -4730,10 +4738,118 @@ static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
return &vtd_as->as;
}
static IOMMUTLBEntry vtd_iommu_ats_do_translate(IOMMUMemoryRegion *iommu,
hwaddr addr,
IOMMUAccessFlags flags)
{
IOMMUTLBEntry entry;
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
if (vtd_is_interrupt_addr(addr)) {
vtd_report_ir_illegal_access(vtd_as, addr, flags & IOMMU_WO);
entry.target_as = &address_space_memory;
entry.iova = 0;
entry.translated_addr = 0;
entry.addr_mask = ~VTD_PAGE_MASK_4K;
entry.perm = IOMMU_NONE;
entry.pasid = PCI_NO_PASID;
} else {
entry = vtd_iommu_translate(iommu, addr, flags, 0);
}
return entry;
}
static ssize_t vtd_ats_request_translation(PCIBus *bus, void *opaque,
int devfn, uint32_t pasid,
bool priv_req, bool exec_req,
hwaddr addr, size_t length,
bool no_write, IOMMUTLBEntry *result,
size_t result_length,
uint32_t *err_count)
{
IntelIOMMUState *s = opaque;
VTDAddressSpace *vtd_as;
IOMMUAccessFlags flags = IOMMU_ACCESS_FLAG_FULL(true, !no_write, exec_req,
priv_req, false, false);
ssize_t res_index = 0;
hwaddr target_address = addr + length;
IOMMUTLBEntry entry;
vtd_as = vtd_find_add_as(s, bus, devfn, pasid);
*err_count = 0;
while ((addr < target_address) && (res_index < result_length)) {
entry = vtd_iommu_ats_do_translate(&vtd_as->iommu, addr, flags);
entry.perm &= ~IOMMU_GLOBAL; /* Spec 4.1.2: Global Mapping never set */
if ((entry.perm & flags) != flags) {
*err_count += 1; /* Less than expected */
}
result[res_index] = entry;
res_index += 1;
addr = (addr & (~entry.addr_mask)) + (entry.addr_mask + 1);
}
/* Buffer too small */
if (addr < target_address) {
return -ENOMEM;
}
return res_index;
}
/*
 * PCIIOMMUOps .init_iotlb_notifier callback.
 *
 * Initialize @n to fire @fn for device-IOTLB invalidation events over the
 * whole address range (0..HWADDR_MAX); @user_opaque is stashed in the
 * notifier for the callback's use.  The bus/devfn arguments are unused here.
 */
static void vtd_init_iotlb_notifier(PCIBus *bus, void *opaque, int devfn,
IOMMUNotifier *n, IOMMUNotify fn,
void *user_opaque)
{
n->opaque = user_opaque;
iommu_notifier_init(n, fn, IOMMU_NOTIFIER_DEVIOTLB_EVENTS, 0,
HWADDR_MAX, 0);
}
/*
 * PCIIOMMUOps .get_iotlb_info callback: report the IOMMU's address width
 * (in bits, from the configured aw-bits) and its minimum page size to the
 * caller.
 */
static void vtd_get_iotlb_info(void *opaque, uint8_t *addr_width,
uint32_t *min_page_size)
{
IntelIOMMUState *s = opaque;
*addr_width = s->aw_bits;
*min_page_size = VTD_PAGE_SIZE;
}
/*
 * PCIIOMMUOps .register_iotlb_notifier callback.
 *
 * Look up (creating on demand) the address space for @bus/@devfn/@pasid and
 * attach @n to its IOMMU memory region.  Registration failure is fatal.
 */
static void vtd_register_iotlb_notifier(PCIBus *bus, void *opaque,
int devfn, uint32_t pasid,
IOMMUNotifier *n)
{
IntelIOMMUState *s = opaque;
VTDAddressSpace *vtd_as;
vtd_as = vtd_find_add_as(s, bus, devfn, pasid);
memory_region_register_iommu_notifier(MEMORY_REGION(&vtd_as->iommu), n,
&error_fatal);
}
/*
 * PCIIOMMUOps .unregister_iotlb_notifier callback: detach @n from the IOMMU
 * memory region of the address space for @bus/@devfn/@pasid.  Mirrors
 * vtd_register_iotlb_notifier().
 */
static void vtd_unregister_iotlb_notifier(PCIBus *bus, void *opaque,
int devfn, uint32_t pasid,
IOMMUNotifier *n)
{
IntelIOMMUState *s = opaque;
VTDAddressSpace *vtd_as;
vtd_as = vtd_find_add_as(s, bus, devfn, pasid);
memory_region_unregister_iommu_notifier(MEMORY_REGION(&vtd_as->iommu), n);
}
/*
 * PCI IOMMU callback table for the Intel IOMMU, wired up to the PCI core.
 * Includes the device-IOTLB notifier hooks and the ATS translation-request
 * handler defined above.
 */
static PCIIOMMUOps vtd_iommu_ops = {
.get_address_space = vtd_host_dma_iommu,
.set_iommu_device = vtd_dev_set_iommu_device,
.unset_iommu_device = vtd_dev_unset_iommu_device,
.get_iotlb_info = vtd_get_iotlb_info,
.init_iotlb_notifier = vtd_init_iotlb_notifier,
.register_iotlb_notifier = vtd_register_iotlb_notifier,
.unregister_iotlb_notifier = vtd_unregister_iotlb_notifier,
.ats_request_translation = vtd_ats_request_translation,
};
static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)

View file

@ -192,6 +192,7 @@
#define VTD_ECAP_SC (1ULL << 7)
#define VTD_ECAP_MHMV (15ULL << 20)
#define VTD_ECAP_SRS (1ULL << 31)
#define VTD_ECAP_PSS (7ULL << 35) /* limit: MemTxAttrs::pid */
#define VTD_ECAP_PASID (1ULL << 40)
#define VTD_ECAP_SMTS (1ULL << 43)
#define VTD_ECAP_SLTS (1ULL << 46)

View file

@ -557,7 +557,9 @@ static void acpi_build(AcpiBuildTables *tables, MachineState *machine)
acpi_add_table(table_offsets, tables_blob);
build_srat(tables_blob, tables->linker, machine);
acpi_add_table(table_offsets, tables_blob);
spcr_setup(tables_blob, tables->linker, machine);
if (machine->acpi_spcr_enabled)
spcr_setup(tables_blob, tables->linker, machine);
if (machine->numa_state->num_nodes) {
if (machine->numa_state->have_numa_distance) {

View file

@ -8,6 +8,7 @@
*
* SPDX-License-Identifier: GPL-v2-only
*/
#include <math.h>
#include "qemu/osdep.h"
#include "qemu/units.h"
@ -225,10 +226,16 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
* future.
*/
for (i = 0; i < ct3d->dc.num_regions; i++) {
ct3d->dc.regions[i].nonvolatile = false;
ct3d->dc.regions[i].sharable = false;
ct3d->dc.regions[i].hw_managed_coherency = false;
ct3d->dc.regions[i].ic_specific_dc_management = false;
ct3d->dc.regions[i].rdonly = false;
ct3_build_cdat_entries_for_mr(&(table[cur_ent]),
dsmad_handle++,
ct3d->dc.regions[i].len,
false, true, region_base);
ct3d->dc.regions[i].nonvolatile,
true, region_base);
ct3d->dc.regions[i].dsmadhandle = dsmad_handle - 1;
cur_ent += CT3_CDAT_NUM_ENTRIES;
@ -634,6 +641,8 @@ static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp)
uint64_t region_len;
uint64_t decode_len;
uint64_t blk_size = 2 * MiB;
/* Only 1 block size is supported for now. */
uint64_t supported_blk_size_bitmask = blk_size;
CXLDCRegion *region;
MemoryRegion *mr;
uint64_t dc_size;
@ -679,9 +688,11 @@ static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp)
.block_size = blk_size,
/* dsmad_handle set when creating CDAT table entries */
.flags = 0,
.supported_blk_size_bitmask = supported_blk_size_bitmask,
};
ct3d->dc.total_capacity += region->len;
region->blk_bitmap = bitmap_new(region->len / region->block_size);
qemu_mutex_init(&region->bitmap_lock);
}
QTAILQ_INIT(&ct3d->dc.extents);
QTAILQ_INIT(&ct3d->dc.extents_pending);
@ -1010,6 +1021,7 @@ void ct3_set_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
return;
}
QEMU_LOCK_GUARD(&region->bitmap_lock);
bitmap_set(region->blk_bitmap, (dpa - region->base) / region->block_size,
len / region->block_size);
}
@ -1036,6 +1048,7 @@ bool ct3_test_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
* if bits between [dpa, dpa + len) are all 1s, meaning the DPA range is
* backed with DC extents, return true; else return false.
*/
QEMU_LOCK_GUARD(&region->bitmap_lock);
return find_next_zero_bit(region->blk_bitmap, nr + nbits, nr) == nr + nbits;
}
@ -1057,6 +1070,7 @@ void ct3_clear_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
nr = (dpa - region->base) / region->block_size;
nbits = len / region->block_size;
QEMU_LOCK_GUARD(&region->bitmap_lock);
bitmap_clear(region->blk_bitmap, nr, nbits);
}
@ -1576,9 +1590,9 @@ void qmp_cxl_inject_correctable_error(const char *path, CxlCorErrorType type,
pcie_aer_inject_error(PCI_DEVICE(obj), &err);
}
static void cxl_assign_event_header(CXLEventRecordHdr *hdr,
const QemuUUID *uuid, uint32_t flags,
uint8_t length, uint64_t timestamp)
void cxl_assign_event_header(CXLEventRecordHdr *hdr,
const QemuUUID *uuid, uint32_t flags,
uint8_t length, uint64_t timestamp)
{
st24_le_p(&hdr->flags, flags);
hdr->length = length;
@ -1866,28 +1880,13 @@ void qmp_cxl_inject_memory_module_event(const char *path, CxlEventLog log,
}
}
/* CXL r3.1 Table 8-50: Dynamic Capacity Event Record */
static const QemuUUID dynamic_capacity_uuid = {
.data = UUID(0xca95afa7, 0xf183, 0x4018, 0x8c, 0x2f,
0x95, 0x26, 0x8e, 0x10, 0x1a, 0x2a),
};
typedef enum CXLDCEventType {
DC_EVENT_ADD_CAPACITY = 0x0,
DC_EVENT_RELEASE_CAPACITY = 0x1,
DC_EVENT_FORCED_RELEASE_CAPACITY = 0x2,
DC_EVENT_REGION_CONFIG_UPDATED = 0x3,
DC_EVENT_ADD_CAPACITY_RSP = 0x4,
DC_EVENT_CAPACITY_RELEASED = 0x5,
} CXLDCEventType;
/*
* Check whether the range [dpa, dpa + len - 1] has overlaps with extents in
* the list.
* Return value: return true if has overlaps; otherwise, return false
*/
static bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list,
uint64_t dpa, uint64_t len)
bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list,
uint64_t dpa, uint64_t len)
{
CXLDCExtent *ent;
Range range1, range2;
@ -1932,8 +1931,8 @@ bool cxl_extents_contains_dpa_range(CXLDCExtentList *list,
return false;
}
static bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list,
uint64_t dpa, uint64_t len)
bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list,
uint64_t dpa, uint64_t len)
{
CXLDCExtentGroup *group;
@ -1958,15 +1957,11 @@ static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path,
CxlDynamicCapacityExtentList *records, Error **errp)
{
Object *obj;
CXLEventDynamicCapacity dCap = {};
CXLEventRecordHdr *hdr = &dCap.hdr;
CXLType3Dev *dcd;
uint8_t flags = 1 << CXL_EVENT_TYPE_INFO;
uint32_t num_extents = 0;
CxlDynamicCapacityExtentList *list;
CXLDCExtentGroup *group = NULL;
g_autofree CXLDCExtentRaw *extents = NULL;
uint8_t enc_log = CXL_EVENT_TYPE_DYNAMIC_CAP;
uint64_t dpa, offset, len, block_size;
g_autofree unsigned long *blk_bitmap = NULL;
int i;
@ -2076,40 +2071,10 @@ static void qmp_cxl_process_dynamic_capacity_prescriptive(const char *path,
}
if (group) {
cxl_extent_group_list_insert_tail(&dcd->dc.extents_pending, group);
dcd->dc.total_extent_count += num_extents;
}
/*
* CXL r3.1 section 8.2.9.2.1.6: Dynamic Capacity Event Record
*
* All Dynamic Capacity event records shall set the Event Record Severity
* field in the Common Event Record Format to Informational Event. All
* Dynamic Capacity related events shall be logged in the Dynamic Capacity
* Event Log.
*/
cxl_assign_event_header(hdr, &dynamic_capacity_uuid, flags, sizeof(dCap),
cxl_device_get_timestamp(&dcd->cxl_dstate));
dCap.type = type;
/* FIXME: for now, validity flag is cleared */
dCap.validity_flags = 0;
stw_le_p(&dCap.host_id, hid);
/* only valid for DC_REGION_CONFIG_UPDATED event */
dCap.updated_region_id = 0;
for (i = 0; i < num_extents; i++) {
memcpy(&dCap.dynamic_capacity_extent, &extents[i],
sizeof(CXLDCExtentRaw));
dCap.flags = 0;
if (i < num_extents - 1) {
/* Set "More" flag */
dCap.flags |= BIT(0);
}
if (cxl_event_insert(&dcd->cxl_dstate, enc_log,
(CXLEventRecordRaw *)&dCap)) {
cxl_event_irq_assert(dcd);
}
}
cxl_create_dc_event_records_for_extents(dcd, type, extents, num_extents);
}
void qmp_cxl_add_dynamic_capacity(const char *path, uint16_t host_id,

View file

@ -28,7 +28,6 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/bswap.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "migration/vmstate.h"

View file

@ -21,6 +21,7 @@
#include "hw/ptimer.h"
#include "hw/qdev-properties.h"
#include "qapi/error.h"
#include "qemu/bswap.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include <zlib.h> /* for crc32 */

View file

@ -57,6 +57,7 @@
#include "system/dma.h"
#include "qemu/module.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "net/net.h"
#include "net/eth.h"
#include "system/system.h"

View file

@ -158,7 +158,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
virtio_stl_p(vdev, &netcfg.supported_hash_types,
VIRTIO_NET_RSS_SUPPORTED_HASHES);
n->rss_data.supported_hash_types);
memcpy(config, &netcfg, n->config_size);
/*
@ -756,79 +756,6 @@ static void virtio_net_set_queue_pairs(VirtIONet *n)
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
/*
 * VirtIODevice .get_features callback (pre-move version; this hunk removes
 * it in favour of the copy relocated later in the file).
 *
 * Starts from the device's host_features and strips every offload feature
 * the peer backend cannot provide, then lets the vhost backend (if any)
 * further filter the set.  Returns the feature bits to offer the guest.
 */
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
Error **errp)
{
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_queue(n->nic);
/* Firstly sync all virtio-net possible supported features */
features |= n->host_features;
virtio_add_feature(&features, VIRTIO_NET_F_MAC);
/* No vnet header from the peer: no checksum/TSO/USO offloads possible */
if (!peer_has_vnet_hdr(n)) {
virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
}
if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
}
if (!peer_has_uso(n)) {
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
}
/* Pure userspace backend: nothing more to negotiate */
if (!get_vhost_net(nc->peer)) {
return features;
}
/* RSS requires the eBPF steering program to be loaded */
if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
}
features = vhost_net_get_features(get_vhost_net(nc->peer), features);
vdev->backend_features = features;
/* MTU may be exposed to the guest even if the backend lacks it */
if (n->mtu_bypass_backend &&
(n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
features |= (1ULL << VIRTIO_NET_F_MTU);
}
/*
 * Since GUEST_ANNOUNCE is emulated the feature bit could be set without
 * enabled. This happens in the vDPA case.
 *
 * Make sure the feature set is not incoherent, as the driver could refuse
 * to start.
 *
 * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes,
 * helping guest to notify the new location with vDPA devices that does not
 * support it.
 */
if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
}
return features;
}
static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
uint64_t features = 0;
@ -1255,7 +1182,7 @@ static void rss_data_to_rss_config(struct VirtioNetRssData *data,
{
config->redirect = data->redirect;
config->populate_hash = data->populate_hash;
config->hash_types = data->hash_types;
config->hash_types = data->runtime_hash_types;
config->indirections_len = data->indirections_len;
config->default_queue = data->default_queue;
}
@ -1290,6 +1217,10 @@ static void virtio_net_detach_ebpf_rss(VirtIONet *n)
static void virtio_net_commit_rss_config(VirtIONet *n)
{
if (n->rss_data.peer_hash_available) {
return;
}
if (n->rss_data.enabled) {
n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
if (n->rss_data.populate_hash) {
@ -1304,7 +1235,7 @@ static void virtio_net_commit_rss_config(VirtIONet *n)
}
trace_virtio_net_rss_enable(n,
n->rss_data.hash_types,
n->rss_data.runtime_hash_types,
n->rss_data.indirections_len,
sizeof(n->rss_data.key));
} else {
@ -1415,7 +1346,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
err_value = (uint32_t)s;
goto error;
}
n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
n->rss_data.runtime_hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
n->rss_data.indirections_len =
virtio_lduw_p(vdev, &cfg.indirection_table_mask);
if (!do_rss) {
@ -1478,12 +1409,12 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n,
err_value = temp.b;
goto error;
}
if (!temp.b && n->rss_data.hash_types) {
if (!temp.b && n->rss_data.runtime_hash_types) {
err_msg = "No key provided";
err_value = 0;
goto error;
}
if (!temp.b && !n->rss_data.hash_types) {
if (!temp.b && !n->rss_data.runtime_hash_types) {
virtio_net_disable_rss(n);
return queue_pairs;
}
@ -1885,7 +1816,7 @@ static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
net_rx_pkt_set_protocols(pkt, &iov, 1, n->host_hdr_len);
net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto,
n->rss_data.hash_types);
n->rss_data.runtime_hash_types);
if (net_hash_type > NetPktRssIpV6UdpEx) {
if (n->rss_data.populate_hash) {
hdr->hash_value = VIRTIO_NET_HASH_REPORT_NONE;
@ -3077,6 +3008,103 @@ static int virtio_net_pre_load_queues(VirtIODevice *vdev, uint32_t n)
return 0;
}
/*
 * VirtIODevice .get_features callback.
 *
 * Starts from the device's host_features and strips every offload feature
 * the peer backend cannot provide, then lets the vhost backend (if any)
 * further filter the set.  Returns the feature bits to offer the guest.
 *
 * Hash/RSS handling:
 *  - use_own_hash: every configured supported hash type can be computed by
 *    QEMU itself (subset of VIRTIO_NET_RSS_SUPPORTED_HASHES);
 *  - use_peer_hash: the peer advertised hash capability covering all
 *    configured supported hash types, so hashing can be delegated to it.
 */
static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
Error **errp)
{
VirtIONet *n = VIRTIO_NET(vdev);
NetClientState *nc = qemu_get_queue(n->nic);
uint32_t supported_hash_types = n->rss_data.supported_hash_types;
uint32_t peer_hash_types = n->rss_data.peer_hash_types;
bool use_own_hash =
(supported_hash_types & VIRTIO_NET_RSS_SUPPORTED_HASHES) ==
supported_hash_types;
bool use_peer_hash =
n->rss_data.peer_hash_available &&
(supported_hash_types & peer_hash_types) == supported_hash_types;
/* Firstly sync all virtio-net possible supported features */
features |= n->host_features;
virtio_add_feature(&features, VIRTIO_NET_F_MAC);
/* No vnet header from the peer: no checksum/TSO/USO offloads possible */
if (!peer_has_vnet_hdr(n)) {
virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
}
if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
}
if (!peer_has_uso(n)) {
virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4);
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6);
}
/*
 * No vhost backend: QEMU does the hashing itself (if it can), loading
 * the eBPF steering program when RSS was requested.
 */
if (!get_vhost_net(nc->peer)) {
if (!use_own_hash) {
virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
} else if (virtio_has_feature(features, VIRTIO_NET_F_RSS)) {
virtio_net_load_ebpf(n, errp);
}
return features;
}
/*
 * vhost backend without usable peer hashing: fall back to QEMU's own
 * hashing via eBPF, dropping RSS if the program cannot be loaded.
 */
if (!use_peer_hash) {
virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
if (!use_own_hash || !virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
if (!virtio_net_load_ebpf(n, errp)) {
return features;
}
virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
}
}
features = vhost_net_get_features(get_vhost_net(nc->peer), features);
vdev->backend_features = features;
/* MTU may be exposed to the guest even if the backend lacks it */
if (n->mtu_bypass_backend &&
(n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
features |= (1ULL << VIRTIO_NET_F_MTU);
}
/*
 * Since GUEST_ANNOUNCE is emulated the feature bit could be set without
 * enabled. This happens in the vDPA case.
 *
 * Make sure the feature set is not incoherent, as the driver could refuse
 * to start.
 *
 * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes,
 * helping guest to notify the new location with vDPA devices that does not
 * support it.
 */
if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
}
return features;
}
static int virtio_net_post_load_device(void *opaque, int version_id)
{
VirtIONet *n = opaque;
@ -3315,6 +3343,17 @@ static const VMStateDescription vmstate_virtio_net_has_vnet = {
},
};
/*
 * Migration post-load hook for the RSS subsection.
 *
 * Streams written by version 1 (before supported_hash_types was migrated)
 * carry no supported_hash_types field, so restore the historical default of
 * all QEMU-supported hash types.  Always succeeds.
 */
static int virtio_net_rss_post_load(void *opaque, int version_id)
{
VirtIONet *n = VIRTIO_NET(opaque);
if (version_id == 1) {
n->rss_data.supported_hash_types = VIRTIO_NET_RSS_SUPPORTED_HASHES;
}
return 0;
}
static bool virtio_net_rss_needed(void *opaque)
{
return VIRTIO_NET(opaque)->rss_data.enabled;
@ -3322,14 +3361,16 @@ static bool virtio_net_rss_needed(void *opaque)
static const VMStateDescription vmstate_virtio_net_rss = {
.name = "virtio-net-device/rss",
.version_id = 1,
.version_id = 2,
.minimum_version_id = 1,
.post_load = virtio_net_rss_post_load,
.needed = virtio_net_rss_needed,
.fields = (const VMStateField[]) {
VMSTATE_BOOL(rss_data.enabled, VirtIONet),
VMSTATE_BOOL(rss_data.redirect, VirtIONet),
VMSTATE_BOOL(rss_data.populate_hash, VirtIONet),
VMSTATE_UINT32(rss_data.hash_types, VirtIONet),
VMSTATE_UINT32(rss_data.runtime_hash_types, VirtIONet),
VMSTATE_UINT32_V(rss_data.supported_hash_types, VirtIONet, 2),
VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
@ -3916,8 +3957,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
net_rx_pkt_init(&n->rx_pkt);
if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
virtio_net_load_ebpf(n, errp);
if (qemu_get_vnet_hash_supported_types(qemu_get_queue(n->nic)->peer,
&n->rss_data.peer_hash_types)) {
n->rss_data.peer_hash_available = true;
n->rss_data.supported_hash_types =
n->rss_data.specified_hash_types.on_bits |
(n->rss_data.specified_hash_types.auto_bits &
n->rss_data.peer_hash_types);
} else {
n->rss_data.supported_hash_types =
n->rss_data.specified_hash_types.on_bits |
n->rss_data.specified_hash_types.auto_bits;
}
}
@ -4134,6 +4184,42 @@ static const Property virtio_net_properties[] = {
VIRTIO_NET_F_GUEST_USO6, true),
DEFINE_PROP_BIT64("host_uso", VirtIONet, host_features,
VIRTIO_NET_F_HOST_USO, true),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv4", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_IPv4 - 1,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp4", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_TCPv4 - 1,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp4", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_UDPv4 - 1,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv6", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_IPv6 - 1,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp6", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_TCPv6 - 1,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp6", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_UDPv6 - 1,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-ipv6ex", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_IPv6_EX - 1,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-tcp6ex", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_TCPv6_EX - 1,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_ON_OFF_AUTO_BIT64("hash-udp6ex", VirtIONet,
rss_data.specified_hash_types,
VIRTIO_NET_HASH_REPORT_UDPv6_EX - 1,
ON_OFF_AUTO_AUTO),
};
static void virtio_net_class_init(ObjectClass *klass, const void *data)

View file

@ -22,7 +22,6 @@
#include "net/tap.h"
#include "net/checksum.h"
#include "system/system.h"
#include "qemu/bswap.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/pci/msix.h"

View file

@ -54,6 +54,7 @@ config PCI_EXPRESS_Q35
config PCI_EXPRESS_GENERIC_BRIDGE
bool
select PCI_EXPRESS
imply ACPI_PCI
config PCI_EXPRESS_XILINX
bool

View file

@ -1,5 +1,6 @@
#include "qemu/osdep.h"
#include "hw/acpi/aml-build.h"
#include "hw/acpi/pci.h"
#include "hw/pci-host/gpex.h"
#include "hw/arm/virt.h"
#include "hw/pci/pci_bus.h"
@ -50,61 +51,10 @@ static void acpi_dsdt_add_pci_route_table(Aml *dev, uint32_t irq,
}
}
static void acpi_dsdt_add_pci_osc(Aml *dev)
static Aml *build_pci_host_bridge_dsm_method(void)
{
Aml *method, *UUID, *ifctx, *ifctx1, *elsectx, *buf;
/* Declare an _OSC (OS Control Handoff) method */
aml_append(dev, aml_name_decl("SUPP", aml_int(0)));
aml_append(dev, aml_name_decl("CTRL", aml_int(0)));
method = aml_method("_OSC", 4, AML_NOTSERIALIZED);
aml_append(method,
aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1"));
/* PCI Firmware Specification 3.0
* 4.5.1. _OSC Interface for PCI Host Bridge Devices
* The _OSC interface for a PCI/PCI-X/PCI Express hierarchy is
* identified by the Universal Unique IDentifier (UUID)
* 33DB4D5B-1FF7-401C-9657-7441C03DD766
*/
UUID = aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766");
ifctx = aml_if(aml_equal(aml_arg(0), UUID));
aml_append(ifctx,
aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2"));
aml_append(ifctx,
aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3"));
aml_append(ifctx, aml_store(aml_name("CDW2"), aml_name("SUPP")));
aml_append(ifctx, aml_store(aml_name("CDW3"), aml_name("CTRL")));
/*
* Allow OS control for all 5 features:
* PCIeHotplug SHPCHotplug PME AER PCIeCapability.
*/
aml_append(ifctx, aml_and(aml_name("CTRL"), aml_int(0x1F),
aml_name("CTRL")));
ifctx1 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(0x1))));
aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x08),
aml_name("CDW1")));
aml_append(ifctx, ifctx1);
ifctx1 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), aml_name("CTRL"))));
aml_append(ifctx1, aml_or(aml_name("CDW1"), aml_int(0x10),
aml_name("CDW1")));
aml_append(ifctx, ifctx1);
aml_append(ifctx, aml_store(aml_name("CTRL"), aml_name("CDW3")));
aml_append(ifctx, aml_return(aml_arg(3)));
aml_append(method, ifctx);
elsectx = aml_else();
aml_append(elsectx, aml_or(aml_name("CDW1"), aml_int(4),
aml_name("CDW1")));
aml_append(elsectx, aml_return(aml_arg(3)));
aml_append(method, elsectx);
aml_append(dev, method);
method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
Aml *method = aml_method("_DSM", 4, AML_NOTSERIALIZED);
Aml *UUID, *ifctx, *ifctx1, *buf;
/* PCI Firmware Specification 3.0
* 4.6.1. _DSM for PCI Express Slot Information
@ -123,7 +73,16 @@ static void acpi_dsdt_add_pci_osc(Aml *dev)
byte_list[0] = 0;
buf = aml_buffer(1, byte_list);
aml_append(method, aml_return(buf));
aml_append(dev, method);
return method;
}
/*
 * Append the standard PCI host-bridge AML methods to @dev: an _OSC (OS
 * Control Handoff) method — granting native PCIe hotplug control only when
 * @enable_native_pcie_hotplug is set — and the _DSM slot-information method.
 */
static void acpi_dsdt_add_host_bridge_methods(Aml *dev,
bool enable_native_pcie_hotplug)
{
/* Declare an _OSC (OS Control Handoff) method */
aml_append(dev,
build_pci_host_bridge_osc_method(enable_native_pcie_hotplug));
aml_append(dev, build_pci_host_bridge_dsm_method());
}
void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
@ -192,7 +151,8 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
if (is_cxl) {
build_cxl_osc_method(dev);
} else {
acpi_dsdt_add_pci_osc(dev);
/* pxb bridges do not have ACPI PCI Hot-plug enabled */
acpi_dsdt_add_host_bridge_methods(dev, true);
}
aml_append(scope, dev);
@ -267,7 +227,7 @@ void acpi_dsdt_add_gpex(Aml *scope, struct GPEXConfig *cfg)
}
aml_append(dev, aml_name_decl("_CRS", rbuf));
acpi_dsdt_add_pci_osc(dev);
acpi_dsdt_add_host_bridge_methods(dev, cfg->pci_native_hotplug);
Aml *dev_res0 = aml_device("%s", "RES0");
aml_append(dev_res0, aml_name_decl("_HID", aml_string("PNP0C02")));

View file

@ -28,6 +28,7 @@
#include "qapi/error.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/bswap.h"
#include "hw/qdev-properties.h"
#include "hw/registerfields.h"
#include "hw/pci/pci_device.h"

View file

@ -8,6 +8,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/bswap.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
#include "hw/pci-host/pnv_phb3_regs.h"

View file

@ -8,6 +8,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/bswap.h"
#include "qapi/visitor.h"
#include "qapi/error.h"
#include "target/ppc/cpu.h"

View file

@ -20,7 +20,6 @@
#include "migration/vmstate.h"
#include "hw/pci/pci_device.h"
#include "hw/pci/pci_host.h"
#include "qemu/bswap.h"
#include "hw/pci-host/ppce500.h"
#include "qom/object.h"

View file

@ -28,7 +28,6 @@
#include "hw/irq.h"
#include "hw/pci/pci_device.h"
#include "hw/pci/pci_host.h"
#include "qemu/bswap.h"
#include "qemu/module.h"
#include "qom/object.h"

View file

@ -894,7 +894,10 @@ static void virt_acpi_build(RISCVVirtState *s, AcpiBuildTables *tables)
}
acpi_add_table(table_offsets, tables_blob);
spcr_setup(tables_blob, tables->linker, s);
if (ms->acpi_spcr_enabled) {
spcr_setup(tables_blob, tables->linker, s);
}
acpi_add_table(table_offsets, tables_blob);
{

View file

@ -16,6 +16,7 @@
#include "exec/target_page.h"
#include "system/memory.h"
#include "qemu/error-report.h"
#include "qemu/bswap.h"
#include "system/hw_accel.h"
#include "hw/boards.h"
#include "hw/pci/pci_device.h"

View file

@ -28,7 +28,6 @@
#include "qapi/visitor.h"
#include "qemu/module.h"
#include "qemu/log.h"
#include "qemu/bswap.h"
enum LSM303DLHCMagReg {
LSM303DLHC_MAG_REG_CRA = 0x00,

View file

@ -17,6 +17,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/bswap.h"
#include "qapi/error.h"
#include "qemu/config-file.h"
#include "qemu/module.h"

View file

@ -13,7 +13,6 @@
#include "hw/vfio/vfio-device.h"
#include "migration/misc.h"
#include "qapi/error.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
#include "qemu/main-loop.h"

View file

@ -1,6 +1,7 @@
system_virtio_ss = ss.source_set()
system_virtio_ss.add(files('virtio-bus.c'))
system_virtio_ss.add(files('iothread-vq-mapping.c'))
system_virtio_ss.add(files('virtio-config-io.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('virtio-pci.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_MMIO', if_true: files('virtio-mmio.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto.c'))
@ -10,11 +11,11 @@ system_virtio_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev.c')
specific_virtio_ss = ss.source_set()
specific_virtio_ss.add(files('virtio.c'))
specific_virtio_ss.add(files('virtio-config-io.c', 'virtio-qmp.c'))
specific_virtio_ss.add(files('virtio-qmp.c'))
if have_vhost
system_virtio_ss.add(files('vhost.c'))
specific_virtio_ss.add(files('vhost-backend.c', 'vhost-iova-tree.c'))
system_virtio_ss.add(files('vhost-backend.c', 'vhost-iova-tree.c'))
if have_vhost_user
# fixme - this really should be generic
specific_virtio_ss.add(files('vhost-user.c'))
@ -43,22 +44,22 @@ if have_vhost
endif
if have_vhost_vdpa
system_virtio_ss.add(files('vhost-vdpa.c'))
specific_virtio_ss.add(files('vhost-shadow-virtqueue.c'))
system_virtio_ss.add(files('vhost-shadow-virtqueue.c'))
endif
else
system_virtio_ss.add(files('vhost-stub.c'))
endif
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_VSOCK', if_true: files('vhost-user-vsock.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_RNG', if_true: files('virtio-rng.c'))
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: [files('virtio-nsm.c', 'cbor-helpers.c'), libcbor])
specific_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c'))
specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c'))
specific_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_SCMI'], if_true: files('vhost-user-scmi-pci.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: files('virtio-nsm.c'))
system_virtio_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: [files('cbor-helpers.c'), libcbor])
system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c'))
virtio_pci_ss = ss.source_set()
virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c'))
@ -67,6 +68,7 @@ virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_BLK', if_true: files('vhost-user-blk-
virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_SCSI', if_true: files('vhost-user-scsi-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_SCSI', if_true: files('vhost-scsi-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('virtio-crypto-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_INPUT_HOST', if_true: files('virtio-input-host-pci.c'))
@ -85,7 +87,7 @@ virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev-pci.c'))
virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MD', if_true: files('virtio-md-pci.c'))
specific_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss)
system_virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss)
system_ss.add_all(when: 'CONFIG_VIRTIO', if_true: system_virtio_ss)
system_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c'))

View file

@ -47,12 +47,6 @@ static struct vhost_log *vhost_log[VHOST_BACKEND_TYPE_MAX];
static struct vhost_log *vhost_log_shm[VHOST_BACKEND_TYPE_MAX];
static QLIST_HEAD(, vhost_dev) vhost_log_devs[VHOST_BACKEND_TYPE_MAX];
/* Memslots used by backends that support private memslots (without an fd). */
static unsigned int used_memslots;
/* Memslots used by backends that only support shared memslots (with an fd). */
static unsigned int used_shared_memslots;
static QLIST_HEAD(, vhost_dev) vhost_devices =
QLIST_HEAD_INITIALIZER(vhost_devices);
@ -74,15 +68,15 @@ unsigned int vhost_get_free_memslots(void)
QLIST_FOREACH(hdev, &vhost_devices, entry) {
unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
unsigned int cur_free;
unsigned int cur_free = r - hdev->mem->nregions;
if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
cur_free = r - used_shared_memslots;
if (unlikely(r < hdev->mem->nregions)) {
warn_report_once("used (%u) vhost backend memory slots exceed"
" the device limit (%u).", hdev->mem->nregions, r);
free = 0;
} else {
cur_free = r - used_memslots;
free = MIN(free, cur_free);
}
free = MIN(free, cur_free);
}
return free;
}
@ -666,13 +660,6 @@ static void vhost_commit(MemoryListener *listener)
dev->mem = g_realloc(dev->mem, regions_size);
dev->mem->nregions = dev->n_mem_sections;
if (dev->vhost_ops->vhost_backend_no_private_memslots &&
dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
used_shared_memslots = dev->mem->nregions;
} else {
used_memslots = dev->mem->nregions;
}
for (i = 0; i < dev->n_mem_sections; i++) {
struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
struct MemoryRegionSection *mrs = dev->mem_sections + i;
@ -1367,25 +1354,30 @@ fail_alloc_desc:
return r;
}
int vhost_virtqueue_stop(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
unsigned idx)
static int do_vhost_virtqueue_stop(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
unsigned idx, bool force)
{
int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
struct vhost_vring_state state = {
.index = vhost_vq_index,
};
int r;
int r = 0;
if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
/* Don't stop the virtqueue which might have not been started */
return 0;
}
r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
if (r < 0) {
VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
if (!force) {
r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
if (r < 0) {
VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
}
}
if (r < 0 || force) {
/* Connection to the backend is broken, so let's sync internal
* last avail idx to the device used idx.
*/
@ -1414,6 +1406,14 @@ int vhost_virtqueue_stop(struct vhost_dev *dev,
return r;
}
int vhost_virtqueue_stop(struct vhost_dev *dev,
struct VirtIODevice *vdev,
struct vhost_virtqueue *vq,
unsigned idx)
{
return do_vhost_virtqueue_stop(dev, vdev, vq, idx, false);
}
static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
int n, uint32_t timeout)
{
@ -1619,15 +1619,11 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
/*
* The listener we registered properly updated the corresponding counter.
* So we can trust that these values are accurate.
* The listener we registered properly setup the number of required
* memslots in vhost_commit().
*/
if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
used = used_shared_memslots;
} else {
used = used_memslots;
}
used = hdev->mem->nregions;
/*
* We assume that all reserved memslots actually require a real memslot
* in our vhost backend. This might not be true, for example, if the
@ -2136,7 +2132,8 @@ fail_features:
}
/* Host notifiers must be enabled at this point. */
int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
static int do_vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev,
bool vrings, bool force)
{
int i;
int rc = 0;
@ -2158,10 +2155,11 @@ int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
vhost_dev_set_vring_enable(hdev, false);
}
for (i = 0; i < hdev->nvqs; ++i) {
rc |= vhost_virtqueue_stop(hdev,
vdev,
hdev->vqs + i,
hdev->vq_index + i);
rc |= do_vhost_virtqueue_stop(hdev,
vdev,
hdev->vqs + i,
hdev->vq_index + i,
force);
}
if (hdev->vhost_ops->vhost_reset_status) {
hdev->vhost_ops->vhost_reset_status(hdev);
@ -2181,6 +2179,17 @@ int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
return rc;
}
int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
{
return do_vhost_dev_stop(hdev, vdev, vrings, false);
}
int vhost_dev_force_stop(struct vhost_dev *hdev, VirtIODevice *vdev,
bool vrings)
{
return do_vhost_dev_stop(hdev, vdev, vrings, true);
}
int vhost_net_set_backend(struct vhost_dev *hdev,
struct vhost_vring_file *file)
{

View file

@ -11,7 +11,6 @@
#include "qemu/osdep.h"
#include "hw/virtio/virtio.h"
#include "cpu.h"
uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{

View file

@ -30,6 +30,7 @@
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qemu/bswap.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/loader.h"

View file

@ -20,7 +20,7 @@
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "exec/tswap.h"
#include "qemu/target-info.h"
#include "qom/object_interfaces.h"
#include "hw/core/cpu.h"
#include "hw/virtio/virtio.h"

View file

@ -19,7 +19,6 @@
#include "hw/vmapple/vmapple.h"
#include "hw/virtio/virtio-blk.h"
#include "hw/virtio/virtio-pci.h"
#include "qemu/bswap.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"

View file

@ -54,6 +54,9 @@ typedef struct MemTxAttrs {
*/
unsigned int pid:8;
/* PCI - IOMMU operations, see PCIAddressType */
unsigned int address_type:1;
/*
* Bus masters which don't specify any attributes will get this
* (via the MEMTXATTRS_UNSPECIFIED constant), so that we can

View file

@ -9,18 +9,7 @@
#define TSWAP_H
#include "qemu/bswap.h"
/**
* target_big_endian:
* Returns true if the (default) endianness of the target is big endian,
* false otherwise. Common code should normally never need to know about the
* endianness of the target, so please do *not* use this function unless you
* know very well what you are doing!
*/
bool target_big_endian(void);
#ifdef COMPILING_PER_TARGET
#define target_big_endian() TARGET_BIG_ENDIAN
#endif
#include "qemu/target-info.h"
/*
* If we're in target-specific code, we can hard-code the swapping
@ -80,74 +69,4 @@ static inline void tswap64s(uint64_t *s)
}
}
/* Return ld{word}_{le,be}_p following target endianness. */
#define LOAD_IMPL(word, args...) \
do { \
if (target_big_endian()) { \
return glue(glue(ld, word), _be_p)(args); \
} else { \
return glue(glue(ld, word), _le_p)(args); \
} \
} while (0)
static inline int lduw_p(const void *ptr)
{
LOAD_IMPL(uw, ptr);
}
static inline int ldsw_p(const void *ptr)
{
LOAD_IMPL(sw, ptr);
}
static inline int ldl_p(const void *ptr)
{
LOAD_IMPL(l, ptr);
}
static inline uint64_t ldq_p(const void *ptr)
{
LOAD_IMPL(q, ptr);
}
static inline uint64_t ldn_p(const void *ptr, int sz)
{
LOAD_IMPL(n, ptr, sz);
}
#undef LOAD_IMPL
/* Call st{word}_{le,be}_p following target endianness. */
#define STORE_IMPL(word, args...) \
do { \
if (target_big_endian()) { \
glue(glue(st, word), _be_p)(args); \
} else { \
glue(glue(st, word), _le_p)(args); \
} \
} while (0)
static inline void stw_p(void *ptr, uint16_t v)
{
STORE_IMPL(w, ptr, v);
}
static inline void stl_p(void *ptr, uint32_t v)
{
STORE_IMPL(l, ptr, v);
}
static inline void stq_p(void *ptr, uint64_t v)
{
STORE_IMPL(q, ptr, v);
}
static inline void stn_p(void *ptr, int sz, uint64_t v)
{
STORE_IMPL(n, ptr, sz, v);
}
#undef STORE_IMPL
#endif /* TSWAP_H */

View file

@ -16,7 +16,8 @@
#error "gdbstub helpers should only be included by target specific code"
#endif
#include "exec/tswap.h"
#include "qemu/bswap.h"
#include "qemu/target-info.h"
#include "cpu-param.h"
/*
@ -33,40 +34,49 @@ static inline int gdb_get_reg8(GByteArray *buf, uint8_t val)
static inline int gdb_get_reg16(GByteArray *buf, uint16_t val)
{
uint16_t to_word = tswap16(val);
g_byte_array_append(buf, (uint8_t *) &to_word, 2);
if (target_big_endian()) {
cpu_to_be16s(&val);
} else {
cpu_to_le16s(&val);
}
g_byte_array_append(buf, (uint8_t *) &val, 2);
return 2;
}
static inline int gdb_get_reg32(GByteArray *buf, uint32_t val)
{
uint32_t to_long = tswap32(val);
g_byte_array_append(buf, (uint8_t *) &to_long, 4);
if (target_big_endian()) {
cpu_to_be32s(&val);
} else {
cpu_to_le32s(&val);
}
g_byte_array_append(buf, (uint8_t *) &val, 4);
return 4;
}
static inline int gdb_get_reg64(GByteArray *buf, uint64_t val)
{
uint64_t to_quad = tswap64(val);
g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
if (target_big_endian()) {
cpu_to_be64s(&val);
} else {
cpu_to_le64s(&val);
}
g_byte_array_append(buf, (uint8_t *) &val, 8);
return 8;
}
static inline int gdb_get_reg128(GByteArray *buf, uint64_t val_hi,
uint64_t val_lo)
{
uint64_t to_quad;
#if TARGET_BIG_ENDIAN
to_quad = tswap64(val_hi);
g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
to_quad = tswap64(val_lo);
g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
#else
to_quad = tswap64(val_lo);
g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
to_quad = tswap64(val_hi);
g_byte_array_append(buf, (uint8_t *) &to_quad, 8);
#endif
uint64_t tmp[2];
if (target_big_endian()) {
tmp[0] = cpu_to_be64(val_hi);
tmp[1] = cpu_to_be64(val_lo);
} else {
tmp[0] = cpu_to_le64(val_lo);
tmp[1] = cpu_to_le64(val_hi);
}
g_byte_array_append(buf, (uint8_t *)&tmp, 16);
return 16;
}

View file

@ -63,12 +63,13 @@
#include "hw/acpi/memory_hotplug.h"
#include "hw/acpi/ghes.h"
#include "hw/acpi/cpu.h"
#include "hw/acpi/pcihp.h"
#include "qom/object.h"
#define ACPI_POWER_BUTTON_DEVICE "PWRB"
#define TYPE_ACPI_GED "acpi-ged"
OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED)
OBJECT_DECLARE_TYPE(AcpiGedState, AcpiGedClass, ACPI_GED)
#define ACPI_GED_EVT_SEL_OFFSET 0x0
#define ACPI_GED_EVT_SEL_LEN 0x4
@ -101,6 +102,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED)
#define ACPI_GED_PWR_DOWN_EVT 0x2
#define ACPI_GED_NVDIMM_HOTPLUG_EVT 0x4
#define ACPI_GED_CPU_HOTPLUG_EVT 0x8
#define ACPI_GED_PCI_HOTPLUG_EVT 0x10
typedef struct GEDState {
MemoryRegion evt;
@ -108,18 +110,31 @@ typedef struct GEDState {
uint32_t sel;
} GEDState;
#define ACPI_PCIHP_REGION_NAME "pcihp container"
#define ACPI_MEMHP_REGION_NAME "memhp container"
struct AcpiGedState {
SysBusDevice parent_obj;
MemHotplugState memhp_state;
MemoryRegion container_memhp;
CPUHotplugState cpuhp_state;
MemoryRegion container_cpuhp;
AcpiPciHpState pcihp_state;
MemoryRegion container_pcihp;
GEDState ged_state;
uint32_t ged_event_bitmap;
qemu_irq irq;
AcpiGhesState ghes_state;
};
typedef struct AcpiGedClass {
/* <private> */
SysBusDeviceClass parent_class;
/*< public >*/
ResettablePhases parent_phases;
} AcpiGedClass;
void build_ged_aml(Aml *table, const char* name, HotplugHandler *hotplug_dev,
uint32_t ged_irq, AmlRegionSpace rs, hwaddr ged_base);
void acpi_dsdt_add_power_button(Aml *scope);

View file

@ -36,11 +36,12 @@ typedef struct AcpiMcfgInfo {
void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info,
const char *oem_id, const char *oem_table_id);
Aml *aml_pci_device_dsm(void);
void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus);
void build_pci_bridge_aml(AcpiDevAmlIf *adev, Aml *scope);
void build_srat_generic_affinity_structures(GArray *table_data);
Aml *build_pci_host_bridge_osc_method(bool enable_native_pcie_hotplug);
Aml *build_pci_bridge_edsm(void);
#endif

View file

@ -28,11 +28,18 @@
#define HW_ACPI_PCIHP_H
#include "hw/acpi/acpi.h"
#include "hw/acpi/aml-build.h"
#include "hw/hotplug.h"
#define ACPI_PCIHP_IO_BASE_PROP "acpi-pcihp-io-base"
#define ACPI_PCIHP_IO_LEN_PROP "acpi-pcihp-io-len"
/* PCI Hot-plug registers bases. See docs/specs/acpi_pci_hotplug.rst */
#define ACPI_PCIHP_SEJ_BASE 0x8
#define ACPI_PCIHP_BNMR_BASE 0x10
#define ACPI_PCIHP_SIZE 0x0018
typedef struct AcpiPciHpPciStatus {
uint32_t up;
uint32_t down;
@ -55,7 +62,7 @@ typedef struct AcpiPciHpState {
bool use_acpi_root_pci_hotplug;
} AcpiPciHpState;
void acpi_pcihp_init(Object *owner, AcpiPciHpState *, PCIBus *root,
void acpi_pcihp_init(Object *owner, AcpiPciHpState *,
MemoryRegion *io, uint16_t io_base);
bool acpi_pcihp_is_hotpluggable_bus(AcpiPciHpState *s, BusState *bus);
@ -69,6 +76,14 @@ void acpi_pcihp_device_unplug_request_cb(HotplugHandler *hotplug_dev,
AcpiPciHpState *s, DeviceState *dev,
Error **errp);
void build_acpi_pci_hotplug(Aml *table, AmlRegionSpace rs, uint64_t pcihp_addr);
void build_append_pci_dsm_func0_common(Aml *ctx, Aml *retvar);
void build_append_pcihp_resources(Aml *table,
uint64_t io_addr, uint64_t io_len);
bool build_append_notification_callback(Aml *parent_scope, const PCIBus *bus);
void build_append_pci_bus_devices(Aml *parent_scope, PCIBus *bus);
/* Called on reset */
void acpi_pcihp_reset(AcpiPciHpState *s);

View file

@ -80,6 +80,7 @@ enum {
VIRT_ACPI_GED,
VIRT_NVDIMM_ACPI,
VIRT_PVTIME,
VIRT_ACPI_PCIHP,
VIRT_LOWMEMMAP_LAST,
};

View file

@ -443,6 +443,7 @@ struct MachineState {
SmpCache smp_cache;
struct NVDIMMState *nvdimms_state;
struct NumaState *numa_state;
bool acpi_spcr_enabled;
};
/*

View file

@ -23,6 +23,7 @@
#define CXL_DEVICE_REG_BAR_IDX 2
#define CXL_WINDOW_MAX 10
#define CXL_NUM_EXTENTS_SUPPORTED 512
typedef struct PXBCXLDev PXBCXLDev;

View file

@ -133,6 +133,15 @@ typedef enum {
CXL_MBOX_MAX = 0x20
} CXLRetCode;
/* r3.2 Section 7.6.7.6.2: Table 7-66: DSMAS Flags Bits */
typedef enum {
CXL_DSMAS_FLAGS_NONVOLATILE = 2,
CXL_DSMAS_FLAGS_SHARABLE = 3,
CXL_DSMAS_FLAGS_HW_MANAGED_COHERENCY = 4,
CXL_DSMAS_FLAGS_IC_SPECIFIC_DC_MANAGEMENT = 5,
CXL_DSMAS_FLAGS_RDONLY = 6,
} CXLDSMASFlags;
typedef struct CXLCCI CXLCCI;
typedef struct cxl_device_state CXLDeviceState;
struct cxl_cmd;
@ -530,6 +539,14 @@ typedef struct CXLDCRegion {
uint32_t dsmadhandle;
uint8_t flags;
unsigned long *blk_bitmap;
uint64_t supported_blk_size_bitmask;
QemuMutex bitmap_lock;
/* Following bools make up dsmas flags, as defined in the CDAT */
bool nonvolatile;
bool sharable;
bool hw_managed_coherency;
bool ic_specific_dc_management;
bool rdonly;
} CXLDCRegion;
typedef struct CXLSetFeatureInfo {
@ -618,6 +635,7 @@ struct CXLType3Dev {
CXLDCExtentList extents;
CXLDCExtentGroupList extents_pending;
uint32_t total_extent_count;
uint32_t nr_extents_accepted;
uint32_t ext_list_gen_seq;
uint8_t num_regions; /* 0-8 regions */
@ -696,11 +714,22 @@ CXLDCExtentGroup *cxl_insert_extent_to_extent_group(CXLDCExtentGroup *group,
uint16_t shared_seq);
void cxl_extent_group_list_insert_tail(CXLDCExtentGroupList *list,
CXLDCExtentGroup *group);
void cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list);
uint32_t cxl_extent_group_list_delete_front(CXLDCExtentGroupList *list);
void ct3_set_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
uint64_t len);
void ct3_clear_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
uint64_t len);
bool ct3_test_region_block_backed(CXLType3Dev *ct3d, uint64_t dpa,
uint64_t len);
void cxl_assign_event_header(CXLEventRecordHdr *hdr,
const QemuUUID *uuid, uint32_t flags,
uint8_t length, uint64_t timestamp);
void cxl_create_dc_event_records_for_extents(CXLType3Dev *ct3d,
CXLDCEventType type,
CXLDCExtentRaw extents[],
uint32_t ext_count);
bool cxl_extents_overlaps_dpa_range(CXLDCExtentList *list,
uint64_t dpa, uint64_t len);
bool cxl_extent_groups_overlaps_dpa_range(CXLDCExtentGroupList *list,
uint64_t dpa, uint64_t len);
#endif

View file

@ -184,4 +184,19 @@ typedef struct CXLEventDynamicCapacity {
uint32_t tags_avail;
} QEMU_PACKED CXLEventDynamicCapacity;
/* CXL r3.1 Table 8-50: Dynamic Capacity Event Record */
static const QemuUUID dynamic_capacity_uuid = {
.data = UUID(0xca95afa7, 0xf183, 0x4018, 0x8c, 0x2f,
0x95, 0x26, 0x8e, 0x10, 0x1a, 0x2a),
};
typedef enum CXLDCEventType {
DC_EVENT_ADD_CAPACITY = 0x0,
DC_EVENT_RELEASE_CAPACITY = 0x1,
DC_EVENT_FORCED_RELEASE_CAPACITY = 0x2,
DC_EVENT_REGION_CONFIG_UPDATED = 0x3,
DC_EVENT_ADD_CAPACITY_RSP = 0x4,
DC_EVENT_CAPACITY_RELEASED = 0x5,
} CXLDCEventType;
#endif /* CXL_EVENTS_H */

View file

@ -8,6 +8,7 @@
#ifndef CXL_MAILBOX_H
#define CXL_MAILBOX_H
#define CXL_MBOX_CONFIG_CHANGE_COLD_RESET (1)
#define CXL_MBOX_IMMEDIATE_CONFIG_CHANGE (1 << 1)
#define CXL_MBOX_IMMEDIATE_DATA_CHANGE (1 << 2)
#define CXL_MBOX_IMMEDIATE_POLICY_CHANGE (1 << 3)
@ -15,5 +16,10 @@
#define CXL_MBOX_SECURITY_STATE_CHANGE (1 << 5)
#define CXL_MBOX_BACKGROUND_OPERATION (1 << 6)
#define CXL_MBOX_BACKGROUND_OPERATION_ABORT (1 << 7)
#define CXL_MBOX_SECONDARY_MBOX_SUPPORTED (1 << 8)
#define CXL_MBOX_REQUEST_ABORT_BACKGROUND_OP_SUPPORTED (1 << 9)
#define CXL_MBOX_CEL_10_TO_11_VALID (1 << 10)
#define CXL_MBOX_CONFIG_CHANGE_CONV_RESET (1 << 11)
#define CXL_MBOX_CONFIG_CHANGE_CXL_RESET (1 << 12)
#endif

View file

@ -45,6 +45,7 @@ struct GPEXConfig {
MemMapEntry pio;
int irq;
PCIBus *bus;
bool pci_native_hotplug;
};
typedef struct GPEXIrq GPEXIrq;

View file

@ -134,6 +134,15 @@ struct PCIHostDeviceAddress {
unsigned int function;
};
/*
* Represents the Address Type (AT) field in a PCI request,
* see MemTxAttrs.address_type
*/
typedef enum PCIAddressType {
PCI_AT_UNTRANSLATED = 0, /* Default when no attribute is set */
PCI_AT_TRANSLATED = 1,
} PCIAddressType;
typedef void PCIConfigWriteFunc(PCIDevice *pci_dev,
uint32_t address, uint32_t data, int len);
typedef uint32_t PCIConfigReadFunc(PCIDevice *pci_dev,

View file

@ -43,11 +43,22 @@ struct PropertyInfo {
ObjectPropertyRelease *release;
};
/**
* struct OnOffAutoBit64 - OnOffAuto storage with 64 elements.
* @on_bits: Bitmap of elements with "on".
* @auto_bits: Bitmap of elements with "auto".
*/
typedef struct OnOffAutoBit64 {
uint64_t on_bits;
uint64_t auto_bits;
} OnOffAutoBit64;
/*** qdev-properties.c ***/
extern const PropertyInfo qdev_prop_bit;
extern const PropertyInfo qdev_prop_bit64;
extern const PropertyInfo qdev_prop_on_off_auto_bit64;
extern const PropertyInfo qdev_prop_bool;
extern const PropertyInfo qdev_prop_uint8;
extern const PropertyInfo qdev_prop_uint16;
@ -100,6 +111,13 @@ extern const PropertyInfo qdev_prop_link;
.set_default = true, \
.defval.u = (bool)_defval)
#define DEFINE_PROP_ON_OFF_AUTO_BIT64(_name, _state, _field, _bit, _defval) \
DEFINE_PROP(_name, _state, _field, qdev_prop_on_off_auto_bit64, \
OnOffAutoBit64, \
.bitnr = (_bit), \
.set_default = true, \
.defval.i = (OnOffAuto)_defval)
#define DEFINE_PROP_BOOL(_name, _state, _field, _defval) \
DEFINE_PROP(_name, _state, _field, qdev_prop_bool, bool, \
.set_default = true, \

View file

@ -82,6 +82,7 @@ void sysbus_connect_irq(SysBusDevice *dev, int n, qemu_irq irq);
bool sysbus_is_irq_connected(SysBusDevice *dev, int n);
qemu_irq sysbus_get_connected_irq(SysBusDevice *dev, int n);
void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr);
int sysbus_mmio_map_name(SysBusDevice *dev, const char*name, hwaddr addr);
void sysbus_mmio_map_overlap(SysBusDevice *dev, int n, hwaddr addr,
int priority);

View file

@ -50,6 +50,8 @@ struct VHostUserBlk {
bool connected;
/* vhost_user_blk_start/vhost_user_blk_stop */
bool started_vu;
bool skip_get_vring_base_on_force_shutdown;
};
#endif

View file

@ -242,6 +242,21 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
*/
int vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings);
/**
* vhost_dev_force_stop() - force stop the vhost device
* @hdev: common vhost_dev structure
* @vdev: the VirtIODevice structure
* @vrings: true to have vrings disabled in this call
*
* Force stop the vhost device. After the device is stopped the notifiers
* can be disabled (@vhost_dev_disable_notifiers) and the device can
* be torn down (@vhost_dev_cleanup). Unlike @vhost_dev_stop, this doesn't
* attempt to flush in-flight backend requests by skipping GET_VRING_BASE
* entirely.
*/
int vhost_dev_force_stop(struct vhost_dev *hdev, VirtIODevice *vdev,
bool vrings);
/**
* DOC: vhost device configuration handling
*

View file

@ -144,7 +144,11 @@ typedef struct VirtioNetRssData {
bool enabled_software_rss;
bool redirect;
bool populate_hash;
uint32_t hash_types;
bool peer_hash_available;
uint32_t runtime_hash_types;
uint32_t supported_hash_types;
uint32_t peer_hash_types;
OnOffAutoBit64 specified_hash_types;
uint8_t key[VIRTIO_NET_RSS_MAX_KEY_SIZE];
uint16_t indirections_len;
uint16_t *indirections_table;

View file

@ -60,6 +60,7 @@ typedef bool (HasVnetHdrLen)(NetClientState *, int);
typedef void (SetOffload)(NetClientState *, int, int, int, int, int, int, int);
typedef int (GetVnetHdrLen)(NetClientState *);
typedef void (SetVnetHdrLen)(NetClientState *, int);
typedef bool (GetVnetHashSupportedTypes)(NetClientState *, uint32_t *);
typedef int (SetVnetLE)(NetClientState *, bool);
typedef int (SetVnetBE)(NetClientState *, bool);
typedef struct SocketReadState SocketReadState;
@ -90,6 +91,7 @@ typedef struct NetClientInfo {
SetVnetHdrLen *set_vnet_hdr_len;
SetVnetLE *set_vnet_le;
SetVnetBE *set_vnet_be;
GetVnetHashSupportedTypes *get_vnet_hash_supported_types;
NetAnnounce *announce;
SetSteeringEBPF *set_steering_ebpf;
NetCheckPeerType *check_peer_type;
@ -191,6 +193,7 @@ void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
int ecn, int ufo, int uso4, int uso6);
int qemu_get_vnet_hdr_len(NetClientState *nc);
void qemu_set_vnet_hdr_len(NetClientState *nc, int len);
bool qemu_get_vnet_hash_supported_types(NetClientState *nc, uint32_t *types);
int qemu_set_vnet_le(NetClientState *nc, bool is_le);
int qemu_set_vnet_be(NetClientState *nc, bool is_be);
void qemu_macaddr_default_if_unset(MACAddr *macaddr);

View file

@ -1,6 +1,8 @@
#ifndef BSWAP_H
#define BSWAP_H
#include "qemu/target-info.h"
#undef bswap16
#define bswap16(_x) __builtin_bswap16(_x)
#undef bswap32
@ -432,4 +434,75 @@ DO_STN_LDN_P(be)
#undef le_bswaps
#undef be_bswaps
/* Return ld{word}_{le,be}_p following target endianness. */
#define LOAD_IMPL(word, args...) \
do { \
if (target_big_endian()) { \
return glue(glue(ld, word), _be_p)(args); \
} else { \
return glue(glue(ld, word), _le_p)(args); \
} \
} while (0)
static inline int lduw_p(const void *ptr)
{
LOAD_IMPL(uw, ptr);
}
static inline int ldsw_p(const void *ptr)
{
LOAD_IMPL(sw, ptr);
}
static inline int ldl_p(const void *ptr)
{
LOAD_IMPL(l, ptr);
}
static inline uint64_t ldq_p(const void *ptr)
{
LOAD_IMPL(q, ptr);
}
static inline uint64_t ldn_p(const void *ptr, int sz)
{
LOAD_IMPL(n, ptr, sz);
}
#undef LOAD_IMPL
/* Call st{word}_{le,be}_p following target endianness. */
#define STORE_IMPL(word, args...) \
do { \
if (target_big_endian()) { \
glue(glue(st, word), _be_p)(args); \
} else { \
glue(glue(st, word), _le_p)(args); \
} \
} while (0)
static inline void stw_p(void *ptr, uint16_t v)
{
STORE_IMPL(w, ptr, v);
}
static inline void stl_p(void *ptr, uint32_t v)
{
STORE_IMPL(l, ptr, v);
}
static inline void stq_p(void *ptr, uint64_t v)
{
STORE_IMPL(q, ptr, v);
}
static inline void stn_p(void *ptr, int sz, uint64_t v)
{
STORE_IMPL(n, ptr, sz, v);
}
#undef STORE_IMPL
#endif /* BSWAP_H */

View file

@ -9,17 +9,21 @@
#ifndef QEMU_TARGET_INFO_IMPL_H
#define QEMU_TARGET_INFO_IMPL_H
#include "qemu/target-info.h"
#include "qapi/qapi-types-machine.h"
typedef struct TargetInfo {
/* runtime equivalent of TARGET_NAME definition */
const char *target_name;
/* related to TARGET_ARCH definition */
SysEmuTarget target_arch;
/* runtime equivalent of TARGET_LONG_BITS definition */
unsigned long_bits;
/* runtime equivalent of CPU_RESOLVING_TYPE definition */
const char *cpu_type;
/* QOM typename machines for this binary must implement */
const char *machine_typename;
/* related to TARGET_BIG_ENDIAN definition */
EndianMode endianness;
} TargetInfo;
/**

View file

@ -0,0 +1,29 @@
/*
* QEMU target info API (returning QAPI types)
*
* Copyright (c) Linaro
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef QEMU_TARGET_INFO_EXTRA_H
#define QEMU_TARGET_INFO_EXTRA_H
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-types-machine.h"
/**
* target_arch:
*
* Returns: QAPI SysEmuTarget enum (e.g. SYS_EMU_TARGET_X86_64).
*/
SysEmuTarget target_arch(void);
/**
* target_endian_mode:
*
* Returns: QAPI EndianMode enum (e.g. ENDIAN_MODE_LITTLE).
*/
EndianMode target_endian_mode(void);
#endif

View file

@ -1,5 +1,5 @@
/*
* QEMU target info API
* QEMU target info API (returning native types)
*
* Copyright (c) Linaro
*
@ -38,4 +38,16 @@ const char *target_machine_typename(void);
*/
const char *target_cpu_type(void);
/**
* target_big_endian:
*
* Returns: %true if the (default) endianness of the target is big endian,
* %false otherwise.
*
* Common code should normally never need to know about the endianness of
* the target, so please do *not* use this function unless you know very
* well what you are doing!
*/
bool target_big_endian(void);
#endif

View file

@ -19,7 +19,6 @@
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "exec/tswap.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
@ -109,15 +108,34 @@ struct MemoryRegionSection {
typedef struct IOMMUTLBEntry IOMMUTLBEntry;
/* See address_space_translate: bit 0 is read, bit 1 is write. */
/*
* See address_space_translate:
* - bit 0 : read
* - bit 1 : write
* - bit 2 : exec
* - bit 3 : priv
* - bit 4 : global
* - bit 5 : untranslated only
*/
typedef enum {
IOMMU_NONE = 0,
IOMMU_RO = 1,
IOMMU_WO = 2,
IOMMU_RW = 3,
IOMMU_EXEC = 4,
IOMMU_PRIV = 8,
IOMMU_GLOBAL = 16,
IOMMU_UNTRANSLATED_ONLY = 32,
} IOMMUAccessFlags;
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | \
((w) ? IOMMU_WO : 0))
#define IOMMU_ACCESS_FLAG_FULL(r, w, x, p, g, uo) \
(IOMMU_ACCESS_FLAG(r, w) | \
((x) ? IOMMU_EXEC : 0) | \
((p) ? IOMMU_PRIV : 0) | \
((g) ? IOMMU_GLOBAL : 0) | \
((uo) ? IOMMU_UNTRANSLATED_ONLY : 0))
struct IOMMUTLBEntry {
AddressSpace *target_as;
@ -125,6 +143,7 @@ struct IOMMUTLBEntry {
hwaddr translated_addr;
hwaddr addr_mask; /* 0xfff = 4k translation */
IOMMUAccessFlags perm;
uint32_t pasid;
};
/*

View file

@ -107,6 +107,7 @@ void qemu_system_vmstop_request(RunState reason);
void qemu_system_vmstop_request_prepare(void);
bool qemu_vmstop_requested(RunState *r);
ShutdownCause qemu_shutdown_requested_get(void);
bool qemu_force_shutdown_requested(void);
ShutdownCause qemu_reset_requested_get(void);
void qemu_system_killed(int signal, pid_t pid);
void qemu_system_reset(ShutdownCause reason);

View file

@ -6,7 +6,6 @@
#endif
#include "exec/cpu-defs.h"
#include "exec/tswap.h"
#include "user/tswap-target.h"
#ifdef TARGET_ABI32

View file

@ -573,6 +573,15 @@ void qemu_set_vnet_hdr_len(NetClientState *nc, int len)
nc->info->set_vnet_hdr_len(nc, len);
}
bool qemu_get_vnet_hash_supported_types(NetClientState *nc, uint32_t *types)
{
if (!nc || !nc->info->get_vnet_hash_supported_types) {
return false;
}
return nc->info->get_vnet_hash_supported_types(nc, types);
}
int qemu_set_vnet_le(NetClientState *nc, bool is_le)
{
#if HOST_BIG_ENDIAN

View file

@ -244,12 +244,6 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
g_free(s->vhost_vdpa.shared);
}
/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
return true;
}
static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
@ -257,6 +251,32 @@ static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
return true;
}
static bool vhost_vdpa_get_vnet_hash_supported_types(NetClientState *nc,
uint32_t *types)
{
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
uint64_t features = s->vhost_vdpa.dev->features;
int fd = s->vhost_vdpa.shared->device_fd;
struct {
struct vhost_vdpa_config hdr;
uint32_t supported_hash_types;
} config;
if (!virtio_has_feature(features, VIRTIO_NET_F_HASH_REPORT) &&
!virtio_has_feature(features, VIRTIO_NET_F_RSS)) {
return false;
}
config.hdr.off = offsetof(struct virtio_net_config, supported_hash_types);
config.hdr.len = sizeof(config.supported_hash_types);
assert(!ioctl(fd, VHOST_VDPA_GET_CONFIG, &config));
*types = le32_to_cpu(config.supported_hash_types);
return true;
}
static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
@ -433,10 +453,10 @@ static NetClientInfo net_vhost_vdpa_info = {
.stop = vhost_vdpa_net_client_stop,
.cleanup = vhost_vdpa_cleanup,
.has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
.get_vnet_hash_supported_types = vhost_vdpa_get_vnet_hash_supported_types,
.has_ufo = vhost_vdpa_has_ufo,
.set_vnet_le = vhost_vdpa_set_vnet_le,
.check_peer_type = vhost_vdpa_check_peer_type,
.set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
.get_vhost_net = vhost_vdpa_get_vhost_net,
};
@ -844,13 +864,13 @@ static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
* configuration only at live migration.
*/
if (!n->rss_data.enabled ||
n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
n->rss_data.runtime_hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
return 0;
}
table = g_malloc_n(n->rss_data.indirections_len,
sizeof(n->rss_data.indirections_table[0]));
cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);
cfg.hash_types = cpu_to_le32(n->rss_data.runtime_hash_types);
if (do_rss) {
/*
@ -1290,9 +1310,9 @@ static NetClientInfo net_vhost_vdpa_cvq_info = {
.stop = vhost_vdpa_net_cvq_stop,
.cleanup = vhost_vdpa_cleanup,
.has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
.get_vnet_hash_supported_types = vhost_vdpa_get_vnet_hash_supported_types,
.has_ufo = vhost_vdpa_has_ufo,
.check_peer_type = vhost_vdpa_check_peer_type,
.set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
.get_vhost_net = vhost_vdpa_get_vhost_net,
};

View file

@ -38,6 +38,7 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \
" nvdimm=on|off controls NVDIMM support (default=off)\n"
" memory-encryption=@var{} memory encryption object to use (default=none)\n"
" hmat=on|off controls ACPI HMAT support (default=off)\n"
" spcr=on|off controls ACPI SPCR support (default=on)\n"
#ifdef CONFIG_POSIX
" aux-ram-share=on|off allocate auxiliary guest RAM as shared (default: off)\n"
#endif
@ -105,6 +106,10 @@ SRST
Enables or disables ACPI Heterogeneous Memory Attribute Table
(HMAT) support. The default is off.
``spcr=on|off``
Enables or disables ACPI Serial Port Console Redirection Table
(SPCR) support. The default is on.
``aux-ram-share=on|off``
Allocate auxiliary guest RAM as an anonymous file that is
shareable with an external process. This option applies to

View file

@ -14,7 +14,8 @@
clippy::missing_const_for_fn,
clippy::ptr_offset_with_cast,
clippy::useless_transmute,
clippy::missing_safety_doc
clippy::missing_safety_doc,
clippy::too_many_arguments
)]
//! `bindgen`-generated declarations.

View file

@ -22,6 +22,7 @@
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/qemu-print.h"
#include "qemu/target-info.h"
#include "qom/object.h"
#include "trace.h"
#include "system/ram_addr.h"

View file

@ -29,6 +29,7 @@
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/cutils.h"
#include "qemu/target-info.h"
#include "qom/object_interfaces.h"
#define MAX_IRQ 256

View file

@ -437,6 +437,7 @@ static ShutdownCause reset_requested;
static ShutdownCause shutdown_requested;
static int shutdown_exit_code = EXIT_SUCCESS;
static int shutdown_signal;
static bool force_shutdown;
static pid_t shutdown_pid;
static int powerdown_requested;
static int debug_requested;
@ -457,6 +458,11 @@ ShutdownCause qemu_shutdown_requested_get(void)
return shutdown_requested;
}
bool qemu_force_shutdown_requested(void)
{
return force_shutdown;
}
ShutdownCause qemu_reset_requested_get(void)
{
return reset_requested;
@ -805,6 +811,7 @@ void qemu_system_killed(int signal, pid_t pid)
* we are in a signal handler.
*/
shutdown_requested = SHUTDOWN_CAUSE_HOST_SIGNAL;
force_shutdown = true;
qemu_notify_event();
}
@ -820,6 +827,9 @@ void qemu_system_shutdown_request(ShutdownCause reason)
trace_qemu_system_shutdown_request(reason);
replay_shutdown_request(reason);
shutdown_requested = reason;
if (reason == SHUTDOWN_CAUSE_HOST_QMP_QUIT) {
force_shutdown = true;
}
qemu_notify_event();
}

View file

@ -14,9 +14,11 @@
static const TargetInfo target_info_stub = {
.target_name = TARGET_NAME,
.target_arch = SYS_EMU_TARGET__MAX,
.long_bits = TARGET_LONG_BITS,
.cpu_type = CPU_RESOLVING_TYPE,
.machine_typename = TYPE_MACHINE,
.endianness = TARGET_BIG_ENDIAN ? ENDIAN_MODE_BIG : ENDIAN_MODE_LITTLE,
};
const TargetInfo *target_info(void)

View file

@ -8,7 +8,9 @@
#include "qemu/osdep.h"
#include "qemu/target-info.h"
#include "qemu/target-info-qapi.h"
#include "qemu/target-info-impl.h"
#include "qapi/error.h"
const char *target_name(void)
{
@ -20,6 +22,17 @@ unsigned target_long_bits(void)
return target_info()->long_bits;
}
SysEmuTarget target_arch(void)
{
SysEmuTarget arch = target_info()->target_arch;
if (arch == SYS_EMU_TARGET__MAX) {
arch = qapi_enum_parse(&SysEmuTarget_lookup, target_name(), -1,
&error_abort);
}
return arch;
}
const char *target_cpu_type(void)
{
return target_info()->cpu_type;
@ -29,3 +42,13 @@ const char *target_machine_typename(void)
{
return target_info()->machine_typename;
}
EndianMode target_endian_mode(void)
{
return target_info()->endianness;
}
bool target_big_endian(void)
{
return target_endian_mode() == ENDIAN_MODE_BIG;
}

View file

@ -21,6 +21,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/target-info.h"
#include "hw/boards.h"
#include "kvm_arm.h"
#include "qapi/error.h"
@ -241,7 +242,7 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
CpuDefinitionInfoList *cpu_list = NULL;
GSList *list;
list = object_class_get_list(TYPE_ARM_CPU, false);
list = object_class_get_list(target_cpu_type(), false);
g_slist_foreach(list, arm_cpu_add_definition, &cpu_list);
g_slist_free(list);

View file

@ -23,7 +23,6 @@
#include "qemu/timer.h"
#include "qemu/log.h"
#include "exec/page-vary.h"
#include "exec/tswap.h"
#include "target/arm/idau.h"
#include "qemu/module.h"
#include "qapi/error.h"

View file

@ -25,7 +25,6 @@
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "exec/tlb-flags.h"
#include "exec/tswap.h"
#include "tcg/helper-tcg.h"
typedef struct TranslateParams {

Some files were not shown because too many files have changed in this diff Show more