Merge tag 'physmem-20251007' of https://github.com/philmd/qemu into staging

Memory patches

- Cleanups on RAMBlock API
- Cleanups on Physical Memory API
- Remove cpu_physical_memory_is_io()
- Remove cpu_physical_memory_rw()
- Legacy conversion [cpu_physical_memory -> address_space]_[un]map()
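
A sketch of the [un]map conversion (illustration only, not part of the series;
the values below are placeholders, and the real patches pass the device's own
address space, e.g. vdev->dma_as in the virtio/vhost changes):

    AddressSpace *as = &address_space_memory;  /* or e.g. vdev->dma_as */
    hwaddr addr = 0x1000;                      /* placeholder guest address */
    hwaddr len = 4096;
    void *host;

    /* before: legacy helpers, hard-wired to &address_space_memory */
    host = cpu_physical_memory_map(addr, &len, true);
    cpu_physical_memory_unmap(host, len, true, len);

    /* after: the target AddressSpace and MemTxAttrs are passed explicitly */
    host = address_space_map(as, addr, &len, true, MEMTXATTRS_UNSPECIFIED);
    address_space_unmap(as, host, len, true, len);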

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEE+qvnXhKRciHc/Wuy4+MsLN6twN4FAmjkgzUACgkQ4+MsLN6t
# wN7ouQ//V/goGzlt3AueQCs3UwYLsnLbHq6PIlddEmbEe4VobgBMY6qhgvM54dhS
# EX/uBikOVHNSjFzKTcr67fLk+wIaGpY6VeZWHibKTh9dFPGjPCBOshYRQl8Oe31p
# 4sIppkJGcuvQyXO07avLoSbpUo7REDja1G688D45ANOEPpb+7nHJlgq7uWUlbT5k
# ICqT6TF+V0nmM/4yK19sFdimyS++PtnAgLwdODhG6apMNy8CkjE2I6IaPFHaCJEf
# +we8iHxhPn6NkP7P7EHDNkLAOz/jhL7pIf3/kC3Pc+aMtXYOrH1dVcmMhF9zbP/F
# MXMQIOIhKbuEksqIpTqP80UegTO4WVBPPDNZafRtA3Fzfex5WU0PxbtrwZtSNCP4
# e5mSp36Xp2fLaAsXMZBIUQrRnj4Hy0m7YMFSi8aoMsJRvnou6cJ02BCWjP+VUk74
# rrGpHKZ3vIhwVsRVcU43m8Xctk0H9KkhOTVvOGqzBUnJUo2eQb7w4qUxim7L8EoP
# g2AOak+wdwpi0402QFPf2PD1WKGu3kc3Kuny/C7YeIUdTrRR8wPCWsIjmusFRnFv
# 1deZOlwVu4ESIuPiuHsLN7WbSZjEPxZH4cgUZmdeoKCTn6vNkC6FWlluMnS8Y+Bb
# c3AKo9kVatznQvY/qiH8h8rYqI8RFta8F8QHoIo1t41YJKlzuOY=
# =FLzC
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 06 Oct 2025 08:04:21 PM PDT
# gpg:                using RSA key FAABE75E12917221DCFD6BB2E3E32C2CDEADC0DE
# gpg: Good signature from "Philippe Mathieu-Daudé (F4BUG) <f4bug@amsat.org>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: FAAB E75E 1291 7221 DCFD  6BB2 E3E3 2C2C DEAD C0DE

* tag 'physmem-20251007' of https://github.com/philmd/qemu: (41 commits)
  system/physmem: Extract API out of 'system/ram_addr.h' header
  system/physmem: Drop 'cpu_' prefix in Physical Memory API
  system/physmem: Reduce cpu_physical_memory_sync_dirty_bitmap() scope
  system/physmem: Reduce cpu_physical_memory_clear_dirty_range() scope
  system/physmem: Un-inline cpu_physical_memory_dirty_bits_cleared()
  system/physmem: Un-inline cpu_physical_memory_set_dirty_lebitmap()
  system/physmem: Remove _WIN32 #ifdef'ry
  system/physmem: Un-inline cpu_physical_memory_set_dirty_range()
  system/physmem: Un-inline cpu_physical_memory_set_dirty_flag()
  system/physmem: Un-inline cpu_physical_memory_range_includes_clean()
  system/physmem: Un-inline cpu_physical_memory_is_clean()
  system/physmem: Un-inline cpu_physical_memory_get_dirty_flag()
  hw: Remove unnecessary 'system/ram_addr.h' header
  target/arm/tcg/mte: Include missing 'exec/target_page.h' header
  hw/vfio/listener: Include missing 'exec/target_page.h' header
  hw/s390x/s390-stattrib: Include missing 'exec/target_page.h' header
  accel/kvm: Include missing 'exec/target_page.h' header
  system/ram_addr: Remove unnecessary 'exec/cpu-common.h' header
  hw/virtio/virtio: Replace legacy cpu_physical_memory_map() call
  hw/virtio/vhost: Replace legacy cpu_physical_memory_*map() calls
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 637a8b25a6 (Richard Henderson, 2025-10-07 08:46:12 -07:00)
41 changed files with 635 additions and 633 deletions

@ -3222,6 +3222,7 @@ S: Supported
F: include/system/ioport.h
F: include/exec/memop.h
F: include/system/memory.h
F: include/system/physmem.h
F: include/system/ram_addr.h
F: include/system/ramblock.h
F: include/system/memory_mapping.h

@ -32,11 +32,13 @@
#include "system/runstate.h"
#include "system/cpus.h"
#include "system/accel-blocker.h"
#include "system/physmem.h"
#include "system/ramblock.h"
#include "accel/accel-ops.h"
#include "qemu/bswap.h"
#include "exec/tswap.h"
#include "exec/target_page.h"
#include "system/memory.h"
#include "system/ram_addr.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
@ -756,7 +758,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot)
ram_addr_t start = slot->ram_start_offset;
ram_addr_t pages = slot->memory_size / qemu_real_host_page_size();
cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages);
}
static void kvm_slot_reset_dirty_pages(KVMSlot *slot)

@ -25,6 +25,7 @@
#include "accel/tcg/probe.h"
#include "exec/page-protection.h"
#include "system/memory.h"
#include "system/physmem.h"
#include "accel/tcg/cpu-ldst-common.h"
#include "accel/tcg/cpu-mmu-index.h"
#include "exec/cputlb.h"
@ -858,7 +859,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
}
@ -867,7 +868,7 @@ void tlb_protect_code(ram_addr_t ram_addr)
tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}
@ -1085,7 +1086,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
if (prot & PAGE_WRITE) {
if (section->readonly) {
write_flags |= TLB_DISCARD_WRITE;
} else if (cpu_physical_memory_is_clean(iotlb)) {
} else if (physical_memory_is_clean(iotlb)) {
write_flags |= TLB_NOTDIRTY;
}
}
@ -1341,7 +1342,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
if (!physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr);
}
@ -1349,10 +1350,10 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
* Set both VGA and migration bits for simplicity and to remove
* the notdirty callback faster.
*/
cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
/* We remove the notdirty callback only if the code has been flushed. */
if (!cpu_physical_memory_is_clean(ram_addr)) {
if (!physical_memory_is_clean(ram_addr)) {
trace_memory_notdirty_set_dirty(mem_vaddr);
tlb_set_dirty(cpu, mem_vaddr);
}

@ -460,10 +460,8 @@ For new code they are better avoided:
``cpu_physical_memory_write``
``cpu_physical_memory_rw``
Regexes for git grep:
- ``\<cpu_physical_memory_\(read\|write\|rw\)\>``
- ``\<cpu_physical_memory_\(read\|write\)\>``
``cpu_memory_rw_debug``
~~~~~~~~~~~~~~~~~~~~~~~
@ -474,7 +472,7 @@ This function is intended for use by the GDB stub and similar code.
It takes a virtual address, converts it to a physical address via
an MMU lookup using the current settings of the specified CPU,
and then performs the access (using ``address_space_rw`` for
reads or ``cpu_physical_memory_write_rom`` for writes).
reads or ``address_space_write_rom`` for writes).
This means that if the access is a write to a ROM then this
function will modify the contents (whereas a normal guest CPU access
would ignore the write attempt).
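
A hedged illustration of the preferred style for new code (placeholder values,
not taken from the patch): read guest physical memory through an explicit
address space and check the transaction result instead of using the legacy
cpu_physical_memory_* helpers.

    hwaddr gpa = 0x1000;   /* placeholder guest physical address */
    uint32_t val;
    MemTxResult res;

    /* explicit address space, attributes and transaction result */
    res = address_space_read(&address_space_memory, gpa,
                             MEMTXATTRS_UNSPECIFIED, &val, sizeof(val));
    if (res != MEMTX_OK) {
        /* the access faulted or hit unassigned memory */
    }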

@ -1242,7 +1242,7 @@ static void rom_reset(void *unused)
* that the instruction cache for that new region is clear, so that the
* CPU definitely fetches its instructions from the just written data.
*/
cpu_flush_icache_range(rom->addr, rom->datasize);
address_space_flush_icache_range(rom->as, rom->addr, rom->datasize);
trace_loader_write_rom(rom->name, rom->addr, rom->datasize, rom->isrom);
}

@ -8,6 +8,7 @@
*/
#include "qemu/osdep.h"
#include "system/ramblock.h"
#include "hv-balloon-internal.h"
#include "hv-balloon-our_range_memslots.h"
#include "trace.h"

@ -77,7 +77,6 @@
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/vhost-scsi-common.h"
#include "system/ram_addr.h"
#include "system/confidential-guest-support.h"
#include "hw/usb.h"
#include "qemu/config-file.h"

@ -27,7 +27,6 @@
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "system/hw_accel.h"
#include "system/ram_addr.h"
#include "target/ppc/cpu.h"
#include "target/ppc/mmu-hash64.h"
#include "cpu-models.h"

@ -34,7 +34,6 @@
#include "hw/pci/pci_host.h"
#include "hw/ppc/spapr.h"
#include "hw/pci-host/spapr.h"
#include "system/ram_addr.h"
#include <libfdt.h>
#include "trace.h"
#include "qemu/error-report.h"

@ -11,7 +11,6 @@
#include "qemu/osdep.h"
#include "hw/remote/memory.h"
#include "system/ram_addr.h"
#include "qapi/error.h"
static void remote_sysmem_reset(void)

@ -12,7 +12,6 @@
#include "qemu/range.h"
#include "system/memory.h"
#include "exec/cpu-common.h"
#include "system/ram_addr.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/remote/mpqemu-link.h"

@ -10,13 +10,13 @@
*/
#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "migration/qemu-file.h"
#include "hw/s390x/storage-attributes.h"
#include "qemu/error-report.h"
#include "system/kvm.h"
#include "system/memory_mapping.h"
#include "system/ram_addr.h"
#include "kvm/kvm_s390x.h"
#include "qapi/error.h"

@ -11,12 +11,12 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "exec/target_page.h"
#include "migration/qemu-file.h"
#include "migration/register.h"
#include "hw/qdev-properties.h"
#include "hw/s390x/storage-attributes.h"
#include "qemu/error-report.h"
#include "system/ram_addr.h"
#include "qapi/error.h"
#include "qobject/qdict.h"
#include "cpu.h"

@ -13,7 +13,6 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "system/ram_addr.h"
#include "system/confidential-guest-support.h"
#include "hw/boards.h"
#include "hw/s390x/sclp.h"

@ -16,6 +16,7 @@
#include "qemu/units.h"
#include "qapi/error.h"
#include "hw/boards.h"
#include "system/memory.h"
#include "hw/s390x/sclp.h"
#include "hw/s390x/event-facility.h"
#include "hw/s390x/s390-pci-bus.h"
@ -303,12 +304,14 @@ int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code)
SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp);
SCCBHeader header;
g_autofree SCCB *work_sccb = NULL;
AddressSpace *as = CPU(cpu)->as;
const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
/* first some basic checks on program checks */
if (env->psw.mask & PSW_MASK_PSTATE) {
return -PGM_PRIVILEGED;
}
if (cpu_physical_memory_is_io(sccb)) {
if (address_space_is_io(CPU(cpu)->as, sccb)) {
return -PGM_ADDRESSING;
}
if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa
@ -317,7 +320,7 @@ int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code)
}
/* the header contains the actual length of the sccb */
cpu_physical_memory_read(sccb, &header, sizeof(SCCBHeader));
address_space_read(as, sccb, attrs, &header, sizeof(SCCBHeader));
/* Valid sccb sizes */
if (be16_to_cpu(header.length) < sizeof(SCCBHeader)) {
@ -330,7 +333,7 @@ int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code)
* the host has checked the values
*/
work_sccb = g_malloc0(be16_to_cpu(header.length));
cpu_physical_memory_read(sccb, work_sccb, be16_to_cpu(header.length));
address_space_read(as, sccb, attrs, work_sccb, be16_to_cpu(header.length));
if (!sclp_command_code_valid(code)) {
work_sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND);
@ -344,8 +347,7 @@ int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code)
sclp_c->execute(sclp, work_sccb, code);
out_write:
cpu_physical_memory_write(sccb, work_sccb,
be16_to_cpu(work_sccb->h.length));
address_space_write(as, sccb, attrs, work_sccb, be16_to_cpu(header.length));
sclp_c->service_interrupt(sclp, sccb);

@ -25,7 +25,7 @@
#include "hw/vfio/vfio-device.h"
#include "system/address-spaces.h"
#include "system/memory.h"
#include "system/ram_addr.h"
#include "system/physmem.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "system/reset.h"
@ -92,7 +92,7 @@ static int vfio_dma_unmap_bitmap(const VFIOLegacyContainer *container,
bitmap = (struct vfio_bitmap *)&unmap->data;
/*
* cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
* to qemu_real_host_page_size.
*/
@ -108,7 +108,7 @@ static int vfio_dma_unmap_bitmap(const VFIOLegacyContainer *container,
ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
if (!ret) {
cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
physical_memory_set_dirty_lebitmap(vbmap.bitmap,
iotlb->translated_addr, vbmap.pages);
} else {
error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
@ -266,7 +266,7 @@ static int vfio_legacy_query_dirty_bitmap(const VFIOContainer *bcontainer,
range->size = size;
/*
* cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
* to qemu_real_host_page_size.
*/
@ -485,7 +485,7 @@ static void vfio_get_iommu_info_migration(VFIOLegacyContainer *container,
header);
/*
* cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* physical_memory_set_dirty_lebitmap() supports pages in bitmap of
* qemu_real_host_page_size to mark those dirty.
*/
if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {

@ -20,6 +20,7 @@
#include "qemu/error-report.h"
#include "hw/vfio/vfio-container.h"
#include "hw/vfio/vfio-device.h" /* vfio_device_reset_handler */
#include "system/physmem.h"
#include "system/reset.h"
#include "vfio-helpers.h"
@ -255,7 +256,7 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
int ret;
if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
cpu_physical_memory_set_dirty_range(translated_addr, size,
physical_memory_set_dirty_range(translated_addr, size,
tcg_enabled() ? DIRTY_CLIENTS_ALL :
DIRTY_CLIENTS_NOCODE);
return 0;
@ -280,7 +281,7 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
goto out;
}
dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
dirty_pages = physical_memory_set_dirty_lebitmap(vbmap.bitmap,
translated_addr,
vbmap.pages);

@ -25,11 +25,11 @@
#endif
#include <linux/vfio.h>
#include "exec/target_page.h"
#include "hw/vfio/vfio-device.h"
#include "hw/vfio/pci.h"
#include "system/address-spaces.h"
#include "system/memory.h"
#include "system/ram_addr.h"
#include "hw/hw.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"

@ -17,7 +17,6 @@
#include "hw/vfio/vfio-container-legacy.h"
#include "hw/hw.h"
#include "system/ram_addr.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "trace.h"

@ -27,6 +27,7 @@
#include "migration/blocker.h"
#include "migration/qemu-file-types.h"
#include "system/dma.h"
#include "system/memory.h"
#include "trace.h"
/* enabled until disconnected backend stabilizes */
@ -455,7 +456,8 @@ static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
hwaddr *plen, bool is_write)
{
if (!vhost_dev_has_iommu(dev)) {
return cpu_physical_memory_map(addr, plen, is_write);
return address_space_map(dev->vdev->dma_as, addr, plen, is_write,
MEMTXATTRS_UNSPECIFIED);
} else {
return (void *)(uintptr_t)addr;
}
@ -466,7 +468,8 @@ static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
hwaddr access_len)
{
if (!vhost_dev_has_iommu(dev)) {
cpu_physical_memory_unmap(buffer, len, is_write, access_len);
address_space_unmap(dev->vdev->dma_as, buffer, len, is_write,
access_len);
}
}

@ -23,6 +23,7 @@
#include "hw/qdev-properties.h"
#include "hw/boards.h"
#include "system/balloon.h"
#include "system/ramblock.h"
#include "hw/virtio/virtio-balloon.h"
#include "system/address-spaces.h"
#include "qapi/error.h"

@ -17,6 +17,7 @@
#include "qemu/units.h"
#include "system/numa.h"
#include "system/system.h"
#include "system/ramblock.h"
#include "system/reset.h"
#include "system/runstate.h"
#include "hw/virtio/virtio.h"
@ -24,7 +25,6 @@
#include "hw/virtio/virtio-mem.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "system/ram_addr.h"
#include "migration/misc.h"
#include "hw/boards.h"
#include "hw/qdev-properties.h"

@ -32,6 +32,7 @@
#include "hw/virtio/virtio-access.h"
#include "system/dma.h"
#include "system/iothread.h"
#include "system/memory.h"
#include "system/runstate.h"
#include "virtio-qmp.h"
@ -1632,7 +1633,8 @@ out:
* virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to
* yet.
*/
static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
static void virtqueue_undo_map_desc(AddressSpace *as,
unsigned int out_num, unsigned int in_num,
struct iovec *iov)
{
unsigned int i;
@ -1640,7 +1642,7 @@ static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
for (i = 0; i < out_num + in_num; i++) {
int is_write = i >= out_num;
cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
address_space_unmap(as, iov->iov_base, iov->iov_len, is_write, 0);
iov++;
}
}
@ -1842,7 +1844,7 @@ done:
return elem;
err_undo_map:
virtqueue_undo_map_desc(out_num, in_num, iov);
virtqueue_undo_map_desc(vdev->dma_as, out_num, in_num, iov);
goto done;
}
@ -1992,7 +1994,7 @@ done:
return elem;
err_undo_map:
virtqueue_undo_map_desc(out_num, in_num, iov);
virtqueue_undo_map_desc(vdev->dma_as, out_num, in_num, iov);
goto done;
}

@ -12,6 +12,7 @@
#include "hw/xen/xen-bus.h"
#include "hw/boards.h"
#include "hw/xen/arch_hvm.h"
#include "system/memory.h"
#include "system/runstate.h"
#include "system/system.h"
#include "system/xen.h"
@ -279,8 +280,8 @@ static void do_outp(uint32_t addr,
* memory, as part of the implementation of an ioreq.
*
* Equivalent to
* cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
* val, req->size, 0/1)
* address_space_rw(as, addr + (req->df ? -1 : +1) * req->size * i,
* attrs, val, req->size, 0/1)
* except without the integer overflow problems.
*/
static void rw_phys_req_item(hwaddr addr,
@ -295,7 +296,8 @@ static void rw_phys_req_item(hwaddr addr,
} else {
addr += offset;
}
cpu_physical_memory_rw(addr, val, req->size, rw);
address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
val, req->size, rw);
}
static inline void read_phys_req_item(hwaddr addr,

@ -131,26 +131,14 @@ void cpu_address_space_init(CPUState *cpu, int asidx,
*/
void cpu_destroy_address_spaces(CPUState *cpu);
void cpu_physical_memory_rw(hwaddr addr, void *buf,
hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
void *buf, hwaddr len)
{
cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
const void *buf, hwaddr len)
{
cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
void cpu_physical_memory_read(hwaddr addr, void *buf, hwaddr len);
void cpu_physical_memory_write(hwaddr addr, const void *buf, hwaddr len);
void *cpu_physical_memory_map(hwaddr addr,
hwaddr *plen,
bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
bool is_write, hwaddr access_len);
bool cpu_physical_memory_is_io(hwaddr phys_addr);
/* Coalesced MMIO regions are areas where write operations can be reordered.
* This usually implies that write operations are side-effect free. This allows
* batching which can make a major impact on performance when using
@ -158,14 +146,9 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr);
*/
void qemu_flush_coalesced_mmio_buffer(void);
void cpu_flush_icache_range(hwaddr start, hwaddr len);
typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
size_t length);
/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,

@ -2995,6 +2995,8 @@ void address_space_cache_invalidate(MemoryRegionCache *cache,
*/
void address_space_cache_destroy(MemoryRegionCache *cache);
void address_space_flush_icache_range(AddressSpace *as, hwaddr addr, hwaddr len);
/* address_space_get_iotlb_entry: translate an address into an IOTLB
* entry. Should be called from an RCU critical section.
*/
@ -3047,6 +3049,15 @@ static inline MemoryRegion *address_space_translate(AddressSpace *as,
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
bool is_write, MemTxAttrs attrs);
/**
* address_space_is_io: check whether a guest physical address
* within an address space is I/O memory.
*
* @as: #AddressSpace to be accessed
* @addr: address within that address space
*/
bool address_space_is_io(AddressSpace *as, hwaddr addr);
/* address_space_map: map a physical memory region into a host virtual address
*
* May map a subset of the requested range, given by and returned in @plen.

include/system/physmem.h (new file, 54 lines)

@ -0,0 +1,54 @@
/*
* QEMU physical memory interfaces (target independent).
*
* Copyright (c) 2003 Fabrice Bellard
*
* SPDX-License-Identifier: GPL-2.0-or-later
*/
#ifndef QEMU_SYSTEM_PHYSMEM_H
#define QEMU_SYSTEM_PHYSMEM_H
#include "exec/hwaddr.h"
#include "exec/ramlist.h"
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
bool physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client);
bool physical_memory_is_clean(ram_addr_t addr);
uint8_t physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length,
uint8_t mask);
void physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client);
void physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
uint8_t mask);
/*
* Contrary to physical_memory_sync_dirty_bitmap() this function returns
* the number of dirty pages in @bitmap passed as argument. On the other hand,
* physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
* weren't set in the global migration bitmap.
*/
uint64_t physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
ram_addr_t start,
ram_addr_t pages);
void physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length);
bool physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
DirtyBitmapSnapshot *
physical_memory_snapshot_and_clear_dirty(MemoryRegion *mr, hwaddr offset,
hwaddr length, unsigned client);
bool physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
ram_addr_t length);
#endif
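
A usage sketch for the new header (hypothetical device/migration code with
placeholder values, not part of this series): a device model that writes guest
RAM directly marks the range dirty for every client except the code bitmap,
and migration can later test-and-clear its own per-client bit.

    /* hypothetical values: the guest RAM range a device just wrote to */
    ram_addr_t ram_offset = 0x100000;
    ram_addr_t len = 4096;

    /* mark every client except the code bitmap dirty */
    physical_memory_set_dirty_range(ram_offset, len, DIRTY_CLIENTS_NOCODE);

    /* later, migration can test-and-clear its per-client bit */
    if (physical_memory_test_and_clear_dirty(ram_offset, len,
                                             DIRTY_MEMORY_MIGRATION)) {
        /* at least one page in the range was dirtied since the last sync */
    }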

@ -19,17 +19,9 @@
#ifndef SYSTEM_RAM_ADDR_H
#define SYSTEM_RAM_ADDR_H
#include "system/xen.h"
#include "system/tcg.h"
#include "exec/cputlb.h"
#include "exec/ramlist.h"
#include "system/ramblock.h"
#include "system/memory.h"
#include "exec/target_page.h"
#include "qemu/rcu.h"
#include "exec/hwaddr.h"
#include "exec/cpu-common.h"
extern uint64_t total_dirty_pages;
@ -80,17 +72,6 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1);
}
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
return (b && b->host && offset < b->used_length) ? true : false;
}
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
assert(offset_in_ramblock(block, offset));
return (char *)block->host + offset;
}
static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
RAMBlock *rb)
{
@ -99,8 +80,6 @@ static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr,
return host_addr_offset >> TARGET_PAGE_BITS;
}
bool ramblock_is_pmem(RAMBlock *rb);
/**
* qemu_ram_alloc_from_file,
* qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing
@ -153,409 +132,4 @@ static inline void qemu_ram_block_writeback(RAMBlock *block)
qemu_ram_msync(block, 0, block->used_length);
}
#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
DirtyMemoryBlocks *blocks;
unsigned long end, page;
unsigned long idx, offset, base;
bool dirty = false;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
WITH_RCU_READ_LOCK_GUARD() {
blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
base = page - offset;
while (page < end) {
unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
unsigned long num = next - base;
unsigned long found = find_next_bit(blocks->blocks[idx],
num, offset);
if (found < num) {
dirty = true;
break;
}
page = next;
idx++;
offset = 0;
base += DIRTY_MEMORY_BLOCK_SIZE;
}
}
return dirty;
}
static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
DirtyMemoryBlocks *blocks;
unsigned long end, page;
unsigned long idx, offset, base;
bool dirty = true;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
RCU_READ_LOCK_GUARD();
blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
base = page - offset;
while (page < end) {
unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
unsigned long num = next - base;
unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset);
if (found < num) {
dirty = false;
break;
}
page = next;
idx++;
offset = 0;
base += DIRTY_MEMORY_BLOCK_SIZE;
}
return dirty;
}
static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
unsigned client)
{
return cpu_physical_memory_get_dirty(addr, 1, client);
}
static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
{
bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
bool migration =
cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
return !(vga && code && migration);
}
static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
uint8_t ret = 0;
if (mask & (1 << DIRTY_MEMORY_VGA) &&
!cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
ret |= (1 << DIRTY_MEMORY_VGA);
}
if (mask & (1 << DIRTY_MEMORY_CODE) &&
!cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
ret |= (1 << DIRTY_MEMORY_CODE);
}
if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
!cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
ret |= (1 << DIRTY_MEMORY_MIGRATION);
}
return ret;
}
static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
unsigned client)
{
unsigned long page, idx, offset;
DirtyMemoryBlocks *blocks;
assert(client < DIRTY_MEMORY_NUM);
page = addr >> TARGET_PAGE_BITS;
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
RCU_READ_LOCK_GUARD();
blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
set_bit_atomic(offset, blocks->blocks[idx]);
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
unsigned long end, page;
unsigned long idx, offset, base;
int i;
if (!mask && !xen_enabled()) {
return;
}
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
WITH_RCU_READ_LOCK_GUARD() {
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
}
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
base = page - offset;
while (page < end) {
unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
offset, next - page);
}
if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
offset, next - page);
}
if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
offset, next - page);
}
page = next;
idx++;
offset = 0;
base += DIRTY_MEMORY_BLOCK_SIZE;
}
}
if (xen_enabled()) {
xen_hvm_modified_memory(start, length);
}
}
#if !defined(_WIN32)
/*
* Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
* the number of dirty pages in @bitmap passed as argument. On the other hand,
* cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
* weren't set in the global migration bitmap.
*/
static inline
uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
ram_addr_t start,
ram_addr_t pages)
{
unsigned long i, j;
unsigned long page_number, c, nbits;
hwaddr addr;
ram_addr_t ram_addr;
uint64_t num_dirty = 0;
unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
(hpratio == 1)) {
unsigned long **blocks[DIRTY_MEMORY_NUM];
unsigned long idx;
unsigned long offset;
long k;
long nr = BITS_TO_LONGS(pages);
idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
DIRTY_MEMORY_BLOCK_SIZE);
WITH_RCU_READ_LOCK_GUARD() {
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
blocks[i] =
qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
}
for (k = 0; k < nr; k++) {
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
nbits = ctpopl(temp);
qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
if (global_dirty_tracking) {
qatomic_or(
&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
temp);
if (unlikely(
global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
total_dirty_pages += nbits;
}
}
num_dirty += nbits;
if (tcg_enabled()) {
qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
temp);
}
}
if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
offset = 0;
idx++;
}
}
}
if (xen_enabled()) {
xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
}
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
if (!global_dirty_tracking) {
clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
}
/*
* bitmap-traveling is faster than memory-traveling (for addr...)
* especially when most of the memory is not dirty.
*/
for (i = 0; i < len; i++) {
if (bitmap[i] != 0) {
c = leul_to_cpu(bitmap[i]);
nbits = ctpopl(c);
if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
total_dirty_pages += nbits;
}
num_dirty += nbits;
do {
j = ctzl(c);
c &= ~(1ul << j);
page_number = (i * HOST_LONG_BITS + j) * hpratio;
addr = page_number * TARGET_PAGE_SIZE;
ram_addr = start + addr;
cpu_physical_memory_set_dirty_range(ram_addr,
TARGET_PAGE_SIZE * hpratio, clients);
} while (c != 0);
}
}
}
return num_dirty;
}
#endif /* not _WIN32 */
static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start,
ram_addr_t length)
{
if (tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
}
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client);
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
ram_addr_t length);
static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
ram_addr_t length)
{
cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
}
/* Called with RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
ram_addr_t start,
ram_addr_t length)
{
ram_addr_t addr;
unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
uint64_t num_dirty = 0;
unsigned long *dest = rb->bmap;
/* start address and length is aligned at the start of a word? */
if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
(start + rb->offset) &&
!(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
unsigned long * const *src;
unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
DIRTY_MEMORY_BLOCK_SIZE);
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
src = qatomic_rcu_read(
&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
for (k = page; k < page + nr; k++) {
if (src[idx][offset]) {
unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
num_dirty += ctpopl(new_dirty);
}
if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
offset = 0;
idx++;
}
}
if (num_dirty) {
cpu_physical_memory_dirty_bits_cleared(start, length);
}
if (rb->clear_bmap) {
/*
* Postpone the dirty bitmap clear to the point before we
* really send the pages, also we will split the clear
* dirty procedure into smaller chunks.
*/
clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
length >> TARGET_PAGE_BITS);
} else {
/* Slow path - still do that in a huge chunk */
memory_region_clear_dirty_bitmap(rb->mr, start, length);
}
} else {
ram_addr_t offset = rb->offset;
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (cpu_physical_memory_test_and_clear_dirty(
start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
long k = (start + addr) >> TARGET_PAGE_BITS;
if (!test_and_set_bit(k, dest)) {
num_dirty++;
}
}
}
}
return num_dirty;
}
#endif

@ -11,11 +11,6 @@
*
*/
/*
* This header is for use by exec.c and memory.c ONLY. Do not include it.
* The functions declared here will be removed soon.
*/
#ifndef SYSTEM_RAMBLOCK_H
#define SYSTEM_RAMBLOCK_H
@ -108,9 +103,31 @@ struct RamBlockAttributes {
QLIST_HEAD(, RamDiscardListener) rdl_list;
};
/* @offset: the offset within the RAMBlock */
int ram_block_discard_range(RAMBlock *rb, uint64_t offset, size_t length);
/* @offset: the offset within the RAMBlock */
int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t offset,
size_t length);
RamBlockAttributes *ram_block_attributes_create(RAMBlock *ram_block);
void ram_block_attributes_destroy(RamBlockAttributes *attr);
int ram_block_attributes_state_change(RamBlockAttributes *attr, uint64_t offset,
uint64_t size, bool to_discard);
/**
* ram_block_is_pmem: Whether the RAM block is of persistent memory
*/
bool ram_block_is_pmem(RAMBlock *rb);
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
return b && b->host && (offset < b->used_length);
}
static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset)
{
assert(offset_in_ramblock(block, offset));
return (char *)block->host + offset;
}
#endif

@ -53,6 +53,8 @@
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "system/cpu-throttle.h"
#include "system/physmem.h"
#include "system/ramblock.h"
#include "savevm.h"
#include "qemu/iov.h"
#include "multifd.h"
@ -935,11 +937,86 @@ bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start)
return false;
}
/* Called with RCU critical section */
static uint64_t physical_memory_sync_dirty_bitmap(RAMBlock *rb,
ram_addr_t start,
ram_addr_t length)
{
ram_addr_t addr;
unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS);
uint64_t num_dirty = 0;
unsigned long *dest = rb->bmap;
/* start address and length is aligned at the start of a word? */
if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
(start + rb->offset) &&
!(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
int k;
int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
unsigned long * const *src;
unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE;
unsigned long offset = BIT_WORD((word * BITS_PER_LONG) %
DIRTY_MEMORY_BLOCK_SIZE);
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
src = qatomic_rcu_read(
&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
for (k = page; k < page + nr; k++) {
if (src[idx][offset]) {
unsigned long bits = qatomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
num_dirty += ctpopl(new_dirty);
}
if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
offset = 0;
idx++;
}
}
if (num_dirty) {
physical_memory_dirty_bits_cleared(start, length);
}
if (rb->clear_bmap) {
/*
* Postpone the dirty bitmap clear to the point before we
* really send the pages, also we will split the clear
* dirty procedure into smaller chunks.
*/
clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
length >> TARGET_PAGE_BITS);
} else {
/* Slow path - still do that in a huge chunk */
memory_region_clear_dirty_bitmap(rb->mr, start, length);
}
} else {
ram_addr_t offset = rb->offset;
for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
if (physical_memory_test_and_clear_dirty(
start + addr + offset,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
long k = (start + addr) >> TARGET_PAGE_BITS;
if (!test_and_set_bit(k, dest)) {
num_dirty++;
}
}
}
}
return num_dirty;
}
/* Called with RCU critical section */
static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
{
uint64_t new_dirty_pages =
cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length);
rs->migration_dirty_pages += new_dirty_pages;
rs->num_dirty_pages_period += new_dirty_pages;
@ -4370,7 +4447,7 @@ static bool ram_has_postcopy(void *opaque)
{
RAMBlock *rb;
RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
if (ramblock_is_pmem(rb)) {
if (ram_block_is_pmem(rb)) {
info_report("Block: %s, host: %p is a nvdimm memory, postcopy"
"is not supported now!", rb->idstr, rb->host);
return false;

@ -21,13 +21,6 @@ expression E1, E2, E3, E4, E5;
+ address_space_rw(E1, E2, E3, E4, E5, true)
|
- cpu_physical_memory_rw(E1, E2, E3, 0)
+ cpu_physical_memory_rw(E1, E2, E3, false)
|
- cpu_physical_memory_rw(E1, E2, E3, 1)
+ cpu_physical_memory_rw(E1, E2, E3, true)
|
- cpu_physical_memory_map(E1, E2, 0)
+ cpu_physical_memory_map(E1, E2, false)
|
@ -62,18 +55,6 @@ symbol true, false;
+ address_space_write(E1, E2, E3, E4, E5)
)
// Avoid uses of cpu_physical_memory_rw() with a constant is_write argument.
@@
expression E1, E2, E3;
@@
(
- cpu_physical_memory_rw(E1, E2, E3, false)
+ cpu_physical_memory_read(E1, E2, E3)
|
- cpu_physical_memory_rw(E1, E2, E3, true)
+ cpu_physical_memory_write(E1, E2, E3)
)
// Remove useless cast
@@
expression E1, E2, E3, E4, E5, E6;
@ -93,9 +74,6 @@ type T;
+ address_space_write_rom(E1, E2, E3, E4, E5)
|
- cpu_physical_memory_rw(E1, (T *)(E2), E3, E4)
+ cpu_physical_memory_rw(E1, E2, E3, E4)
|
- cpu_physical_memory_read(E1, (T *)(E2), E3)
+ cpu_physical_memory_read(E1, E2, E3)
|

@ -25,6 +25,7 @@
#include "qemu/target-info.h"
#include "qom/object.h"
#include "trace.h"
#include "system/physmem.h"
#include "system/ram_addr.h"
#include "system/kvm.h"
#include "system/runstate.h"
@ -2271,7 +2272,7 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size)
{
assert(mr->ram_block);
cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
size,
memory_region_get_dirty_log_mask(mr));
}
@ -2375,7 +2376,7 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
DirtyBitmapSnapshot *snapshot;
assert(mr->ram_block);
memory_region_sync_dirty_bitmap(mr, false);
snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
snapshot = physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
memory_global_after_dirty_log_sync();
return snapshot;
}
@ -2384,7 +2385,7 @@ bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *sna
hwaddr addr, hwaddr size)
{
assert(mr->ram_block);
return cpu_physical_memory_snapshot_get_dirty(snap,
return physical_memory_snapshot_get_dirty(snap,
memory_region_get_ram_addr(mr) + addr, size);
}
@ -2422,7 +2423,7 @@ void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size, unsigned client)
{
assert(mr->ram_block);
cpu_physical_memory_test_and_clear_dirty(
physical_memory_test_and_clear_dirty(
memory_region_get_ram_addr(mr) + addr, size, client);
}

@ -287,7 +287,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL,
dirty_log_mask = memory_region_get_dirty_log_mask(mr);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr,
4, dirty_log_mask);
r = MEMTX_OK;
}

@ -43,6 +43,8 @@
#include "system/kvm.h"
#include "system/tcg.h"
#include "system/qtest.h"
#include "system/physmem.h"
#include "system/ramblock.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
@ -898,8 +900,197 @@ void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
}
}
void physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length)
{
if (tcg_enabled()) {
tlb_reset_dirty_range_all(start, length);
}
}
static bool physical_memory_get_dirty(ram_addr_t start, ram_addr_t length,
unsigned client)
{
DirtyMemoryBlocks *blocks;
unsigned long end, page;
unsigned long idx, offset, base;
bool dirty = false;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
WITH_RCU_READ_LOCK_GUARD() {
blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
base = page - offset;
while (page < end) {
unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
unsigned long num = next - base;
unsigned long found = find_next_bit(blocks->blocks[idx],
num, offset);
if (found < num) {
dirty = true;
break;
}
page = next;
idx++;
offset = 0;
base += DIRTY_MEMORY_BLOCK_SIZE;
}
}
return dirty;
}
bool physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client)
{
return physical_memory_get_dirty(addr, 1, client);
}
bool physical_memory_is_clean(ram_addr_t addr)
{
bool vga = physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
bool code = physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
bool migration =
physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
return !(vga && code && migration);
}
static bool physical_memory_all_dirty(ram_addr_t start, ram_addr_t length,
unsigned client)
{
DirtyMemoryBlocks *blocks;
unsigned long end, page;
unsigned long idx, offset, base;
bool dirty = true;
assert(client < DIRTY_MEMORY_NUM);
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
RCU_READ_LOCK_GUARD();
blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
base = page - offset;
while (page < end) {
unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
unsigned long num = next - base;
unsigned long found = find_next_zero_bit(blocks->blocks[idx],
num, offset);
if (found < num) {
dirty = false;
break;
}
page = next;
idx++;
offset = 0;
base += DIRTY_MEMORY_BLOCK_SIZE;
}
return dirty;
}
uint8_t physical_memory_range_includes_clean(ram_addr_t start,
ram_addr_t length,
uint8_t mask)
{
uint8_t ret = 0;
if (mask & (1 << DIRTY_MEMORY_VGA) &&
!physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
ret |= (1 << DIRTY_MEMORY_VGA);
}
if (mask & (1 << DIRTY_MEMORY_CODE) &&
!physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
ret |= (1 << DIRTY_MEMORY_CODE);
}
if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
!physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
ret |= (1 << DIRTY_MEMORY_MIGRATION);
}
return ret;
}
void physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client)
{
unsigned long page, idx, offset;
DirtyMemoryBlocks *blocks;
assert(client < DIRTY_MEMORY_NUM);
page = addr >> TARGET_PAGE_BITS;
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
RCU_READ_LOCK_GUARD();
blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
set_bit_atomic(offset, blocks->blocks[idx]);
}
void physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
uint8_t mask)
{
DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
unsigned long end, page;
unsigned long idx, offset, base;
int i;
if (!mask && !xen_enabled()) {
return;
}
end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
page = start >> TARGET_PAGE_BITS;
WITH_RCU_READ_LOCK_GUARD() {
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
}
idx = page / DIRTY_MEMORY_BLOCK_SIZE;
offset = page % DIRTY_MEMORY_BLOCK_SIZE;
base = page - offset;
while (page < end) {
unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
offset, next - page);
}
if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
offset, next - page);
}
if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
offset, next - page);
}
page = next;
idx++;
offset = 0;
base += DIRTY_MEMORY_BLOCK_SIZE;
}
}
if (xen_enabled()) {
xen_hvm_modified_memory(start, length);
}
}
/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
bool physical_memory_test_and_clear_dirty(ram_addr_t start,
ram_addr_t length,
unsigned client)
{
@ -941,13 +1132,20 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
}
if (dirty) {
cpu_physical_memory_dirty_bits_cleared(start, length);
physical_memory_dirty_bits_cleared(start, length);
}
return dirty;
}
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
static void physical_memory_clear_dirty_range(ram_addr_t addr, ram_addr_t length)
{
physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_MIGRATION);
physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_VGA);
physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_CODE);
}
DirtyBitmapSnapshot *physical_memory_snapshot_and_clear_dirty
(MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
DirtyMemoryBlocks *blocks;
@ -994,14 +1192,14 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
}
}
cpu_physical_memory_dirty_bits_cleared(start, length);
physical_memory_dirty_bits_cleared(start, length);
memory_region_clear_dirty_bitmap(mr, offset, length);
return snap;
}
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
bool physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
ram_addr_t length)
{
@ -1022,6 +1220,109 @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
return false;
}
uint64_t physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
ram_addr_t start,
ram_addr_t pages)
{
unsigned long i, j;
unsigned long page_number, c, nbits;
hwaddr addr;
ram_addr_t ram_addr;
uint64_t num_dirty = 0;
unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
/* start address is aligned at the start of a word? */
if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
(hpratio == 1)) {
unsigned long **blocks[DIRTY_MEMORY_NUM];
unsigned long idx;
unsigned long offset;
long k;
long nr = BITS_TO_LONGS(pages);
idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
DIRTY_MEMORY_BLOCK_SIZE);
WITH_RCU_READ_LOCK_GUARD() {
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
blocks[i] =
qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
}
for (k = 0; k < nr; k++) {
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
nbits = ctpopl(temp);
qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
if (global_dirty_tracking) {
qatomic_or(
&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
temp);
if (unlikely(
global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
total_dirty_pages += nbits;
}
}
num_dirty += nbits;
if (tcg_enabled()) {
qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
temp);
}
}
if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
offset = 0;
idx++;
}
}
}
if (xen_enabled()) {
xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
}
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL
: DIRTY_CLIENTS_NOCODE;
if (!global_dirty_tracking) {
clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
}
/*
* bitmap-traveling is faster than memory-traveling (for addr...)
* especially when most of the memory is not dirty.
*/
for (i = 0; i < len; i++) {
if (bitmap[i] != 0) {
c = leul_to_cpu(bitmap[i]);
nbits = ctpopl(c);
if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
total_dirty_pages += nbits;
}
num_dirty += nbits;
do {
j = ctzl(c);
c &= ~(1ul << j);
page_number = (i * HOST_LONG_BITS + j) * hpratio;
addr = page_number * TARGET_PAGE_SIZE;
ram_addr = start + addr;
physical_memory_set_dirty_range(ram_addr,
TARGET_PAGE_SIZE * hpratio, clients);
} while (c != 0);
}
}
}
return num_dirty;
}
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
uint16_t section);
static subpage_t *subpage_init(FlatView *fv, hwaddr base);
@ -1778,9 +2079,9 @@ int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp)
ram_block_notify_resize(block->host, oldsize, newsize);
}
cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
physical_memory_clear_dirty_range(block->offset, block->used_length);
block->used_length = newsize;
cpu_physical_memory_set_dirty_range(block->offset, block->used_length,
physical_memory_set_dirty_range(block->offset, block->used_length,
DIRTY_CLIENTS_ALL);
memory_region_set_size(block->mr, unaligned_size);
if (block->resized) {
@ -1802,7 +2103,7 @@ void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
#ifdef CONFIG_LIBPMEM
/* The lack of support for pmem should not block the sync */
if (ramblock_is_pmem(block)) {
if (ram_block_is_pmem(block)) {
void *addr = ramblock_ptr(block, start);
pmem_persist(addr, length);
return;
@ -1985,7 +2286,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
ram_list.version++;
qemu_mutex_unlock_ramlist();
cpu_physical_memory_set_dirty_range(new_block->offset,
physical_memory_set_dirty_range(new_block->offset,
new_block->used_length,
DIRTY_CLIENTS_ALL);
@ -2834,19 +3135,19 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr,
addr += ramaddr;
/* No early return if dirty_log_mask is or becomes 0, because
* cpu_physical_memory_set_dirty_range will still call
* physical_memory_set_dirty_range will still call
* xen_modified_memory.
*/
if (dirty_log_mask) {
dirty_log_mask =
cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask);
physical_memory_range_includes_clean(addr, length, dirty_log_mask);
}
if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) {
assert(tcg_enabled());
tb_invalidate_phys_range(NULL, addr, addr + length - 1);
dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE);
}
cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask);
physical_memory_set_dirty_range(addr, length, dirty_log_mask);
}
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size)
@ -3178,58 +3479,16 @@ MemTxResult address_space_set(AddressSpace *as, hwaddr addr,
return error;
}
void cpu_physical_memory_rw(hwaddr addr, void *buf,
hwaddr len, bool is_write)
void cpu_physical_memory_read(hwaddr addr, void *buf, hwaddr len)
{
address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
buf, len, is_write);
address_space_read(&address_space_memory, addr,
MEMTXATTRS_UNSPECIFIED, buf, len);
}
enum write_rom_type {
WRITE_DATA,
FLUSH_CACHE,
};
static inline MemTxResult address_space_write_rom_internal(AddressSpace *as,
hwaddr addr,
MemTxAttrs attrs,
const void *ptr,
hwaddr len,
enum write_rom_type type)
void cpu_physical_memory_write(hwaddr addr, const void *buf, hwaddr len)
{
hwaddr l;
uint8_t *ram_ptr;
hwaddr addr1;
MemoryRegion *mr;
const uint8_t *buf = ptr;
RCU_READ_LOCK_GUARD();
while (len > 0) {
l = len;
mr = address_space_translate(as, addr, &addr1, &l, true, attrs);
if (!memory_region_supports_direct_access(mr)) {
l = memory_access_size(mr, l, addr1);
} else {
/* ROM/RAM case */
ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
switch (type) {
case WRITE_DATA:
memcpy(ram_ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
break;
case FLUSH_CACHE:
flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l);
break;
}
}
len -= l;
addr += l;
if (buf) {
buf += l;
}
}
return MEMTX_OK;
address_space_write(&address_space_memory, addr,
MEMTXATTRS_UNSPECIFIED, buf, len);
}
/* used for ROM loading : can write in RAM and ROM */
@ -3237,11 +3496,28 @@ MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
MemTxAttrs attrs,
const void *buf, hwaddr len)
{
return address_space_write_rom_internal(as, addr, attrs,
buf, len, WRITE_DATA);
RCU_READ_LOCK_GUARD();
while (len > 0) {
hwaddr addr1, l = len;
MemoryRegion *mr = address_space_translate(as, addr, &addr1, &l,
true, attrs);
if (!memory_region_supports_direct_access(mr)) {
l = memory_access_size(mr, l, addr1);
} else {
/* ROM/RAM case */
void *ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
memcpy(ram_ptr, buf, l);
invalidate_and_set_dirty(mr, addr1, l);
}
len -= l;
addr += l;
buf += l;
}
return MEMTX_OK;
}
void cpu_flush_icache_range(hwaddr start, hwaddr len)
void address_space_flush_icache_range(AddressSpace *as, hwaddr addr, hwaddr len)
{
/*
* This function should do the same thing as an icache flush that was
@ -3253,9 +3529,22 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len)
return;
}
address_space_write_rom_internal(&address_space_memory,
start, MEMTXATTRS_UNSPECIFIED,
NULL, len, FLUSH_CACHE);
RCU_READ_LOCK_GUARD();
while (len > 0) {
hwaddr addr1, l = len;
MemoryRegion *mr = address_space_translate(as, addr, &addr1, &l, true,
MEMTXATTRS_UNSPECIFIED);
if (!memory_region_supports_direct_access(mr)) {
l = memory_access_size(mr, l, addr1);
} else {
/* ROM/RAM case */
void *ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l);
}
len -= l;
addr += l;
}
}
/*
@ -3371,6 +3660,17 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr,
return flatview_access_valid(fv, addr, len, is_write, attrs);
}
bool address_space_is_io(AddressSpace *as, hwaddr addr)
{
MemoryRegion *mr;
RCU_READ_LOCK_GUARD();
mr = address_space_translate(as, addr, &addr, NULL, false,
MEMTXATTRS_UNSPECIFIED);
return !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
}
static hwaddr
flatview_extend_translation(FlatView *fv, hwaddr addr,
hwaddr target_len,
@ -3765,19 +4065,6 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
return 0;
}
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
MemoryRegion*mr;
hwaddr l = 1;
RCU_READ_LOCK_GUARD();
mr = address_space_translate(&address_space_memory,
phys_addr, &phys_addr, &l, false,
MEMTXATTRS_UNSPECIFIED);
return !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
}
int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
RAMBlock *block;
@ -3794,18 +4081,18 @@ int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
}
/*
 * Unmap pages of memory from start to start+length such that
 * Unmap pages of memory from offset to offset+length such that
 * they a) read as 0, b) trigger whatever fault mechanism
 * the OS provides for postcopy.
 * The pages must be unmapped by the end of the function.
 * Returns: 0 on success, non-0 on failure.
 */
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
int ram_block_discard_range(RAMBlock *rb, uint64_t offset, size_t length)
{
int ret = -1;
uint8_t *host_startaddr = rb->host + start;
uint8_t *host_startaddr = rb->host + offset;
if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) {
error_report("%s: Unaligned start address: %p",
@@ -3813,7 +4100,7 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
goto err;
}
if ((start + length) <= rb->max_length) {
if ((offset + length) <= rb->max_length) {
bool need_madvise, need_fallocate;
if (!QEMU_IS_ALIGNED(length, rb->page_size)) {
error_report("%s: Unaligned length: %zx", __func__, length);
@@ -3864,11 +4151,11 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
}
ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start + rb->fd_offset, length);
offset + rb->fd_offset, length);
if (ret) {
ret = -errno;
error_report("%s: Failed to fallocate %s:%" PRIx64 "+%" PRIx64
" +%zx (%d)", __func__, rb->idstr, start,
" +%zx (%d)", __func__, rb->idstr, offset,
rb->fd_offset, length, ret);
goto err;
}
@@ -3876,7 +4163,7 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
ret = -ENOSYS;
error_report("%s: fallocate not available/file"
"%s:%" PRIx64 "+%" PRIx64 " +%zx (%d)", __func__,
rb->idstr, start, rb->fd_offset, length, ret);
rb->idstr, offset, rb->fd_offset, length, ret);
goto err;
#endif
}
@@ -3896,13 +4183,13 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
ret = -errno;
error_report("%s: Failed to discard range "
"%s:%" PRIx64 " +%zx (%d)",
__func__, rb->idstr, start, length, ret);
__func__, rb->idstr, offset, length, ret);
goto err;
}
#else
ret = -ENOSYS;
error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)",
__func__, rb->idstr, start, length, ret);
__func__, rb->idstr, offset, length, ret);
goto err;
#endif
}
@@ -3910,14 +4197,14 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length)
need_madvise, need_fallocate, ret);
} else {
error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")",
__func__, rb->idstr, start, length, rb->max_length);
__func__, rb->idstr, offset, length, rb->max_length);
}
err:
return ret;
}
int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t offset,
size_t length)
{
int ret = -1;
@@ -3925,23 +4212,23 @@ int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
#ifdef CONFIG_FALLOCATE_PUNCH_HOLE
/* ignore fd_offset with guest_memfd */
ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
start, length);
offset, length);
if (ret) {
ret = -errno;
error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)",
__func__, rb->idstr, start, length, ret);
__func__, rb->idstr, offset, length, ret);
}
#else
ret = -ENOSYS;
error_report("%s: fallocate not available %s:%" PRIx64 " +%zx (%d)",
__func__, rb->idstr, start, length, ret);
__func__, rb->idstr, offset, length, ret);
#endif
return ret;
}
bool ramblock_is_pmem(RAMBlock *rb)
bool ram_block_is_pmem(RAMBlock *rb)
{
return rb->flags & RAM_PMEM;
}
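The renamed discard helpers above keep the documented contract: offset and length must be aligned to the block's page size, the range must stay within rb->max_length, and a non-zero return means the pages are still mapped. A hedged usage sketch, not taken from this series; discard_one_page(), rb and page_offset are placeholders, and the header locations are assumptions.

    #include "qemu/osdep.h"
    #include "qemu/error-report.h"
    #include "system/ramblock.h"   /* RAMBlock and discard API (location assumed) */

    /* Hypothetical postcopy-style discard of a single page so that it reads
     * back as zero and faults on the next guest access. */
    static void discard_one_page(RAMBlock *rb, uint64_t page_offset)
    {
        size_t page = qemu_ram_pagesize(rb);

        /* Both the offset and the length must be page-aligned. */
        if (ram_block_discard_range(rb, QEMU_ALIGN_DOWN(page_offset, page), page)) {
            error_report("discard in RAMBlock '%s' failed", qemu_ram_get_idstr(rb));
        }
    }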


@@ -21,12 +21,13 @@
#include "qemu/log.h"
#include "cpu.h"
#include "internals.h"
#include "exec/target_page.h"
#include "exec/page-protection.h"
#ifdef CONFIG_USER_ONLY
#include "user/cpu_loop.h"
#include "user/page-protection.h"
#else
#include "system/ram_addr.h"
#include "system/physmem.h"
#endif
#include "accel/tcg/cpu-ldst.h"
#include "accel/tcg/probe.h"
@@ -188,7 +189,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx,
*/
if (tag_access == MMU_DATA_STORE) {
ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat;
cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION);
}
return memory_region_get_ram_ptr(mr) + xlat;


@@ -35,7 +35,7 @@ static void walk_pte(MemoryMappingList *list, AddressSpace *as,
}
start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
if (cpu_physical_memory_is_io(start_paddr)) {
if (address_space_is_io(as, start_paddr)) {
/* I/O region */
continue;
}
@@ -65,7 +65,7 @@ static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
}
start_paddr = pte & ~0xfff;
if (cpu_physical_memory_is_io(start_paddr)) {
if (address_space_is_io(as, start_paddr)) {
/* I/O region */
continue;
}
@@ -100,7 +100,7 @@ static void walk_pde(MemoryMappingList *list, AddressSpace *as,
if (pde & PG_PSE_MASK) {
/* 2 MB page */
start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
if (cpu_physical_memory_is_io(start_paddr)) {
if (address_space_is_io(as, start_paddr)) {
/* I/O region */
continue;
}
@@ -142,7 +142,7 @@ static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
*/
high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
start_paddr = (pde & ~0x3fffff) | high_paddr;
if (cpu_physical_memory_is_io(start_paddr)) {
if (address_space_is_io(as, start_paddr)) {
/* I/O region */
continue;
}
@@ -203,7 +203,7 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
if (pdpe & PG_PSE_MASK) {
/* 1 GB page */
start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
if (cpu_physical_memory_is_io(start_paddr)) {
if (address_space_is_io(as, start_paddr)) {
/* I/O region */
continue;
}


@@ -21,6 +21,7 @@
#include "system/address-spaces.h"
#include "xen-emu.h"
#include "trace.h"
#include "system/memory.h"
#include "system/runstate.h"
#include "hw/pci/msi.h"
@@ -75,6 +76,7 @@ static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa,
static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
bool is_write)
{
AddressSpace *as = cpu_addressspace(cs, MEMTXATTRS_UNSPECIFIED);
uint8_t *buf = (uint8_t *)_buf;
uint64_t gpa;
size_t len;
@@ -87,7 +89,7 @@ static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz,
len = sz;
}
cpu_physical_memory_rw(gpa, buf, len, is_write);
address_space_rw(as, gpa, MEMTXATTRS_UNSPECIFIED, buf, len, is_write);
buf += len;
sz -= len;
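This kvm/xen hunk and the nvmm, whpx and s390x hunks below apply the same mechanical conversion from the legacy helper, which always used the global system address space, to an explicit address-space access. A sketch of the pattern with placeholder names (guest_phys_rw, as, gpa, buf, len and is_write are not from the series):

    /* Before this series:
     *     cpu_physical_memory_rw(gpa, buf, len, is_write);
     * After: the target AddressSpace and the transaction attributes are
     * explicit, so call sites with a CPU at hand can use that CPU's address
     * space instead of &address_space_memory. */
    static MemTxResult guest_phys_rw(AddressSpace *as, hwaddr gpa,
                                     void *buf, hwaddr len, bool is_write)
    {
        return address_space_rw(as, gpa, MEMTXATTRS_UNSPECIFIED,
                                buf, len, is_write);
    }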


@@ -15,6 +15,7 @@
#include "accel/accel-ops.h"
#include "system/nvmm.h"
#include "system/cpus.h"
#include "system/memory.h"
#include "system/runstate.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
@@ -516,7 +517,9 @@ nvmm_io_callback(struct nvmm_io *io)
static void
nvmm_mem_callback(struct nvmm_mem *mem)
{
cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write);
/* TODO: Get CPUState via mem->vcpu? */
address_space_rw(&address_space_memory, mem->gpa, MEMTXATTRS_UNSPECIFIED,
mem->data, mem->size, mem->write);
/* Needed, otherwise infinite loop. */
current_cpu->vcpu_dirty = false;


@@ -788,8 +788,11 @@ static HRESULT CALLBACK whpx_emu_mmio_callback(
void *ctx,
WHV_EMULATOR_MEMORY_ACCESS_INFO *ma)
{
cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize,
ma->Direction);
CPUState *cs = (CPUState *)ctx;
AddressSpace *as = cpu_addressspace(cs, MEMTXATTRS_UNSPECIFIED);
address_space_rw(as, ma->GpaAddress, MEMTXATTRS_UNSPECIFIED,
ma->Data, ma->AccessSize, ma->Direction);
return S_OK;
}


@@ -23,6 +23,7 @@
#include "kvm/kvm_s390x.h"
#include "system/kvm.h"
#include "system/tcg.h"
#include "system/memory.h"
#include "exec/page-protection.h"
#include "exec/target_page.h"
#include "hw/hw.h"
@@ -522,6 +523,7 @@ int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf,
int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
int len, bool is_write)
{
const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int currlen, nr_pages, i;
target_ulong *pages;
uint64_t tec;
@@ -542,11 +544,13 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
if (ret) {
trigger_access_exception(&cpu->env, ret, tec);
} else if (hostbuf != NULL) {
AddressSpace *as = CPU(cpu)->as;
/* Copy data by stepping through the area page by page */
for (i = 0; i < nr_pages; i++) {
currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE));
cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK),
hostbuf, currlen, is_write);
address_space_rw(as, pages[i] | (laddr & ~TARGET_PAGE_MASK),
attrs, hostbuf, currlen, is_write);
laddr += currlen;
hostbuf += currlen;
len -= currlen;


@@ -4,7 +4,7 @@
# The eventual goal would be to fix these warnings.
# TSan is not happy about setting/getting of dirty bits,
# for example, cpu_physical_memory_set_dirty_range,
# and cpu_physical_memory_get_dirty.
# for example, physical_memory_set_dirty_range,
# and physical_memory_get_dirty.
src:bitops.c
src:bitmap.c