system/physmem: Un-inline cpu_physical_memory_set_dirty_range()

Avoid maintaining large functions in headers; rely on the linker
to optimize them at link time.

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20251001175448.18933-12-philmd@linaro.org>
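
For context, this is the usual un-inlining pattern: the header keeps only the
prototype, the definition moves into a single translation unit, and any
cross-module inlining is left to a link-time optimizer. A minimal standalone
sketch of that pattern, using hypothetical file and function names rather than
the actual QEMU declarations:

/* dirty.h -- after un-inlining, the header only declares the function.
 * (Hypothetical names, for illustration only.) */
#ifndef DIRTY_H
#define DIRTY_H
#include <stddef.h>

void set_dirty_range(unsigned long *bitmap, size_t start, size_t length);

#endif

/* dirty.c -- the (formerly "static inline") body now lives in one
 * translation unit; an LTO-enabled linker may still inline calls to it. */
#include "dirty.h"

void set_dirty_range(unsigned long *bitmap, size_t start, size_t length)
{
    /* Mark pages [start, start + length) dirty, one bit per page. */
    for (size_t page = start; page < start + length; page++) {
        bitmap[page / (8 * sizeof(unsigned long))] |=
            1UL << (page % (8 * sizeof(unsigned long)));
    }
}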
Philippe Mathieu-Daudé 2025-09-29 13:40:29 +02:00
parent 62c889eb7d
commit 6290580e9b
2 changed files with 53 additions and 51 deletions
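
To make the hunk below easier to follow: the moved function walks the page
range in DIRTY_MEMORY_BLOCK_SIZE-sized chunks, setting a bit range in the
current block of each selected dirty-memory client. The following is a
self-contained sketch of that chunked walk in plain C; all names are
simplified stand-ins (bitmap_set() stands in for bitmap_set_atomic()), not
QEMU code:

#include <stdio.h>

#define BLOCK_SIZE 64 /* pages per block; QEMU uses a much larger constant */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Stand-in for bitmap_set_atomic(): mark 'count' pages dirty in one block. */
static void bitmap_set(unsigned long *block, unsigned long offset,
                       unsigned long count)
{
    (void)block;
    printf("set bits [%lu, %lu) in current block\n", offset, offset + count);
}

static void set_dirty_range(unsigned long *blocks[], unsigned long page,
                            unsigned long end)
{
    unsigned long idx = page / BLOCK_SIZE;     /* which block we are in   */
    unsigned long offset = page % BLOCK_SIZE;  /* bit offset inside block */
    unsigned long base = page - offset;        /* first page of the block */

    while (page < end) {
        /* Stop at the end of the current block or the end of the range. */
        unsigned long next = MIN(end, base + BLOCK_SIZE);

        bitmap_set(blocks[idx], offset, next - page);

        page = next;
        idx++;
        offset = 0;            /* subsequent blocks start at bit 0 */
        base += BLOCK_SIZE;
    }
}

int main(void)
{
    unsigned long *blocks[4] = { 0 };
    set_dirty_range(blocks, 10, 150); /* spans three 64-page blocks */
    return 0;
}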


@@ -1030,6 +1030,57 @@ void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client)
    set_bit_atomic(offset, blocks->blocks[idx]);
}

void cpu_physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length,
                                         uint8_t mask)
{
    DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
    unsigned long end, page;
    unsigned long idx, offset, base;
    int i;

    if (!mask && !xen_enabled()) {
        return;
    }

    end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
    page = start >> TARGET_PAGE_BITS;

    WITH_RCU_READ_LOCK_GUARD() {
        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
            blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]);
        }

        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
        base = page - offset;
        while (page < end) {
            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);

            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
                                  offset, next - page);
            }

            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
                                  offset, next - page);
            }

            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
                                  offset, next - page);
            }

            page = next;
            idx++;
            offset = 0;
            base += DIRTY_MEMORY_BLOCK_SIZE;
        }
    }

    if (xen_enabled()) {
        xen_hvm_modified_memory(start, length);
    }
}

/* Note: start and end must be within the same ram block. */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,