migration: Drop save_live_complete_postcopy hook

The hook is only implemented by two vmstate users ("ram" and "block dirty
bitmap"), and both of them wire it to the same function as the precopy
hook.  Hence, the postcopy version isn't needed.

No functional change intended.

Reviewed-by: Juraj Marcin <jmarcin@redhat.com>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20250613140801.474264-6-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Fabiano Rosas <farosas@suse.de>
Author:    Peter Xu <peterx@redhat.com>
Date:      2025-06-13 10:07:55 -04:00
Committer: Fabiano Rosas <farosas@suse.de>
Parent:    2145f38c31
Commit:    d7530a9682

4 changed files with 12 additions and 23 deletions
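
The redundancy is easy to see in the two handler tables touched below: each registers the same completion callback for both hooks, so dropping the postcopy one loses nothing. Below is a minimal, self-contained sketch of that pattern; the struct and names are simplified stand-ins, not the real QEMU definitions.

#include <stdio.h>

/* Simplified stand-in for the two completion hooks of SaveVMHandlers. */
typedef struct {
    int (*save_live_complete_precopy)(void *opaque);
    int (*save_live_complete_postcopy)(void *opaque);
} Handlers;

/* Stand-in for ram_save_complete() / dirty_bitmap_save_complete(). */
static int save_complete(void *opaque)
{
    printf("flush remaining state for %s\n", (const char *)opaque);
    return 0;
}

/* Before this patch, both hooks point at the same function, ... */
static const Handlers handlers = {
    .save_live_complete_precopy  = save_complete,
    .save_live_complete_postcopy = save_complete,
};

int main(void)
{
    /* ...so calling the precopy hook from the postcopy completion path
     * is equivalent, which is what the savevm.c change below does. */
    return handlers.save_live_complete_precopy((void *)"ram");
}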

--- a/include/migration/register.h
+++ b/include/migration/register.h

@@ -77,26 +77,18 @@ typedef struct SaveVMHandlers {
      */
     void (*save_cleanup)(void *opaque);
 
-    /**
-     * @save_live_complete_postcopy
-     *
-     * Called at the end of postcopy for all postcopyable devices.
-     *
-     * @f: QEMUFile where to send the data
-     * @opaque: data pointer passed to register_savevm_live()
-     *
-     * Returns zero to indicate success and negative for error
-     */
-    int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
-
     /**
      * @save_live_complete_precopy
      *
      * Transmits the last section for the device containing any
-     * remaining data at the end of a precopy phase. When postcopy is
-     * enabled, devices that support postcopy will skip this step,
-     * where the final data will be flushed at the end of postcopy via
-     * @save_live_complete_postcopy instead.
+     * remaining data at the end phase of migration.
+     *
+     * For precopy, this will be invoked _during_ the switchover phase
+     * after source VM is stopped.
+     *
+     * For postcopy, this will be invoked _after_ the switchover phase
+     * (except some very unusual cases, like PMEM ramblocks), while
+     * destination VM can be running.
      *
      * @f: QEMUFile where to send the data
      * @opaque: data pointer passed to register_savevm_live()
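
The reworded comment above spells out when the single remaining hook runs: during the switchover phase for precopy (after the source VM is stopped), and after switchover for postcopy (while the destination VM may already be running). A rough, self-contained sketch of that contract follows; the types and helper names are hypothetical stand-ins, not the real migration-core code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the SaveVMHandlers fields that matter here. */
typedef struct {
    bool (*has_postcopy)(void *opaque);
    int (*save_live_complete_precopy)(void *opaque);
} CompletionOps;

/* Precopy path: runs during switchover, after the source VM stops. */
static int complete_during_switchover(const CompletionOps *ops, void *opaque)
{
    return ops->save_live_complete_precopy
           ? ops->save_live_complete_precopy(opaque) : 0;
}

/* Postcopy path: runs after switchover, while the destination may already
 * be executing; the very same hook flushes whatever state remains. */
static int complete_after_switchover(const CompletionOps *ops, void *opaque)
{
    if (!ops->has_postcopy || !ops->has_postcopy(opaque)) {
        return 0;   /* not postcopy-able: nothing left to send this late */
    }
    return ops->save_live_complete_precopy
           ? ops->save_live_complete_precopy(opaque) : 0;
}

/* A toy postcopy-able device, standing in for "ram". */
static bool toy_has_postcopy(void *opaque) { (void)opaque; return true; }
static int toy_complete(void *opaque)
{
    printf("sending final section for %s\n", (const char *)opaque);
    return 0;
}

int main(void)
{
    const CompletionOps ops = {
        .has_postcopy = toy_has_postcopy,
        .save_live_complete_precopy = toy_complete,
    };

    /* Both completion paths dispatch through the one remaining hook. */
    complete_during_switchover(&ops, (void *)"toy-device");
    return complete_after_switchover(&ops, (void *)"toy-device");
}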

--- a/migration/block-dirty-bitmap.c
+++ b/migration/block-dirty-bitmap.c

@@ -1248,7 +1248,6 @@ static bool dirty_bitmap_has_postcopy(void *opaque)
 
 static SaveVMHandlers savevm_dirty_bitmap_handlers = {
     .save_setup = dirty_bitmap_save_setup,
-    .save_live_complete_postcopy = dirty_bitmap_save_complete,
     .save_live_complete_precopy = dirty_bitmap_save_complete,
     .has_postcopy = dirty_bitmap_has_postcopy,
     .state_pending_exact = dirty_bitmap_state_pending,

--- a/migration/ram.c
+++ b/migration/ram.c

@@ -4548,7 +4548,6 @@ void postcopy_preempt_shutdown_file(MigrationState *s)
 static SaveVMHandlers savevm_ram_handlers = {
     .save_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
-    .save_live_complete_postcopy = ram_save_complete,
     .save_live_complete_precopy = ram_save_complete,
     .has_postcopy = ram_has_postcopy,
     .state_pending_exact = ram_state_pending_exact,

--- a/migration/savevm.c
+++ b/migration/savevm.c

@@ -1485,9 +1485,8 @@ bool should_send_vmdesc(void)
 }
 
 /*
- * Calls the save_live_complete_postcopy methods
- * causing the last few pages to be sent immediately and doing any associated
- * cleanup.
+ * Complete saving any postcopy-able devices.
+ *
  * Note postcopy also calls qemu_savevm_state_complete_precopy to complete
  * all the other devices, but that happens at the point we switch to postcopy.
  */
@@ -1497,7 +1496,7 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
     int ret;
 
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
-        if (!se->ops || !se->ops->save_live_complete_postcopy) {
+        if (!se->ops || !se->ops->save_live_complete_precopy) {
             continue;
         }
         if (se->ops->is_active) {
@@ -1510,7 +1509,7 @@ void qemu_savevm_state_complete_postcopy(QEMUFile *f)
         qemu_put_byte(f, QEMU_VM_SECTION_END);
         qemu_put_be32(f, se->section_id);
 
-        ret = se->ops->save_live_complete_postcopy(f, se->opaque);
+        ret = se->ops->save_live_complete_precopy(f, se->opaque);
         trace_savevm_section_end(se->idstr, se->section_id, ret);
         save_section_footer(f, se);
         if (ret < 0) {