cpus: properly kick CPUs out of inner execution loop

Now that cpu_exit() actually kicks all accelerators, use it whenever
sending another thread a message that is processed in qemu_wait_io_event().

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini 2025-08-01 13:50:04 +02:00
parent dcb46ecb2e
commit f8217ae54e
9 changed files with 17 additions and 14 deletions
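
For background, the pattern this commit consolidates is "raise the exit request, then kick the thread": the hunks below replace open-coded qatomic_set(&cs->exit_request, true) and qemu_cpu_kick() calls with cpu_exit(), which (per the series this change belongs to) now does both for every accelerator, so the vCPU is guaranteed to leave the inner execution loop and reach qemu_wait_io_event_common(). The program below is only a minimal, self-contained sketch of that flag-plus-kick idea; the ToyCPU type and toy_* names are invented for illustration and are not QEMU's implementation.

/* Illustrative sketch only: simplified, not QEMU code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    atomic_bool exit_request;   /* polled by the inner execution loop */
    pthread_mutex_t lock;
    pthread_cond_t halt_cond;   /* where a halted vCPU would sleep; this sketch only busy-polls */
} ToyCPU;

/* Analogue of a plain kick: wake a (hypothetically) halted vCPU; does not stop the loop. */
static void toy_cpu_kick(ToyCPU *cpu)
{
    pthread_mutex_lock(&cpu->lock);
    pthread_cond_signal(&cpu->halt_cond);
    pthread_mutex_unlock(&cpu->lock);
}

/* Analogue of cpu_exit(): request that the inner loop stop, then kick. */
static void toy_cpu_exit(ToyCPU *cpu)
{
    atomic_store(&cpu->exit_request, true);
    toy_cpu_kick(cpu);
}

static void *vcpu_thread(void *opaque)
{
    ToyCPU *cpu = opaque;

    while (!atomic_load(&cpu->exit_request)) {
        /* inner execution loop: guest code would run here */
    }
    puts("vCPU left the inner loop");
    return NULL;
}

int main(void)
{
    static ToyCPU cpu = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .halt_cond = PTHREAD_COND_INITIALIZER,
    };
    pthread_t thread;

    pthread_create(&thread, NULL, vcpu_thread, &cpu);
    toy_cpu_exit(&cpu);   /* a bare toy_cpu_kick() would not end the loop */
    pthread_join(thread, NULL);
    return 0;
}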


@@ -137,7 +137,8 @@ static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
     wi->done = false;
     qemu_mutex_unlock(&cpu->work_mutex);
 
-    qemu_cpu_kick(cpu);
+    /* exit the inner loop and reach qemu_wait_io_event_common(). */
+    cpu_exit(cpu);
 }
 
 void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,


@@ -190,6 +190,7 @@ static void ppc970_set_irq(void *opaque, int pin, int level)
         if (level) {
             trace_ppc_irq_cpu("stop");
             cs->halted = 1;
+            cpu_exit(cs);
         } else {
             trace_ppc_irq_cpu("restart");
             cs->halted = 0;
@@ -386,6 +387,7 @@ static void ppc40x_set_irq(void *opaque, int pin, int level)
         if (level) {
             trace_ppc_irq_cpu("stop");
             cs->halted = 1;
+            cpu_exit(cs);
         } else {
             trace_ppc_irq_cpu("restart");
             cs->halted = 0;


@@ -509,8 +509,8 @@ static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
     if (!cpu_has_work(cs)) {
         cs->halted = 1;
         cs->exception_index = EXCP_HLT;
-        qatomic_set(&cs->exit_request, true);
         ppc_maybe_interrupt(env);
+        cpu_exit(cs);
     }
 
     return H_SUCCESS;
@@ -531,8 +531,8 @@ static target_ulong h_confer_self(PowerPCCPU *cpu)
     }
     cs->halted = 1;
     cs->exception_index = EXCP_HALTED;
-    qatomic_set(&cs->exit_request, true);
     ppc_maybe_interrupt(&cpu->env);
+    cpu_exit(cs);
 
     return H_SUCCESS;
 }
@@ -624,8 +624,7 @@ static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
     }
 
     cs->exception_index = EXCP_YIELD;
-    qatomic_set(&cs->exit_request, true);
-    cpu_loop_exit(cs);
+    cpu_exit(cs);
 
     return H_SUCCESS;
 }


@@ -221,7 +221,7 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr,
     cs->halted = 1;
     ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm);
     kvmppc_set_reg_ppc_online(cpu, 0);
-    qemu_cpu_kick(cs);
+    cpu_exit(cs);
 }
 
 static void rtas_ibm_suspend_me(PowerPCCPU *cpu, SpaprMachineState *spapr,


@@ -118,7 +118,8 @@ void replay_add_event(ReplayAsyncEventKind event_kind,
     g_assert(replay_mutex_locked());
 
     QTAILQ_INSERT_TAIL(&events_list, event, events);
-    qemu_cpu_kick(first_cpu);
+    /* Kick the TCG thread out of tcg_cpu_exec(). */
+    cpu_exit(first_cpu);
 }
 
 void replay_bh_schedule_event(QEMUBH *bh)


@@ -246,14 +246,14 @@ void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
 
     if (qemu_in_vcpu_thread()) {
         /*
-         * A CPU is currently running; kick it back out to the
+         * A CPU is currently running; send it out of the
          * tcg_cpu_exec() loop so it will recalculate its
          * icount deadline immediately.
          */
-        qemu_cpu_kick(current_cpu);
+        cpu_exit(current_cpu);
     } else if (first_cpu) {
         /*
-         * qemu_cpu_kick is not enough to kick a halted CPU out of
+         * cpu_exit() is not enough to kick a halted CPU out of
          * qemu_tcg_wait_io_event.  async_run_on_cpu, instead,
          * causes cpu_thread_is_idle to return false.  This way,
          * handle_icount_deadline can run.


@@ -604,7 +604,7 @@ void cpu_pause(CPUState *cpu)
         qemu_cpu_stop(cpu, true);
     } else {
         cpu->stop = true;
-        qemu_cpu_kick(cpu);
+        cpu_exit(cpu);
     }
 }
 
@@ -644,6 +644,7 @@ void pause_all_vcpus(void)
 
     while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &bql);
+        /* FIXME: is this needed? */
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
@@ -672,7 +673,7 @@ void cpu_remove_sync(CPUState *cpu)
 {
     cpu->stop = true;
     cpu->unplug = true;
-    qemu_cpu_kick(cpu);
+    cpu_exit(cpu);
     bql_unlock();
     qemu_thread_join(cpu->thread);
     bql_lock();


@@ -591,7 +591,7 @@ static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr,
      * which is rather sooner than "normal". But the alternative
      * is waiting until the next syscall.
      */
-    qemu_cpu_kick(env_cpu(env));
+    cpu_exit(env_cpu(env));
 #endif
 }


@@ -81,7 +81,6 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
          * necessary because memory hierarchy is being changed
          */
         async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL);
-        cpu_exit(CPU(cpu));
 
         return EXCP_INTERRUPT;
     case KVM_EXIT_HYPERV_HCALL: {