accel: use atomic accesses for exit_request

CPU threads write exit_request as a "note to self" that they need to
go out to a slow path.  This write happens out of the BQL and can be
a data race with another thread's cpu_exit(); use atomic accesses
consistently.

While at it, change the source argument from int ("1") to bool ("true").

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Paolo Bonzini 2025-08-08 18:55:48 +02:00
parent ac6c8a390b
commit f084ff128b
9 changed files with 24 additions and 15 deletions

View file

@ -3730,7 +3730,7 @@ int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
have_sigbus_pending = true;
pending_sigbus_addr = addr;
pending_sigbus_code = code;
qatomic_set(&cpu->exit_request, 1);
qatomic_set(&cpu->exit_request, true);
return 0;
#else
return 1;

View file

@ -85,7 +85,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);
/* process any pending work */
cpu->exit_request = 1;
qatomic_set(&cpu->exit_request, true);
do {
if (cpu_can_run(cpu)) {

View file

@ -212,7 +212,7 @@ static void *rr_cpu_thread_fn(void *arg)
cpu = first_cpu;
/* process any pending work */
cpu->exit_request = 1;
qatomic_set(&cpu->exit_request, true);
while (1) {
/* Only used for icount_enabled() */
@ -293,7 +293,7 @@ static void *rr_cpu_thread_fn(void *arg)
/* Does not need a memory barrier because a spurious wakeup is okay. */
qatomic_set(&rr_current_cpu, NULL);
if (cpu && cpu->exit_request) {
if (cpu && qatomic_read(&cpu->exit_request)) {
qatomic_set_mb(&cpu->exit_request, 0);
}

View file

@ -509,7 +509,7 @@ static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr,
if (!cpu_has_work(cs)) {
cs->halted = 1;
cs->exception_index = EXCP_HLT;
cs->exit_request = 1;
qatomic_set(&cs->exit_request, true);
ppc_maybe_interrupt(env);
}
@ -531,7 +531,7 @@ static target_ulong h_confer_self(PowerPCCPU *cpu)
}
cs->halted = 1;
cs->exception_index = EXCP_HALTED;
cs->exit_request = 1;
qatomic_set(&cs->exit_request, true);
ppc_maybe_interrupt(&cpu->env);
return H_SUCCESS;
@ -624,7 +624,7 @@ static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr,
}
cs->exception_index = EXCP_YIELD;
cs->exit_request = 1;
qatomic_set(&cs->exit_request, true);
cpu_loop_exit(cs);
return H_SUCCESS;

View file

@ -422,6 +422,15 @@ struct qemu_work_item;
* valid under cpu_list_lock.
* @created: Indicates whether the CPU thread has been successfully created.
* @halt_cond: condition variable sleeping threads can wait on.
* @exit_request: Another thread requests the CPU to call qemu_wait_io_event().
* Should be read only by CPU thread with load-acquire, to synchronize with
* other threads' store-release operation.
*
 * In some cases, accelerator-specific code will write exit_request from
 * within the same thread, to "bump" the effect of qemu_cpu_kick() to
 * the one provided by cpu_exit(), especially when processing interrupt
 * flags. In this case, the write and read happen in the same thread
 * and the write therefore can use qatomic_set().
* @interrupt_request: Indicates a pending interrupt request.
* Only used by system emulation.
* @halted: Nonzero if the CPU is in suspended state.

View file

@ -5486,10 +5486,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
qatomic_set(&cpu->exit_request, 1);
qatomic_set(&cpu->exit_request, true);
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
qatomic_set(&cpu->exit_request, 1);
qatomic_set(&cpu->exit_request, true);
}
}
@ -5604,7 +5604,7 @@ int kvm_arch_process_async_events(CPUState *cs)
if (env->exception_nr == EXCP08_DBLE) {
/* this means triple fault */
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
cs->exit_request = 1;
qatomic_set(&cs->exit_request, true);
return 0;
}
kvm_queue_exception(env, EXCP12_MCHK, 0, 0);

View file

@ -77,7 +77,7 @@ static void nvmm_start_vcpu_thread(CPUState *cpu)
*/
static void nvmm_kick_vcpu_thread(CPUState *cpu)
{
cpu->exit_request = 1;
qatomic_set(&cpu->exit_request, true);
cpus_kick_thread(cpu);
}

View file

@ -414,7 +414,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
* or commit pending TPR access.
*/
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
cpu->exit_request = 1;
qatomic_set(&cpu->exit_request, true);
}
if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {

View file

@ -1489,10 +1489,10 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
cpu->exit_request = 1;
qatomic_set(&cpu->exit_request, true);
}
if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
cpu->exit_request = 1;
qatomic_set(&cpu->exit_request, true);
}
}
@ -1539,7 +1539,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
if (tpr != vcpu->tpr) {
vcpu->tpr = tpr;
reg_values[reg_count].Reg64 = tpr;
cpu->exit_request = 1;
qatomic_set(&cpu->exit_request, true);
reg_names[reg_count] = WHvX64RegisterCr8;
reg_count += 1;
}