accel: make all calls to qemu_process_cpu_events look the same
There is no reason for some accelerators to use qemu_process_cpu_events_common() (which is separated from qemu_process_cpu_events() specifically for round-robin TCG). They can also check for events directly on the first pass through the loop, instead of setting cpu->exit_request to true.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
9a191d3782
commit
d5e33b5f8f
7 changed files with 30 additions and 39 deletions
|
|
@ -43,6 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
|
|||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
do {
|
||||
qemu_process_cpu_events(cpu);
|
||||
bql_unlock();
|
||||
#ifndef _WIN32
|
||||
do {
|
||||
|
|
@ -57,7 +58,6 @@ static void *dummy_cpu_thread_fn(void *arg)
|
|||
qemu_sem_wait(&cpu->sem);
|
||||
#endif
|
||||
bql_lock();
|
||||
qemu_process_cpu_events(cpu);
|
||||
} while (!cpu->unplug);
|
||||
|
||||
bql_unlock();
|
||||
|
|
|
|||
|
|
@ -192,13 +192,13 @@ static void *hvf_cpu_thread_fn(void *arg)
|
|||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
do {
|
||||
qemu_process_cpu_events(cpu);
|
||||
if (cpu_can_run(cpu)) {
|
||||
r = hvf_vcpu_exec(cpu);
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
qemu_process_cpu_events(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
hvf_vcpu_destroy(cpu);
|
||||
|
|
|
|||
|
|
@ -47,13 +47,14 @@ static void *kvm_vcpu_thread_fn(void *arg)
|
|||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
do {
|
||||
qemu_process_cpu_events(cpu);
|
||||
|
||||
if (cpu_can_run(cpu)) {
|
||||
r = kvm_cpu_exec(cpu);
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
qemu_process_cpu_events(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
kvm_destroy_vcpu(cpu);
|
||||
|
|
|
|||
|
|
@ -84,10 +84,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
|||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
/* process any pending work */
|
||||
qatomic_set(&cpu->exit_request, true);
|
||||
|
||||
do {
|
||||
qemu_process_cpu_events(cpu);
|
||||
|
||||
if (cpu_can_run(cpu)) {
|
||||
int r;
|
||||
bql_unlock();
|
||||
|
|
@ -112,8 +111,6 @@ static void *mttcg_cpu_thread_fn(void *arg)
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
qemu_process_cpu_events(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
tcg_cpu_destroy(cpu);
|
||||
|
|
|
|||
|
|
@ -211,13 +211,30 @@ static void *rr_cpu_thread_fn(void *arg)
|
|||
|
||||
cpu = first_cpu;
|
||||
|
||||
/* process any pending work */
|
||||
qatomic_set(&cpu->exit_request, true);
|
||||
|
||||
while (1) {
|
||||
/* Only used for icount_enabled() */
|
||||
int64_t cpu_budget = 0;
|
||||
|
||||
if (cpu) {
|
||||
/*
|
||||
* This could even reset exit_request for all CPUs, but in practice
|
||||
* races between CPU exits and changes to "cpu" are so rare that
|
||||
* there's no advantage in doing so.
|
||||
*/
|
||||
qatomic_set(&cpu->exit_request, false);
|
||||
}
|
||||
|
||||
if (icount_enabled() && all_cpu_threads_idle()) {
|
||||
/*
|
||||
* When all cpus are sleeping (e.g in WFI), to avoid a deadlock
|
||||
* in the main_loop, wake it up in order to start the warp timer.
|
||||
*/
|
||||
qemu_notify_event();
|
||||
}
|
||||
|
||||
rr_wait_io_event();
|
||||
rr_deal_with_unplugged_cpus();
|
||||
|
||||
bql_unlock();
|
||||
replay_mutex_lock();
|
||||
bql_lock();
|
||||
|
|
@ -292,26 +309,6 @@ static void *rr_cpu_thread_fn(void *arg)
|
|||
|
||||
/* Does not need a memory barrier because a spurious wakeup is okay. */
|
||||
qatomic_set(&rr_current_cpu, NULL);
|
||||
|
||||
if (cpu) {
|
||||
/*
|
||||
* This could even reset exit_request for all CPUs, but in practice
|
||||
* races between CPU exits and changes to "cpu" are so rare that
|
||||
* there's no advantage in doing so.
|
||||
*/
|
||||
qatomic_set(&cpu->exit_request, false);
|
||||
}
|
||||
|
||||
if (icount_enabled() && all_cpu_threads_idle()) {
|
||||
/*
|
||||
* When all cpus are sleeping (e.g in WFI), to avoid a deadlock
|
||||
* in the main_loop, wake it up in order to start the warp timer.
|
||||
*/
|
||||
qemu_notify_event();
|
||||
}
|
||||
|
||||
rr_wait_io_event();
|
||||
rr_deal_with_unplugged_cpus();
|
||||
}
|
||||
|
||||
g_assert_not_reached();
|
||||
|
|
|
|||
|
|
@ -42,16 +42,14 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
|
|||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
do {
|
||||
qemu_process_cpu_events(cpu);
|
||||
|
||||
if (cpu_can_run(cpu)) {
|
||||
r = nvmm_vcpu_exec(cpu);
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
while (cpu_thread_is_idle(cpu)) {
|
||||
qemu_cond_wait_bql(cpu->halt_cond);
|
||||
}
|
||||
qemu_process_cpu_events_common(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
nvmm_destroy_vcpu(cpu);
|
||||
|
|
|
|||
|
|
@ -42,16 +42,14 @@ static void *whpx_cpu_thread_fn(void *arg)
|
|||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
do {
|
||||
qemu_process_cpu_events(cpu);
|
||||
|
||||
if (cpu_can_run(cpu)) {
|
||||
r = whpx_vcpu_exec(cpu);
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
while (cpu_thread_is_idle(cpu)) {
|
||||
qemu_cond_wait_bql(cpu->halt_cond);
|
||||
}
|
||||
qemu_process_cpu_events_common(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
whpx_destroy_vcpu(cpu);
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue