add cpu_test_interrupt()/cpu_set_interrupt() helpers and use them tree wide
The helpers form a load-acquire/store-release pair and ensure that the appropriate barriers are in place in case checks happen outside of the BQL. Use them to replace open-coded checkers/setters across the code, to make sure that barriers are not missed. The helpers also make the code a bit more readable.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason J. Herne <jjherne@linux.ibm.com>
Link: https://lore.kernel.org/r/20250821155603.2422553-1-imammedo@redhat.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent b8217bbaf2
commit 87511341c3

34 changed files with 145 additions and 119 deletions
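The conversion is mechanical. A minimal sketch of the before/after shape, not code from the patch (my_cpu_has_work and my_raise_irq are hypothetical callers; CPU_INTERRUPT_HARD is just an example mask):

    static bool my_cpu_has_work(CPUState *cs)
    {
        /* Before: return cs->interrupt_request & CPU_INTERRUPT_HARD; */
        return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD);   /* load-acquire */
    }

    static void my_raise_irq(CPUState *cs)
    {
        /* Before: cs->interrupt_request |= CPU_INTERRUPT_HARD; */
        cpu_set_interrupt(cs, CPU_INTERRUPT_HARD);           /* store-release */
    }
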
@@ -779,9 +779,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
 
 #ifdef CONFIG_USER_ONLY
-    g_assert(!qatomic_read(&cpu->interrupt_request));
+    assert(!cpu_test_interrupt(cpu, ~0));
 #else
-    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
+    if (unlikely(cpu_test_interrupt(cpu, ~0))) {
         int interrupt_request;
         bql_lock();
         interrupt_request = cpu->interrupt_request;
@@ -789,7 +789,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             /* Mask out external interrupts for this step. */
             interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
         }
-        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_DEBUG)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
             bql_unlock();
@@ -797,7 +797,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         }
         if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
             /* Do nothing */
-        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
+        } else if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HALT)) {
             replay_interrupt();
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
@@ -807,7 +807,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         } else {
             const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
 
-            if (interrupt_request & CPU_INTERRUPT_RESET) {
+            if (cpu_test_interrupt(cpu, CPU_INTERRUPT_RESET)) {
                 replay_interrupt();
                 tcg_ops->cpu_exec_reset(cpu);
                 bql_unlock();
@@ -842,7 +842,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
-       if (interrupt_request & CPU_INTERRUPT_EXITTB) {
+       if (cpu_test_interrupt(cpu, CPU_INTERRUPT_EXITTB)) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */

@@ -97,7 +97,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
 /* mask must never be zero, except for A20 change call */
 void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
-    cpu->interrupt_request |= mask;
+    cpu_set_interrupt(cpu, mask);
 
     /*
      * If called from iothread context, wake the target cpu in

@@ -190,7 +190,7 @@ static void qemu_s390_flic_notify(uint32_t type)
     CPU_FOREACH(cs) {
         S390CPU *cpu = S390_CPU(cs);
 
-        cs->interrupt_request |= CPU_INTERRUPT_HARD;
+        cpu_set_interrupt(cs, CPU_INTERRUPT_HARD);
 
         /* ignore CPUs that are not sleeping */
         if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING &&

@@ -105,7 +105,7 @@ static void openrisc_timer_cb(void *opaque)
         CPUState *cs = CPU(cpu);
 
         cpu->env.ttmr |= TTMR_IP;
-        cs->interrupt_request |= CPU_INTERRUPT_TIMER;
+        cpu_set_interrupt(cs, CPU_INTERRUPT_TIMER);
     }
 
     switch (cpu->env.ttmr & TTMR_M) {

@@ -943,6 +943,28 @@ CPUState *cpu_by_arch_id(int64_t id);
 
 void cpu_interrupt(CPUState *cpu, int mask);
 
+/**
+ * cpu_test_interrupt:
+ * @cpu: The CPU to check interrupt(s) on.
+ * @mask: The interrupts to check.
+ *
+ * Checks if any of interrupts in @mask are pending on @cpu.
+ */
+static inline bool cpu_test_interrupt(CPUState *cpu, int mask)
+{
+    return qatomic_load_acquire(&cpu->interrupt_request) & mask;
+}
+
+/**
+ * cpu_set_interrupt:
+ * @cpu: The CPU to set pending interrupt(s) on.
+ * @mask: The interrupts to set.
+ *
+ * Sets interrupts in @mask as pending on @cpu. Unlike @cpu_interrupt,
+ * this does not kick the vCPU.
+ */
+void cpu_set_interrupt(CPUState *cpu, int mask);
+
 /**
  * cpu_set_pc:
  * @cpu: The CPU to set the program counter for.

@@ -254,9 +254,16 @@ int64_t cpus_get_elapsed_ticks(void)
     return cpu_get_ticks();
 }
 
+void cpu_set_interrupt(CPUState *cpu, int mask)
+{
+    /* Pairs with cpu_test_interrupt(). */
+    qatomic_store_release(&cpu->interrupt_request,
+                          cpu->interrupt_request | mask);
+}
+
 void generic_handle_interrupt(CPUState *cpu, int mask)
 {
-    cpu->interrupt_request |= mask;
+    cpu_set_interrupt(cpu, mask);
 
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);

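For context on the pairing above: the store-release in cpu_set_interrupt() makes writes performed before raising the interrupt visible to a thread that observes the flag through cpu_test_interrupt()'s load-acquire, even when the reader runs outside the BQL. A minimal sketch of that guarantee, not code from the patch (MyDevice, pending_data and consume() are hypothetical):

    /* Producer, e.g. a device model raising an interrupt: */
    static void raise_with_payload(CPUState *cs, MyDevice *dev, uint32_t data)
    {
        dev->pending_data = data;                  /* ordinary store */
        cpu_set_interrupt(cs, CPU_INTERRUPT_HARD); /* store-release publishes
                                                    * pending_data too */
    }

    /* Consumer, possibly running outside the BQL: */
    static void poll_interrupt(CPUState *cs, MyDevice *dev)
    {
        if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) { /* load-acquire */
            consume(dev->pending_data); /* guaranteed to observe the store */
        }
    }
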
@@ -86,10 +86,10 @@ static bool alpha_cpu_has_work(CPUState *cs)
        assume that if a CPU really wants to stay asleep, it will mask
        interrupts at the chipset level, which will prevent these bits
        from being set in the first place. */
-    return cs->interrupt_request & (CPU_INTERRUPT_HARD
-                                    | CPU_INTERRUPT_TIMER
-                                    | CPU_INTERRUPT_SMP
-                                    | CPU_INTERRUPT_MCHK);
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD
+                                  | CPU_INTERRUPT_TIMER
+                                  | CPU_INTERRUPT_SMP
+                                  | CPU_INTERRUPT_MCHK);
 }
 #endif /* !CONFIG_USER_ONLY */

@@ -142,11 +142,11 @@ static bool arm_cpu_has_work(CPUState *cs)
     ARMCPU *cpu = ARM_CPU(cs);
 
     return (cpu->power_state != PSCI_OFF)
-        && cs->interrupt_request &
-        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
-         | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VINMI | CPU_INTERRUPT_VFNMI
-         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
-         | CPU_INTERRUPT_EXITTB);
+        && cpu_test_interrupt(cs,
+            CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
+            | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VINMI | CPU_INTERRUPT_VFNMI
+            | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR
+            | CPU_INTERRUPT_EXITTB);
 }
 #endif /* !CONFIG_USER_ONLY */
@@ -958,7 +958,7 @@ void arm_cpu_update_virq(ARMCPU *cpu)
         !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
         (env->irq_line_state & CPU_INTERRUPT_VIRQ);
 
-    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) {
         if (new_state) {
             cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
         } else {
@@ -980,7 +980,7 @@ void arm_cpu_update_vfiq(ARMCPU *cpu)
         !(arm_hcrx_el2_eff(env) & HCRX_VFNMI)) ||
         (env->irq_line_state & CPU_INTERRUPT_VFIQ);
 
-    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) {
         if (new_state) {
             cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
         } else {
@@ -1002,7 +1002,7 @@ void arm_cpu_update_vinmi(ARMCPU *cpu)
         (arm_hcrx_el2_eff(env) & HCRX_VINMI)) ||
         (env->irq_line_state & CPU_INTERRUPT_VINMI);
 
-    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VINMI) != 0)) {
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) {
         if (new_state) {
             cpu_interrupt(cs, CPU_INTERRUPT_VINMI);
         } else {
@@ -1022,7 +1022,7 @@ void arm_cpu_update_vfnmi(ARMCPU *cpu)
     bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) &&
         (arm_hcrx_el2_eff(env) & HCRX_VFNMI);
 
-    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFNMI) != 0)) {
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) {
         if (new_state) {
             cpu_interrupt(cs, CPU_INTERRUPT_VFNMI);
         } else {
@@ -1041,7 +1041,7 @@ void arm_cpu_update_vserr(ARMCPU *cpu)
 
     bool new_state = env->cp15.hcr_el2 & HCR_VSE;
 
-    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) {
+    if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) {
         if (new_state) {
             cpu_interrupt(cs, CPU_INTERRUPT_VSERR);
         } else {

@@ -833,40 +833,40 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
     uint64_t ret = 0;
 
     if (hcr_el2 & HCR_IMO) {
-        if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) {
             ret |= CPSR_I;
         }
-        if (cs->interrupt_request & CPU_INTERRUPT_VINMI) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) {
             ret |= ISR_IS;
             ret |= CPSR_I;
         }
     } else {
-        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
             ret |= CPSR_I;
         }
 
-        if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
             ret |= ISR_IS;
             ret |= CPSR_I;
         }
     }
 
     if (hcr_el2 & HCR_FMO) {
-        if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) {
             ret |= CPSR_F;
         }
-        if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) {
             ret |= ISR_FS;
             ret |= CPSR_F;
         }
     } else {
-        if (cs->interrupt_request & CPU_INTERRUPT_FIQ) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_FIQ)) {
             ret |= CPSR_F;
         }
     }
 
     if (hcr_el2 & HCR_AMO) {
-        if (cs->interrupt_request & CPU_INTERRUPT_VSERR) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) {
             ret |= CPSR_A;
         }
     }
@@ -9147,7 +9147,7 @@ void arm_cpu_do_interrupt(CPUState *cs)
     arm_call_el_change_hook(cpu);
 
     if (!kvm_enabled()) {
-        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+        cpu_set_interrupt(cs, CPU_INTERRUPT_EXITTB);
     }
 }
 #endif /* !CONFIG_USER_ONLY */

@@ -1782,13 +1782,13 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
 
 static int hvf_inject_interrupts(CPUState *cpu)
 {
-    if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_FIQ)) {
         trace_hvf_inject_fiq();
         hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ,
                                       true);
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
         trace_hvf_inject_irq();
         hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ,
                                       true);
@@ -1840,7 +1840,7 @@ static void hvf_wfi(CPUState *cpu)
     uint64_t nanos;
     uint32_t cntfrq;
 
-    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
         /* Interrupt pending, no need to wait */
         return;
     }

@@ -45,7 +45,7 @@ static vaddr avr_cpu_get_pc(CPUState *cs)
 
 static bool avr_cpu_has_work(CPUState *cs)
 {
-    return (cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_RESET))
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_RESET)
         && cpu_interrupts_enabled(cpu_env(cs));
 }
 

@@ -135,7 +135,7 @@ static void hppa_restore_state_to_opc(CPUState *cs,
 #ifndef CONFIG_USER_ONLY
 static bool hppa_cpu_has_work(CPUState *cs)
 {
-    return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
 }
 #endif /* !CONFIG_USER_ONLY */
 

@@ -773,9 +773,9 @@ int hvf_vcpu_exec(CPUState *cpu)
     switch (exit_reason) {
     case EXIT_REASON_HLT: {
         macvm_set_rip(cpu, rip + ins_len);
-        if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+        if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
             (env->eflags & IF_MASK))
-            && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
+            && !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI) &&
             !(idtvec_info & VMCS_IDT_VEC_VALID)) {
             cpu->halted = 1;
             ret = EXCP_HLT;

@@ -395,7 +395,7 @@ bool hvf_inject_interrupts(CPUState *cs)
         };
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_NMI) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
             cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
             info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
@@ -406,7 +406,7 @@ bool hvf_inject_interrupts(CPUState *cs)
     }
 
     if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
-        (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+        cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) {
         int line = cpu_get_pic_interrupt(env);
         cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
@@ -415,11 +415,10 @@ bool hvf_inject_interrupts(CPUState *cs)
                      VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
         }
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
         vmx_set_int_window_exiting(cs);
     }
-    return (cs->interrupt_request
-            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR);
 }
 
 int hvf_process_events(CPUState *cs)
@@ -432,25 +431,25 @@ int hvf_process_events(CPUState *cs)
         env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS);
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_INIT) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_INIT)) {
         cpu_synchronize_state(cs);
         do_cpu_init(cpu);
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(cpu->apic_state);
     }
-    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
-        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         cs->halted = 0;
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) {
         cpu_synchronize_state(cs);
         do_cpu_sipi(cpu);
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
         cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
         cpu_synchronize_state(cs);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,

@@ -5453,8 +5453,8 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     int ret;
 
     /* Inject NMI */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
-        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
             bql_lock();
             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
             bql_unlock();
@@ -5465,7 +5465,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
                         strerror(-ret));
             }
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
             bql_lock();
             cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
             bql_unlock();
@@ -5486,12 +5486,12 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
      * or (for userspace APIC, but it is cheap to combine the checks here)
      * pending TPR access reports.
      */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
-        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
             !(env->hflags & HF_SMM_MASK)) {
             cpu->exit_request = 1;
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
             cpu->exit_request = 1;
         }
     }
@@ -5499,7 +5499,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     if (!kvm_pic_in_kernel()) {
         /* Try to inject an interrupt if the guest can accept it */
         if (run->ready_for_interrupt_injection &&
-            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+            cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
             (env->eflags & IF_MASK)) {
             int irq;
 
@@ -5523,7 +5523,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
          * interrupt, request an interrupt window exit. This will
          * cause a return to userspace as soon as the guest is ready to
          * receive interrupts. */
-        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
            run->request_interrupt_window = 1;
        } else {
            run->request_interrupt_window = 0;
@@ -5595,7 +5595,7 @@ int kvm_arch_process_async_events(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
 
-    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_MCE)) {
         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
         assert(env->mcg_cap);
 
@@ -5618,7 +5618,7 @@ int kvm_arch_process_async_events(CPUState *cs)
         }
     }
 
-    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_INIT) &&
         !(env->hflags & HF_SMM_MASK)) {
         kvm_cpu_synchronize_state(cs);
         do_cpu_init(cpu);
@@ -5628,20 +5628,20 @@ int kvm_arch_process_async_events(CPUState *cs)
         return 0;
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) {
         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(cpu->apic_state);
     }
-    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
-        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         cs->halted = 0;
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) {
         kvm_cpu_synchronize_state(cs);
         do_cpu_sipi(cpu);
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) {
         cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
         kvm_cpu_synchronize_state(cs);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
@@ -5656,9 +5656,9 @@ static int kvm_handle_halt(X86CPU *cpu)
     CPUState *cs = CPU(cpu);
     CPUX86State *env = &cpu->env;
 
-    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!(cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) &&
-        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        !cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) {
         cs->halted = 1;
         return EXCP_HLT;
     }

@@ -413,11 +413,11 @@ nvmm_vcpu_pre_run(CPUState *cpu)
      * Force the VCPU out of its inner loop to process any INIT requests
      * or commit pending TPR access.
      */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
         cpu->exit_request = 1;
     }
 
-    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+    if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         if (nvmm_can_take_nmi(cpu)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
             event->type = NVMM_VCPU_EVENT_INTR;
@@ -426,7 +426,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
         }
     }
 
-    if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+    if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
         if (nvmm_can_take_int(cpu)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
             event->type = NVMM_VCPU_EVENT_INTR;
@@ -436,7 +436,7 @@ nvmm_vcpu_pre_run(CPUState *cpu)
     }
 
     /* Don't want SMIs. */
-    if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
     }
 
@@ -651,9 +651,9 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu,
 
     bql_lock();
 
-    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
         (cpu_env(cpu)->eflags & IF_MASK)) &&
-        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         cpu->exception_index = EXCP_HLT;
         cpu->halted = true;
         ret = 1;
@@ -691,25 +691,25 @@ nvmm_vcpu_loop(CPUState *cpu)
      * Some asynchronous events must be handled outside of the inner
      * VCPU loop. They are handled here.
      */
-    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT)) {
         nvmm_cpu_synchronize_state(cpu);
         do_cpu_init(x86_cpu);
         /* set int/nmi windows back to the reset state */
     }
-    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(x86_cpu->apic_state);
     }
-    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
-        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         cpu->halted = false;
     }
-    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) {
         nvmm_cpu_synchronize_state(cpu);
         do_cpu_sipi(x86_cpu);
     }
-    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
         nvmm_cpu_synchronize_state(cpu);
         apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,

@@ -133,7 +133,7 @@ bool x86_cpu_exec_halt(CPUState *cpu)
     X86CPU *x86_cpu = X86_CPU(cpu);
     CPUX86State *env = &x86_cpu->env;
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
         bql_lock();
         apic_poll_irq(x86_cpu->apic_state);
         cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);

@@ -403,7 +403,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     env->hflags2 |= HF2_GIF_MASK;
 
     if (ctl_has_irq(env)) {
-        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+        cpu_set_interrupt(cs, CPU_INTERRUPT_VIRQ);
     }
 
     if (virtual_gif_set(env)) {

@@ -1436,9 +1436,9 @@ static int whpx_handle_halt(CPUState *cpu)
     int ret = 0;
 
     bql_lock();
-    if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
         (cpu_env(cpu)->eflags & IF_MASK)) &&
-        !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         cpu->exception_index = EXCP_HLT;
         cpu->halted = true;
         ret = 1;
@@ -1469,15 +1469,15 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
 
     /* Inject NMI */
     if (!vcpu->interruption_pending &&
-        cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
-        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+        cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
             vcpu->interruptable = false;
             new_int.InterruptionType = WHvX64PendingNmi;
             new_int.InterruptionPending = 1;
             new_int.InterruptionVector = 2;
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
         }
     }
@@ -1486,12 +1486,12 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
      * Force the VCPU out of its inner loop to process any INIT requests or
      * commit pending TPR access.
      */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
-        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
             !(env->hflags & HF_SMM_MASK)) {
             cpu->exit_request = 1;
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
             cpu->exit_request = 1;
         }
     }
@@ -1501,7 +1501,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
     if (!vcpu->interruption_pending &&
         vcpu->interruptable && (env->eflags & IF_MASK)) {
         assert(!new_int.InterruptionPending);
-        if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+        if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
             irq = cpu_get_pic_interrupt(env);
             if (irq >= 0) {
@@ -1519,7 +1519,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
             reg_count += 1;
         }
     } else if (vcpu->ready_for_pic_interrupt &&
-               (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+               cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
         irq = cpu_get_pic_interrupt(env);
         if (irq >= 0) {
@@ -1546,7 +1546,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
 
     /* Update the state of the interrupt delivery notification */
     if (!vcpu->window_registered &&
-        cpu->interrupt_request & CPU_INTERRUPT_HARD) {
+        cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) {
         reg_values[reg_count].DeliverabilityNotifications =
             (WHV_X64_DELIVERABILITY_NOTIFICATIONS_REGISTER) {
                 .InterruptNotification = 1
@@ -1599,30 +1599,30 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
     CPUX86State *env = &x86_cpu->env;
     AccelCPUState *vcpu = cpu->accel;
 
-    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) &&
         !(env->hflags & HF_SMM_MASK)) {
         whpx_cpu_synchronize_state(cpu);
         do_cpu_init(x86_cpu);
         vcpu->interruptable = true;
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(x86_cpu->apic_state);
     }
 
-    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
-        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) {
         cpu->halted = false;
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) {
         whpx_cpu_synchronize_state(cpu);
         do_cpu_sipi(x86_cpu);
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) {
         cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
         whpx_cpu_synchronize_state(cpu);
         apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,

@@ -376,7 +376,7 @@ static bool loongarch_cpu_has_work(CPUState *cs)
 {
     bool has_work = false;
 
-    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) {
         has_work = true;
     }

@@ -74,7 +74,7 @@ static void m68k_restore_state_to_opc(CPUState *cs,
 #ifndef CONFIG_USER_ONLY
 static bool m68k_cpu_has_work(CPUState *cs)
 {
-    return cs->interrupt_request & CPU_INTERRUPT_HARD;
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD);
 }
 #endif /* !CONFIG_USER_ONLY */
 

@@ -129,7 +129,7 @@ static void mb_restore_state_to_opc(CPUState *cs,
 #ifndef CONFIG_USER_ONLY
 static bool mb_cpu_has_work(CPUState *cs)
 {
-    return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
 }
 #endif /* !CONFIG_USER_ONLY */
 

@@ -145,7 +145,7 @@ static bool mips_cpu_has_work(CPUState *cs)
      * check for interrupts that can be taken. For pre-release 6 CPUs,
      * check for CP0 Config7 'Wait IE ignore' bit.
      */
-    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         cpu_mips_hw_interrupts_pending(env)) {
         if (cpu_mips_hw_interrupts_enabled(env) ||
             (env->CP0_Config7 & (1 << CP0C7_WII)) ||
@@ -160,7 +160,7 @@ static bool mips_cpu_has_work(CPUState *cs)
          * The QEMU model will issue an _WAKE request whenever the CPUs
          * should be woken up.
          */
-        if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_WAKE)) {
            has_work = true;
        }
 
@@ -170,7 +170,7 @@ static bool mips_cpu_has_work(CPUState *cs)
     }
     /* MIPS Release 6 has the ability to halt the CPU. */
     if (env->CP0_Config5 & (1 << CP0C5_VP)) {
-        if (cs->interrupt_request & CPU_INTERRUPT_WAKE) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_WAKE)) {
             has_work = true;
         }
         if (!mips_vp_active(env)) {

@@ -144,7 +144,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
 
     bql_lock();
 
-    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         cpu_mips_io_interrupts_pending(cpu)) {
         intr.cpu = -1;
         intr.irq = 2;

@@ -78,8 +78,7 @@ static void openrisc_restore_state_to_opc(CPUState *cs,
 #ifndef CONFIG_USER_ONLY
 static bool openrisc_cpu_has_work(CPUState *cs)
 {
-    return cs->interrupt_request & (CPU_INTERRUPT_HARD |
-                                    CPU_INTERRUPT_TIMER);
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER);
 }
 #endif /* !CONFIG_USER_ONLY */
 

@@ -7225,7 +7225,7 @@ static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch)
 #ifndef CONFIG_USER_ONLY
 static bool ppc_cpu_has_work(CPUState *cs)
 {
-    return cs->interrupt_request & CPU_INTERRUPT_HARD;
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD);
 }
 #endif /* !CONFIG_USER_ONLY */
 

@@ -1354,7 +1354,7 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu)
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
 
-    if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
         FIELD_EX64(env->msr, MSR, EE)) {
         cs->halted = 1;
         cs->exception_index = EXCP_HLT;

@@ -75,8 +75,7 @@ static void rx_restore_state_to_opc(CPUState *cs,
 
 static bool rx_cpu_has_work(CPUState *cs)
 {
-    return cs->interrupt_request &
-        (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR);
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR);
 }
 
 static int rx_cpu_mmu_index(CPUState *cs, bool ifunc)

@@ -44,7 +44,7 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte)
 void rx_cpu_do_interrupt(CPUState *cs)
 {
     CPURXState *env = cpu_env(cs);
-    int do_irq = cs->interrupt_request & INT_FLAGS;
+    int do_irq = cpu_test_interrupt(cs, INT_FLAGS);
     uint32_t save_psw;
 
     env->in_sleep = 0;

@@ -49,7 +49,7 @@ bool s390_cpu_has_work(CPUState *cs)
         return false;
     }
 
-    if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) {
+    if (!cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
         return false;
     }
 

@@ -108,7 +108,7 @@ static bool superh_io_recompile_replay_branch(CPUState *cs,
 
 static bool superh_cpu_has_work(CPUState *cs)
 {
-    return cs->interrupt_request & CPU_INTERRUPT_HARD;
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD);
 }
 #endif /* !CONFIG_USER_ONLY */
 

@@ -58,7 +58,7 @@ int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr)
 void superh_cpu_do_interrupt(CPUState *cs)
 {
     CPUSH4State *env = cpu_env(cs);
-    int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD;
+    int do_irq = cpu_test_interrupt(cs, CPU_INTERRUPT_HARD);
     int do_exp, irq_vector = cs->exception_index;
 
     /* prioritize exceptions over interrupts */

@@ -783,7 +783,7 @@ static void sparc_restore_state_to_opc(CPUState *cs,
 #ifndef CONFIG_USER_ONLY
 static bool sparc_cpu_has_work(CPUState *cs)
 {
-    return (cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) &&
            cpu_interrupts_enabled(cpu_env(cs));
 }
 #endif /* !CONFIG_USER_ONLY */

@@ -89,7 +89,7 @@ void cpu_check_irqs(CPUSPARCState *env)
      * the next bit is (2 << psrpil).
      */
     if (pil < (2 << env->psrpil)) {
-        if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+        if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
            trace_sparc64_cpu_check_irqs_reset_irq(env->interrupt_index);
            env->interrupt_index = 0;
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
@@ -120,7 +120,7 @@ void cpu_check_irqs(CPUSPARCState *env)
                 break;
             }
         }
-    } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
+    } else if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) {
         trace_sparc64_cpu_check_irqs_disabled(pil, env->pil_in, env->softint,
                                               env->interrupt_index);
         env->interrupt_index = 0;