@@ -433,7 +433,7 @@ static int hax_vcpu_interrupt(CPUArchState *env)
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
hax_inject_interrupt(env, irq);
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
}
}
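
Every hunk in this series is the same mechanical substitution: the open-coded read-modify-write on cpu->interrupt_request becomes a call to the cpu_reset_interrupt() helper. For reference, the helper amounts to the following (a sketch based on qom/cpu.c of the same era, reproduced here only so the hunks read on their own):

    void cpu_reset_interrupt(CPUState *cpu, int mask)
    {
        bool need_lock = !qemu_mutex_iothread_locked();

        if (need_lock) {
            qemu_mutex_lock_iothread();
        }
        cpu->interrupt_request &= ~mask;    /* the old open-coded RMW */
        if (need_lock) {
            qemu_mutex_unlock_iothread();
        }
    }

The point of the substitution is that the clear now happens under the iothread lock (BQL), so it cannot race with another thread setting bits via cpu_interrupt().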
@@ -483,7 +483,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
cpu->halted = 0;
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
apic_poll_irq(x86_cpu->apic_state);
}
@@ -402,7 +402,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
- cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_NMI);
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC;
wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
} else {
@@ -414,7 +414,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
(cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
int line = cpu_get_pic_interrupt(&x86cpu->env);
- cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_HARD);
if (line >= 0) {
wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
@@ -440,7 +440,7 @@ int hvf_process_events(CPUState *cpu_state)
}
if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
- cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -453,7 +453,7 @@ int hvf_process_events(CPUState *cpu_state)
do_cpu_sipi(cpu);
}
if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
- cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_TPR);
hvf_cpu_synchronize_state(cpu_state);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
env->tpr_access_type);
@@ -2709,7 +2709,7 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
*/
events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
- cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
} else {
/* Keep these in cs->interrupt_request. */
events.smi.pending = 0;
@@ -3005,7 +3005,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
qemu_mutex_lock_iothread();
- cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
qemu_mutex_unlock_iothread();
DPRINTF("injected NMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
@@ -3016,7 +3016,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
qemu_mutex_lock_iothread();
- cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
qemu_mutex_unlock_iothread();
DPRINTF("injected SMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
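
Note that kvm_arch_pre_run() keeps its explicit qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() pair around the new calls; that is harmless, since cpu_reset_interrupt() first checks qemu_mutex_iothread_locked() and skips re-taking a lock the caller already holds (see the sketch above).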
@@ -3052,7 +3052,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
(env->eflags & IF_MASK)) {
int irq;
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
struct kvm_interrupt intr;
@@ -3123,7 +3123,7 @@ int kvm_arch_process_async_events(CPUState *cs)
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
assert(env->mcg_cap);
- cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE);
kvm_cpu_synchronize_state(cs);
@@ -3153,7 +3153,7 @@ int kvm_arch_process_async_events(CPUState *cs)
}
if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
- cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -3166,7 +3166,7 @@ int kvm_arch_process_async_events(CPUState *cs)
do_cpu_sipi(cpu);
}
if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
- cs->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
kvm_cpu_synchronize_state(cs);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
env->tpr_access_type);
@@ -1323,7 +1323,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
#if !defined(CONFIG_USER_ONLY)
if (interrupt_request & CPU_INTERRUPT_POLL) {
- cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
/* Don't process multiple interrupt requests in a single call.
This is required to make icount-driven execution deterministic. */
@@ -1337,18 +1337,18 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
!(env->hflags & HF_SMM_MASK)) {
cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_SMI);
do_smm_enter(cpu);
ret = true;
} else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
!(env->hflags2 & HF2_NMI_MASK)) {
cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
- cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI);
env->hflags2 |= HF2_NMI_MASK;
do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
ret = true;
} else if (interrupt_request & CPU_INTERRUPT_MCE) {
- cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE);
do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
ret = true;
} else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -1359,8 +1359,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
int intno;
cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
- cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
- CPU_INTERRUPT_VIRQ);
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
qemu_log_mask(CPU_LOG_TB_IN_ASM,
"Servicing hardware INT=0x%02x\n", intno);
@@ -1380,7 +1379,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
qemu_log_mask(CPU_LOG_TB_IN_ASM,
"Servicing virtual hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
ret = true;
#endif
}
@@ -700,7 +700,7 @@ void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
env->hflags &= ~HF_SVMI_MASK;
env->intercept = 0;
env->intercept_exceptions = 0;
- cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
env->tsc_offset = 0;
env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
@@ -728,14 +728,14 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
if (!vcpu->interruption_pending &&
cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
vcpu->interruptable = false;
new_int.InterruptionType = WHvX64PendingNmi;
new_int.InterruptionPending = 1;
new_int.InterruptionVector = 2;
}
if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
}
}
@@ -758,7 +758,7 @@ static void whpx_vcpu_pre_run(CPUState *cpu)
vcpu->interruptable && (env->eflags & IF_MASK)) {
assert(!new_int.InterruptionPending);
if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
new_int.InterruptionType = WHvX64PendingInterrupt;
@@ -850,7 +850,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
}
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
apic_poll_irq(x86_cpu->apic_state);
}
@@ -868,7 +868,7 @@ static void whpx_vcpu_process_async_events(CPUState *cpu)
}
if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR);
if (!cpu->vcpu_dirty) {
whpx_get_registers(cpu);
}
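
For completeness, the interleaving the locked helper rules out, written as an illustrative comment (the field and flag names are qemu's; the schedule itself is a hypothetical example):

    /*
     *   vCPU thread                          iothread (holds the BQL)
     *   -----------                          ------------------------
     *   old = cpu->interrupt_request;
     *                                        cpu->interrupt_request |= CPU_INTERRUPT_NMI;
     *   cpu->interrupt_request = old & ~CPU_INTERRUPT_HARD;   // NMI bit lost
     *
     * With cpu_reset_interrupt(), the clear is performed under the BQL,
     * which device code raising interrupts also holds, so the newly set
     * bit survives.
     */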