@@ -5433,15 +5433,16 @@ static bool x86_cpu_has_work(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ uint32_t interrupt_request = atomic_read(&cs->interrupt_request);
- return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
+ return ((interrupt_request & (CPU_INTERRUPT_HARD |
CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
- (cs->interrupt_request & (CPU_INTERRUPT_NMI |
+ (interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
CPU_INTERRUPT_SIPI |
CPU_INTERRUPT_MCE)) ||
- ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
+ ((interrupt_request & CPU_INTERRUPT_SMI) &&
!(env->hflags & HF_SMM_MASK));
}
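The pattern in this hunk, repeated throughout the patch, is to read interrupt_request once into a local and test every flag against that snapshot, so the predicate cannot mix bits from two different reads while another thread is updating the word. A minimal standalone sketch of the same reader/writer pairing, using C11 atomics as a stand-in for QEMU's atomic.h macros (pending, post_request, reader_has_work and the REQ_* flags are illustrative names, not QEMU's):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { REQ_HARD = 1u << 0, REQ_NMI = 1u << 1, REQ_SMI = 1u << 2 };

    static _Atomic unsigned pending;    /* stands in for cs->interrupt_request */

    /* Writer side: set a request bit with a single atomic read-modify-write,
     * the role the atomic_or() later in this patch plays. */
    static void post_request(unsigned mask)
    {
        atomic_fetch_or_explicit(&pending, mask, memory_order_seq_cst);
    }

    /* Reader side: snapshot once, then test the snapshot, as the rewritten
     * x86_cpu_has_work() above does. */
    static bool reader_has_work(bool if_enabled)
    {
        unsigned snap = atomic_load_explicit(&pending, memory_order_relaxed);

        return ((snap & REQ_HARD) && if_enabled) ||
               (snap & (REQ_NMI | REQ_SMI));
    }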
@@ -1035,12 +1035,12 @@ void do_cpu_init(X86CPU *cpu)
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
CPUX86State *save = g_new(CPUX86State, 1);
- int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
+ int sipi = atomic_read(&cs->interrupt_request) & CPU_INTERRUPT_SIPI;
*save = *env;
cpu_reset(cs);
- cs->interrupt_request = sipi;
+ atomic_mb_set(&cs->interrupt_request, sipi);
memcpy(&env->start_init_save, &save->start_init_save,
offsetof(CPUX86State, end_init_save) -
offsetof(CPUX86State, start_init_save));
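atomic_mb_set() here is QEMU's store-with-full-barrier accessor: the latched SIPI bit is written back with full ordering, so the cpu_reset(cs) that precedes it cannot be reordered past the publication of the new request word. A rough standalone C11 equivalent, with an illustrative name for the shared word:

    #include <stdatomic.h>

    static _Atomic unsigned interrupt_request;  /* stands in for cs->interrupt_request */

    /* Rough analogue of atomic_mb_set(&cs->interrupt_request, sipi): a
     * sequentially consistent store, so all earlier writes (the CPU reset)
     * are visible before the stored value can be observed. */
    static void publish_latched_sipi(unsigned sipi)
    {
        atomic_store_explicit(&interrupt_request, sipi, memory_order_seq_cst);
    }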
@@ -2707,8 +2707,10 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
/* As soon as these are moved to the kernel, remove them
* from cs->interrupt_request.
*/
- events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
- events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
+ uint32_t interrupt_request = atomic_read(&cs->interrupt_request);
+
+ events.smi.pending = interrupt_request & CPU_INTERRUPT_SMI;
+ events.smi.latched_init = interrupt_request & CPU_INTERRUPT_INIT;
cpu_reset_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
} else {
/* Keep these in cs->interrupt_request. */
@@ -2999,11 +3001,12 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
+ uint32_t interrupt_request = atomic_read(&cpu->interrupt_request);
int ret;
/* Inject NMI */
- if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
- if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+ if (interrupt_request & CPU_INTERRUPT_NMI) {
qemu_mutex_lock_iothread();
cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
qemu_mutex_unlock_iothread();
@@ -3014,7 +3017,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
strerror(-ret));
}
}
- if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+ if (atomic_read(&cpu->interrupt_request) & CPU_INTERRUPT_SMI) {
qemu_mutex_lock_iothread();
cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
qemu_mutex_unlock_iothread();
@@ -3035,12 +3038,12 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
* or (for userspace APIC, but it is cheap to combine the checks here)
* pending TPR access reports.
*/
- if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
- if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if (interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if ((interrupt_request & CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
cpu->exit_request = 1;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (interrupt_request & CPU_INTERRUPT_TPR) {
cpu->exit_request = 1;
}
}
@@ -3048,11 +3051,12 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
if (!kvm_pic_in_kernel()) {
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
- (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) {
int irq;
cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
+ interrupt_request &= ~CPU_INTERRUPT_HARD;
irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
struct kvm_interrupt intr;
@@ -3072,7 +3076,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
* interrupt, request an interrupt window exit. This will
* cause a return to userspace as soon as the guest is ready to
* receive interrupts. */
- if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
run->request_interrupt_window = 1;
} else {
run->request_interrupt_window = 0;
@@ -3118,8 +3122,9 @@ int kvm_arch_process_async_events(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ uint32_t interrupt_request = atomic_read(&cs->interrupt_request);
- if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+ if (interrupt_request & CPU_INTERRUPT_MCE) {
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
assert(env->mcg_cap);
@@ -3142,7 +3147,7 @@ int kvm_arch_process_async_events(CPUState *cs)
}
}
- if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if ((interrupt_request & CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
kvm_cpu_synchronize_state(cs);
do_cpu_init(cpu);
@@ -3152,20 +3157,20 @@ int kvm_arch_process_async_events(CPUState *cs)
return 0;
}
- if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (interrupt_request & CPU_INTERRUPT_POLL) {
cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
- if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
- (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ (interrupt_request & CPU_INTERRUPT_NMI)) {
cs->halted = 0;
}
- if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (interrupt_request & CPU_INTERRUPT_SIPI) {
kvm_cpu_synchronize_state(cs);
do_cpu_sipi(cpu);
}
- if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (interrupt_request & CPU_INTERRUPT_TPR) {
cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
kvm_cpu_synchronize_state(cs);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
@@ -3179,10 +3184,11 @@ static int kvm_handle_halt(X86CPU *cpu)
{
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
+ uint32_t interrupt_request = atomic_read(&cs->interrupt_request);
- if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
- !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ !(interrupt_request & CPU_INTERRUPT_NMI)) {
cs->halted = 1;
return EXCP_HLT;
}
@@ -316,7 +316,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
if (int_ctl & V_IRQ_MASK) {
CPUState *cs = CPU(x86_env_get_cpu(env));
- cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+ atomic_or(&cs->interrupt_request, CPU_INTERRUPT_VIRQ);
}
/* maybe we need to inject an event */
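Converting the plain |= into atomic_or() matters because a read-modify-write done as separate load and store steps can silently drop a bit that another thread sets in between; the atomic_read() in the do_vmexit() hunk below is the matching read side of that pairing. An illustrative standalone contrast, with made-up names:

    #include <stdatomic.h>

    static _Atomic unsigned pending;

    /* Racy variant: load, modify and store as three separate steps; a bit set
     * by another thread between the load and the store is overwritten. */
    static void set_bit_racy(unsigned mask)
    {
        unsigned v = atomic_load_explicit(&pending, memory_order_relaxed);
        atomic_store_explicit(&pending, v | mask, memory_order_relaxed);
    }

    /* What an atomic OR provides: one indivisible read-modify-write. */
    static void set_bit_atomic(unsigned mask)
    {
        atomic_fetch_or_explicit(&pending, mask, memory_order_seq_cst);
    }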
@@ -674,7 +674,7 @@ void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
int_ctl |= env->v_tpr & V_TPR_MASK;
- if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+ if (atomic_read(&cs->interrupt_request) & CPU_INTERRUPT_VIRQ) {
int_ctl |= V_IRQ_MASK;
}
x86_stl_phys(cs,