@@ -2704,11 +2704,14 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
events.smi.smm = !!(env->hflags & HF_SMM_MASK);
events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK);
if (kvm_irqchip_in_kernel()) {
+ uint32_t interrupt_request;
+
/* As soon as these are moved to the kernel, remove them
* from cs->interrupt_request.
*/
- events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
- events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
+ interrupt_request = cpu_interrupt_request(cs);
+ events.smi.pending = interrupt_request & CPU_INTERRUPT_SMI;
+ events.smi.latched_init = interrupt_request & CPU_INTERRUPT_INIT;
cpu_reset_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
} else {
/* Keep these in cs->interrupt_request. */
@@ -2999,14 +3002,14 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
+ uint32_t interrupt_request;
int ret;

+ interrupt_request = cpu_interrupt_request(cpu);
/* Inject NMI */
- if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
- if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
- qemu_mutex_lock_iothread();
+ if (interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+ if (interrupt_request & CPU_INTERRUPT_NMI) {
cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
- qemu_mutex_unlock_iothread();
DPRINTF("injected NMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
if (ret < 0) {
@@ -3014,10 +3017,8 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
strerror(-ret));
}
}
- if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
- qemu_mutex_lock_iothread();
+ if (interrupt_request & CPU_INTERRUPT_SMI) {
cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
- qemu_mutex_unlock_iothread();
DPRINTF("injected SMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_SMI);
if (ret < 0) {
@@ -3031,16 +3032,18 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
qemu_mutex_lock_iothread();
}

+ interrupt_request = cpu_interrupt_request(cpu);
+
/* Force the VCPU out of its inner loop to process any INIT requests
* or (for userspace APIC, but it is cheap to combine the checks here)
* pending TPR access reports.
*/
- if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
- if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if (interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+ if ((interrupt_request & CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
cpu->exit_request = 1;
}
- if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (interrupt_request & CPU_INTERRUPT_TPR) {
cpu->exit_request = 1;
}
}
@@ -3048,7 +3051,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
if (!kvm_pic_in_kernel()) {
/* Try to inject an interrupt if the guest can accept it */
if (run->ready_for_interrupt_injection &&
- (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) {
int irq;

@@ -3072,7 +3075,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
* interrupt, request an interrupt window exit. This will
* cause a return to userspace as soon as the guest is ready to
* receive interrupts. */
- if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+ if ((cpu_interrupt_request(cpu) & CPU_INTERRUPT_HARD)) {
run->request_interrupt_window = 1;
} else {
run->request_interrupt_window = 0;
@@ -3118,8 +3121,9 @@ int kvm_arch_process_async_events(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
CPUX86State *env = &cpu->env;
+ uint32_t interrupt_request;

- if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+ if (cpu_interrupt_request(cs) & CPU_INTERRUPT_MCE) {
/* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
assert(env->mcg_cap);

@@ -3142,7 +3146,7 @@ int kvm_arch_process_async_events(CPUState *cs)
}
}

- if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+ if ((cpu_interrupt_request(cs) & CPU_INTERRUPT_INIT) &&
!(env->hflags & HF_SMM_MASK)) {
kvm_cpu_synchronize_state(cs);
do_cpu_init(cpu);
@@ -3152,20 +3156,21 @@ int kvm_arch_process_async_events(CPUState *cs)
return 0;
}

- if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_interrupt_request(cs) & CPU_INTERRUPT_POLL) {
cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
- if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ interrupt_request = cpu_interrupt_request(cs);
+ if (((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
- (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ (interrupt_request & CPU_INTERRUPT_NMI)) {
cpu_halted_set(cs, 0);
}
- if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (interrupt_request & CPU_INTERRUPT_SIPI) {
kvm_cpu_synchronize_state(cs);
do_cpu_sipi(cpu);
}
- if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_interrupt_request(cs) & CPU_INTERRUPT_TPR) {
cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
kvm_cpu_synchronize_state(cs);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
@@ -3179,10 +3184,13 @@ static int kvm_handle_halt(X86CPU *cpu)
{
CPUState *cs = CPU(cpu);
CPUX86State *env = &cpu->env;
+ uint32_t interrupt_request;
+
+ interrupt_request = cpu_interrupt_request(cs);

- if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!((interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) &&
- !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+ !(interrupt_request & CPU_INTERRUPT_NMI)) {
cpu_halted_set(cs, 1);
return EXCP_HLT;
}
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 target/i386/kvm.c | 54 +++++++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 23 deletions(-)
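Note: the diff above relies on cpu_interrupt_request(), the accessor added earlier in this series. A minimal sketch of the intended semantics, assuming (as elsewhere in the series) that interrupt_request is protected by the per-CPU lock and that cpu_mutex_lock()/cpu_mutex_unlock()/cpu_mutex_locked() are the series' helpers, might look like:

    /* Sketch only -- the real accessor is defined in an earlier patch
     * of this series. It returns a consistent snapshot of the field by
     * reading it under the per-CPU lock, unless the caller already
     * holds that lock. */
    static inline uint32_t cpu_interrupt_request(CPUState *cpu)
    {
        uint32_t ret;

        if (cpu_mutex_locked(cpu)) {
            /* Caller already holds the per-CPU lock; read directly. */
            return cpu->interrupt_request;
        }
        cpu_mutex_lock(cpu);
        ret = cpu->interrupt_request;
        cpu_mutex_unlock(cpu);
        return ret;
    }

Reading the field through a lock-protected accessor is also what allows this patch to drop the qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() pairs around the NMI/SMI resets in kvm_arch_pre_run(): presumably cpu_reset_interrupt() was converted earlier in the series to take the per-CPU lock itself, so the BQL is no longer needed at those call sites.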