[11/35] target/i386: access cpu->interrupt_request with atomics

Message ID: 20180917163103.6113-12-cota@braap.org (mailing list archive)
State: New, archived
Series: exec: drop BQL from interrupt handling

Commit Message

Emilio Cota Sept. 17, 2018, 4:30 p.m. UTC
From: Paolo Bonzini <pbonzini@redhat.com>

Cc: Richard Henderson <rth@twiddle.net>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 target/i386/cpu.c        |  7 ++++---
 target/i386/helper.c     |  4 ++--
 target/i386/kvm.c        | 44 +++++++++++++++++++++++-----------------
 target/i386/svm_helper.c |  4 ++--
 4 files changed, 33 insertions(+), 26 deletions(-)
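
The pattern is the same throughout the diff below: readers snapshot
cs->interrupt_request once with atomic_read() and then test flags on the
local copy, while writers use atomic_or() and (in this version)
atomic_mb_set(). A rough, self-contained sketch, with __atomic builtins
standing in for QEMU's helpers, and with placeholder types and flag
values rather than QEMU's real definitions:

#include <stdint.h>

#define CPU_INTERRUPT_HARD (1u << 1)   /* placeholder bit assignments */
#define CPU_INTERRUPT_NMI  (1u << 9)

typedef struct CPUStateSketch {
    uint32_t interrupt_request;
} CPUStateSketch;

/* Writer side: raise a flag with an atomic read-modify-write, as the
 * svm_helper.c hunk does with atomic_or(). */
static inline void raise_irq(CPUStateSketch *cs, uint32_t mask)
{
    __atomic_fetch_or(&cs->interrupt_request, mask, __ATOMIC_SEQ_CST);
}

/* Reader side: snapshot the field once with a relaxed load (atomic_read)
 * so that every check in the function tests one consistent value. */
static inline int has_work(CPUStateSketch *cs)
{
    uint32_t interrupt_request =
        __atomic_load_n(&cs->interrupt_request, __ATOMIC_RELAXED);

    return (interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI)) != 0;
}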

Comments

Richard Henderson Sept. 18, 2018, 9:04 p.m. UTC | #1
On 9/17/18 9:30 AM, Emilio G. Cota wrote:
>      cpu_reset(cs);
> -    cs->interrupt_request = sipi;
> +    atomic_mb_set(&cs->interrupt_request, sipi);
>      memcpy(&env->start_init_save, &save->start_init_save,

Why does this need a memory barrier?

Anyway, I think a bare mechanical conversion would be best
for the first patch, with any extra barriers added separately
and with a description of why they are needed.


r~
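
For context on the barrier question: atomic_set() is a plain relaxed
store, while atomic_mb_set() is a store with full-barrier semantics.
Roughly, with __atomic builtins as stand-ins for the real macros in
include/qemu/atomic.h:

#include <stdint.h>

static uint32_t interrupt_request;

/* atomic_set(): relaxed store, no ordering against surrounding accesses. */
static inline void set_plain(uint32_t v)
{
    __atomic_store_n(&interrupt_request, v, __ATOMIC_RELAXED);
}

/* atomic_mb_set(): store plus a full memory barrier. */
static inline void set_mb(uint32_t v)
{
    __atomic_store_n(&interrupt_request, v, __ATOMIC_SEQ_CST);
}

A barrier on the store side only establishes ordering if the matching
reads are at least acquire loads; the readers converted in this patch
use plain atomic_read(), i.e. relaxed loads, which is the point conceded
in the reply below.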
Emilio Cota Sept. 19, 2018, 3:02 p.m. UTC | #2
On Tue, Sep 18, 2018 at 14:04:35 -0700, Richard Henderson wrote:
> On 9/17/18 9:30 AM, Emilio G. Cota wrote:
> >      cpu_reset(cs);
> > -    cs->interrupt_request = sipi;
> > +    atomic_mb_set(&cs->interrupt_request, sipi);
> >      memcpy(&env->start_init_save, &save->start_init_save,
> 
> Why does this need a memory barrier?
>
> Anyway, I think a bare mechanical conversion would be best
> for the first patch, with any extra barriers added separately
> and with a description of why they are needed.

Almost no corresponding read has a barrier, so it's hard to
justify this one. I'll drop it.

Thanks,

		Emilio
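
The v2 respin would then presumably reduce the do_cpu_init() hunk to a
plain relaxed store, i.e. something like:

-    atomic_mb_set(&cs->interrupt_request, sipi);
+    atomic_set(&cs->interrupt_request, sipi);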

Patch

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index f24295e6e4..f98e6e4318 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5433,15 +5433,16 @@  static bool x86_cpu_has_work(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
+    int interrupt_request = atomic_read(&cs->interrupt_request);
 
-    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
+    return ((interrupt_request & (CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_POLL)) &&
             (env->eflags & IF_MASK)) ||
-           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
+           (interrupt_request & (CPU_INTERRUPT_NMI |
                                      CPU_INTERRUPT_INIT |
                                      CPU_INTERRUPT_SIPI |
                                      CPU_INTERRUPT_MCE)) ||
-           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
+           ((interrupt_request & CPU_INTERRUPT_SMI) &&
             !(env->hflags & HF_SMM_MASK));
 }
 
diff --git a/target/i386/helper.c b/target/i386/helper.c
index e695f8ba7a..ee9f24d853 100644
--- a/target/i386/helper.c
+++ b/target/i386/helper.c
@@ -1035,12 +1035,12 @@  void do_cpu_init(X86CPU *cpu)
     CPUState *cs = CPU(cpu);
     CPUX86State *env = &cpu->env;
     CPUX86State *save = g_new(CPUX86State, 1);
-    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
+    int sipi = atomic_read(&cs->interrupt_request) & CPU_INTERRUPT_SIPI;
 
     *save = *env;
 
     cpu_reset(cs);
-    cs->interrupt_request = sipi;
+    atomic_mb_set(&cs->interrupt_request, sipi);
     memcpy(&env->start_init_save, &save->start_init_save,
            offsetof(CPUX86State, end_init_save) -
            offsetof(CPUX86State, start_init_save));
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 5dd66809b0..e40c8d5461 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -2707,8 +2707,10 @@  static int kvm_put_vcpu_events(X86CPU *cpu, int level)
             /* As soon as these are moved to the kernel, remove them
              * from cs->interrupt_request.
              */
-            events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI;
-            events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT;
+            uint32_t interrupt_request = atomic_read(&cs->interrupt_request);
+
+            events.smi.pending = interrupt_request & CPU_INTERRUPT_SMI;
+            events.smi.latched_init = interrupt_request & CPU_INTERRUPT_INIT;
             cpu_reset_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI);
         } else {
             /* Keep these in cs->interrupt_request.  */
@@ -2999,11 +3001,12 @@  void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
 {
     X86CPU *x86_cpu = X86_CPU(cpu);
     CPUX86State *env = &x86_cpu->env;
+    int interrupt_request = atomic_read(&cpu->interrupt_request);
     int ret;
 
     /* Inject NMI */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
-        if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+    if (interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) {
+        if (interrupt_request & CPU_INTERRUPT_NMI) {
             qemu_mutex_lock_iothread();
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI);
             qemu_mutex_unlock_iothread();
@@ -3014,7 +3017,7 @@  void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
                         strerror(-ret));
             }
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_SMI) {
+        if (atomic_read(&cpu->interrupt_request) & CPU_INTERRUPT_SMI) {
             qemu_mutex_lock_iothread();
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI);
             qemu_mutex_unlock_iothread();
@@ -3035,12 +3038,12 @@  void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
      * or (for userspace APIC, but it is cheap to combine the checks here)
      * pending TPR access reports.
      */
-    if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
-        if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if (interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) {
+        if ((interrupt_request & CPU_INTERRUPT_INIT) &&
             !(env->hflags & HF_SMM_MASK)) {
             cpu->exit_request = 1;
         }
-        if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
+        if (interrupt_request & CPU_INTERRUPT_TPR) {
             cpu->exit_request = 1;
         }
     }
@@ -3048,11 +3051,12 @@  void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
     if (!kvm_pic_in_kernel()) {
         /* Try to inject an interrupt if the guest can accept it */
         if (run->ready_for_interrupt_injection &&
-            (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+            (interrupt_request & CPU_INTERRUPT_HARD) &&
             (env->eflags & IF_MASK)) {
             int irq;
 
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD);
+            interrupt_request &= ~CPU_INTERRUPT_HARD;
             irq = cpu_get_pic_interrupt(env);
             if (irq >= 0) {
                 struct kvm_interrupt intr;
@@ -3072,7 +3076,7 @@  void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
          * interrupt, request an interrupt window exit.  This will
          * cause a return to userspace as soon as the guest is ready to
          * receive interrupts. */
-        if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+        if (interrupt_request & CPU_INTERRUPT_HARD) {
             run->request_interrupt_window = 1;
         } else {
             run->request_interrupt_window = 0;
@@ -3118,8 +3122,9 @@  int kvm_arch_process_async_events(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
+    int interrupt_request = atomic_read(&cs->interrupt_request);
 
-    if (cs->interrupt_request & CPU_INTERRUPT_MCE) {
+    if (interrupt_request & CPU_INTERRUPT_MCE) {
         /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */
         assert(env->mcg_cap);
 
@@ -3142,7 +3147,7 @@  int kvm_arch_process_async_events(CPUState *cs)
         }
     }
 
-    if ((cs->interrupt_request & CPU_INTERRUPT_INIT) &&
+    if ((interrupt_request & CPU_INTERRUPT_INIT) &&
         !(env->hflags & HF_SMM_MASK)) {
         kvm_cpu_synchronize_state(cs);
         do_cpu_init(cpu);
@@ -3152,20 +3157,20 @@  int kvm_arch_process_async_events(CPUState *cs)
         return 0;
     }
 
-    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (interrupt_request & CPU_INTERRUPT_POLL) {
         cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
         apic_poll_irq(cpu->apic_state);
     }
-    if (((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (((interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) ||
-        (cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        (interrupt_request & CPU_INTERRUPT_NMI)) {
         cs->halted = 0;
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (interrupt_request & CPU_INTERRUPT_SIPI) {
         kvm_cpu_synchronize_state(cs);
         do_cpu_sipi(cpu);
     }
-    if (cs->interrupt_request & CPU_INTERRUPT_TPR) {
+    if (interrupt_request & CPU_INTERRUPT_TPR) {
         cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR);
         kvm_cpu_synchronize_state(cs);
         apic_handle_tpr_access_report(cpu->apic_state, env->eip,
@@ -3179,10 +3184,11 @@  static int kvm_handle_halt(X86CPU *cpu)
 {
     CPUState *cs = CPU(cpu);
     CPUX86State *env = &cpu->env;
+    int interrupt_request = atomic_read(&cs->interrupt_request);
 
-    if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (!((interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK)) &&
-        !(cs->interrupt_request & CPU_INTERRUPT_NMI)) {
+        !(interrupt_request & CPU_INTERRUPT_NMI)) {
         cs->halted = 1;
         return EXCP_HLT;
     }
diff --git a/target/i386/svm_helper.c b/target/i386/svm_helper.c
index c532639574..e18e53c869 100644
--- a/target/i386/svm_helper.c
+++ b/target/i386/svm_helper.c
@@ -316,7 +316,7 @@  void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     if (int_ctl & V_IRQ_MASK) {
         CPUState *cs = CPU(x86_env_get_cpu(env));
 
-        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
+        atomic_or(&cs->interrupt_request, CPU_INTERRUPT_VIRQ);
     }
 
     /* maybe we need to inject an event */
@@ -674,7 +674,7 @@  void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
                        env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
     int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
     int_ctl |= env->v_tpr & V_TPR_MASK;
-    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
+    if (atomic_read(&cs->interrupt_request) & CPU_INTERRUPT_VIRQ) {
         int_ctl |= V_IRQ_MASK;
     }
     x86_stl_phys(cs,
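
One subtlety in the kvm_arch_pre_run() hunks above: once interrupt_request
has been snapshotted into a local, the local can go stale when a flag is
cleared. The patch handles this by re-reading the field before the SMI
check and by masking CPU_INTERRUPT_HARD out of the local after
cpu_reset_interrupt(), so that the later request_interrupt_window check
agrees with the field. Reusing the stand-ins from the sketch above the
Comments section, and with cpu_reset_interrupt_sketch() as a placeholder
for QEMU's real helper:

static void cpu_reset_interrupt_sketch(CPUStateSketch *cs, uint32_t mask)
{
    __atomic_fetch_and(&cs->interrupt_request, ~mask, __ATOMIC_SEQ_CST);
}

static void pre_run_sketch(CPUStateSketch *cs)
{
    /* Snapshot once... */
    uint32_t interrupt_request =
        __atomic_load_n(&cs->interrupt_request, __ATOMIC_RELAXED);

    if (interrupt_request & CPU_INTERRUPT_HARD) {
        cpu_reset_interrupt_sketch(cs, CPU_INTERRUPT_HARD);
        /* ...and keep the stale local in sync with the cleared flag. */
        interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}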