
[v2,1/2] xen/x86: cleanup unused NMI/MCE code

Message ID: 20190723182530.24087-2-jgross@suse.com
State: Superseded
Series: xen: enhance temporary vcpu pinning

Commit Message

Jürgen Groß July 23, 2019, 6:25 p.m. UTC
pv_raise_interrupt() is only called for NMIs these days, so the MCE
specific part can be removed. Rename pv_raise_interrupt() to
pv_raise_nmi() and NMI_MCE_SOFTIRQ to NMI_SOFTIRQ.

Additionally, there is no need to pin the vcpu the NMI is delivered
to; that is a leftover of the (already removed) MCE handling. So remove
the pinning, too.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/arch/x86/pv/traps.c        | 88 ++++++++----------------------------------
 xen/arch/x86/traps.c           | 10 +----
 xen/common/domain.c            |  3 --
 xen/include/asm-x86/pv/traps.h |  8 ++--
 xen/include/asm-x86/softirq.h  |  2 +-
 xen/include/xen/sched.h        |  2 -
 6 files changed, 23 insertions(+), 90 deletions(-)

Comments

Andrew Cooper July 23, 2019, 6:48 p.m. UTC | #1
On 23/07/2019 19:25, Juergen Gross wrote:
> pv_raise_interrupt() is only called for NMIs these days, so the MCE
> specific part can be removed. Rename pv_raise_interrupt() to
> pv_raise_nmi() and NMI_MCE_SOFTIRQ to NMI_SOFTIRQ.

For posterity, it would be helpful to explicitly identify 355b0469a8,
which introduced the NMI and MCE pinning (AFAICT there was no NMI
pinning before that), and then 3a91769d6e, which removed the MCE
pinning.

Stated like that, I doubt the NMI pinning was ever relevant in practice.

>
> Additionally, there is no need to pin the vcpu the NMI is delivered
> to; that is a leftover of the (already removed) MCE handling. So remove
> the pinning, too.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>

Everything LGTM.  A few trivial notes.

> diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
> index 1740784ff2..9436c80047 100644
> --- a/xen/arch/x86/pv/traps.c
> +++ b/xen/arch/x86/pv/traps.c
> @@ -136,47 +136,21 @@ bool set_guest_nmi_trapbounce(void)
>      return !null_trap_bounce(curr, tb);
>  }
>  
> -struct softirq_trap {
> -    struct domain *domain;   /* domain to inject trap */
> -    struct vcpu *vcpu;       /* vcpu to inject trap */
> -    unsigned int processor;  /* physical cpu to inject trap */
> -};
> +static DEFINE_PER_CPU(struct vcpu *, softirq_nmi_vcpu);
>  
> -static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
> -
> -static void nmi_mce_softirq(void)
> +static void nmi_softirq(void)
>  {
>      unsigned int cpu = smp_processor_id();
> -    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
> -
> -    BUG_ON(st->vcpu == NULL);
> -
> -    /*
> -     * Set the tmp value unconditionally, so that the check in the iret
> -     * hypercall works.
> -     */
> -    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
> -                 st->vcpu->cpu_hard_affinity);
> +    struct vcpu **v_ptr = &per_cpu(softirq_nmi_vcpu, cpu);

There is only a single use of 'cpu' here, so you can drop that and use
this_cpu(softirq_nmi_vcpu) instead.
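
For illustration, the function with that suggestion applied would look
roughly as follows (an editorial sketch based on the hunk above, not
code from the posted series):

    static void nmi_softirq(void)
    {
        /* this_cpu() replaces the single-use 'cpu' local variable. */
        struct vcpu **v_ptr = &this_cpu(softirq_nmi_vcpu);

        BUG_ON(*v_ptr == NULL);

        /*
         * Only used to defer wakeup of domain/vcpu to a safe (non-NMI)
         * context.
         */
        vcpu_kick(*v_ptr);
        *v_ptr = NULL;
    }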

> diff --git a/xen/include/asm-x86/pv/traps.h b/xen/include/asm-x86/pv/traps.h
> index fcc75f5e9a..47d6cf5fc4 100644
> --- a/xen/include/asm-x86/pv/traps.h
> +++ b/xen/include/asm-x86/pv/traps.h
> @@ -27,8 +27,8 @@
>  
>  void pv_trap_init(void);
>  
> -/* Deliver interrupt to PV guest. Return 0 on success. */
> -int pv_raise_interrupt(struct vcpu *v, uint8_t vector);
> +/* Deliver NMI to PV guest. Return 0 on success. */
> +int pv_raise_nmi(struct vcpu *v);
>  
>  int pv_emulate_privileged_op(struct cpu_user_regs *regs);
>  void pv_emulate_gate_op(struct cpu_user_regs *regs);
> @@ -46,8 +46,8 @@ static inline bool pv_trap_callback_registered(const struct vcpu *v,
>  
>  static inline void pv_trap_init(void) {}
>  
> -/* Deliver interrupt to PV guest. Return 0 on success. */
> -static inline int pv_raise_interrupt(struct vcpu *v, uint8_t vector) { return -EOPNOTSUPP; }
> +/* Deliver NMI to PV guest. Return 0 on success. */
> +static inline int pv_raise_nmi(struct vcpu *v) { return -EOPNOTSUPP; }

I don't think duplicating the function description here is useful.
Instead, I'd recommend dropping these lines and commenting the function
once in pv/traps.c.  That comment should include the fact that it is
expected to be used in NMI context, which means it's not safe to use
printk() etc. in there.

~Andrew
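
For illustration, one way the single consolidated comment in pv/traps.c
might read, per the suggestion above (an editorial sketch, not code from
the posted series):

    /*
     * Deliver an NMI to a PV guest.  Return 0 on success.
     *
     * Expected to be called in NMI context, so printk() and anything
     * else that is not NMI-safe must not be used here.
     */
    int pv_raise_nmi(struct vcpu *v)
    {
        ...
    }
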
Jürgen Groß July 24, 2019, 5:06 a.m. UTC | #2
On 23.07.19 20:48, Andrew Cooper wrote:
> On 23/07/2019 19:25, Juergen Gross wrote:
>> pv_raise_interrupt() is only called for NMIs these days, so the MCE
>> specific part can be removed. Rename pv_raise_interrupt() to
>> pv_raise_nmi() and NMI_MCE_SOFTIRQ to NMI_SOFTIRQ.
> 
> For posterity, it would be helpful to explicitly identify 355b0469a8,
> which introduced the NMI and MCE pinning (AFAICT there was no NMI
> pinning before that), and then 3a91769d6e, which removed the MCE
> pinning.

Okay.

> 
> Stated like that, I doubt the NMI pinning was ever relevant in practice.

Indeed.

> 
>>
>> Additionally, there is no need to pin the vcpu the NMI is delivered
>> to; that is a leftover of the (already removed) MCE handling. So remove
>> the pinning, too.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
> 
> Everything LGTM.  A few trivial notes.
> 
>> diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
>> index 1740784ff2..9436c80047 100644
>> --- a/xen/arch/x86/pv/traps.c
>> +++ b/xen/arch/x86/pv/traps.c
>> @@ -136,47 +136,21 @@ bool set_guest_nmi_trapbounce(void)
>>       return !null_trap_bounce(curr, tb);
>>   }
>>   
>> -struct softirq_trap {
>> -    struct domain *domain;   /* domain to inject trap */
>> -    struct vcpu *vcpu;       /* vcpu to inject trap */
>> -    unsigned int processor;  /* physical cpu to inject trap */
>> -};
>> +static DEFINE_PER_CPU(struct vcpu *, softirq_nmi_vcpu);
>>   
>> -static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
>> -
>> -static void nmi_mce_softirq(void)
>> +static void nmi_softirq(void)
>>   {
>>       unsigned int cpu = smp_processor_id();
>> -    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
>> -
>> -    BUG_ON(st->vcpu == NULL);
>> -
>> -    /*
>> -     * Set the tmp value unconditionally, so that the check in the iret
>> -     * hypercall works.
>> -     */
>> -    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
>> -                 st->vcpu->cpu_hard_affinity);
>> +    struct vcpu **v_ptr = &per_cpu(softirq_nmi_vcpu, cpu);
> 
> There is only a single use of 'cpu' here, so you can drop that and use
> this_cpu(softirq_nmi_vcpu) instead.

Okay.

> 
>> diff --git a/xen/include/asm-x86/pv/traps.h b/xen/include/asm-x86/pv/traps.h
>> index fcc75f5e9a..47d6cf5fc4 100644
>> --- a/xen/include/asm-x86/pv/traps.h
>> +++ b/xen/include/asm-x86/pv/traps.h
>> @@ -27,8 +27,8 @@
>>   
>>   void pv_trap_init(void);
>>   
>> -/* Deliver interrupt to PV guest. Return 0 on success. */
>> -int pv_raise_interrupt(struct vcpu *v, uint8_t vector);
>> +/* Deliver NMI to PV guest. Return 0 on success. */
>> +int pv_raise_nmi(struct vcpu *v);
>>   
>>   int pv_emulate_privileged_op(struct cpu_user_regs *regs);
>>   void pv_emulate_gate_op(struct cpu_user_regs *regs);
>> @@ -46,8 +46,8 @@ static inline bool pv_trap_callback_registered(const struct vcpu *v,
>>   
>>   static inline void pv_trap_init(void) {}
>>   
>> -/* Deliver interrupt to PV guest. Return 0 on success. */
>> -static inline int pv_raise_interrupt(struct vcpu *v, uint8_t vector) { return -EOPNOTSUPP; }
>> +/* Deliver NMI to PV guest. Return 0 on success. */
>> +static inline int pv_raise_nmi(struct vcpu *v) { return -EOPNOTSUPP; }
> 
> I don't think duplicating the function description here is useful.
> Instead, I'd recommend dropping these lines and commenting the function
> once in pv/traps.c.  That comment should include the fact that it is
> expected to be used in NMI context, which means it's not safe to use
> printk() etc. in there.

Will do.


Juergen

Patch

diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 1740784ff2..9436c80047 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -136,47 +136,21 @@  bool set_guest_nmi_trapbounce(void)
     return !null_trap_bounce(curr, tb);
 }
 
-struct softirq_trap {
-    struct domain *domain;   /* domain to inject trap */
-    struct vcpu *vcpu;       /* vcpu to inject trap */
-    unsigned int processor;  /* physical cpu to inject trap */
-};
+static DEFINE_PER_CPU(struct vcpu *, softirq_nmi_vcpu);
 
-static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
-
-static void nmi_mce_softirq(void)
+static void nmi_softirq(void)
 {
     unsigned int cpu = smp_processor_id();
-    struct softirq_trap *st = &per_cpu(softirq_trap, cpu);
-
-    BUG_ON(st->vcpu == NULL);
-
-    /*
-     * Set the tmp value unconditionally, so that the check in the iret
-     * hypercall works.
-     */
-    cpumask_copy(st->vcpu->cpu_hard_affinity_tmp,
-                 st->vcpu->cpu_hard_affinity);
+    struct vcpu **v_ptr = &per_cpu(softirq_nmi_vcpu, cpu);
 
-    if ( (cpu != st->processor) ||
-         (st->processor != st->vcpu->processor) )
-    {
-
-        /*
-         * We are on a different physical cpu.  Make sure to wakeup the vcpu on
-         * the specified processor.
-         */
-        vcpu_set_hard_affinity(st->vcpu, cpumask_of(st->processor));
-
-        /* Affinity is restored in the iret hypercall. */
-    }
+    BUG_ON(*v_ptr == NULL);
 
     /*
-     * Only used to defer wakeup of domain/vcpu to a safe (non-NMI/MCE)
+     * Only used to defer wakeup of domain/vcpu to a safe (non-NMI)
      * context.
      */
-    vcpu_kick(st->vcpu);
-    st->vcpu = NULL;
+    vcpu_kick(*v_ptr);
+    *v_ptr = NULL;
 }
 
 void __init pv_trap_init(void)
@@ -189,50 +163,22 @@  void __init pv_trap_init(void)
     _set_gate(idt_table + LEGACY_SYSCALL_VECTOR, SYS_DESC_trap_gate, 3,
               &int80_direct_trap);
 
-    open_softirq(NMI_MCE_SOFTIRQ, nmi_mce_softirq);
+    open_softirq(NMI_SOFTIRQ, nmi_softirq);
 }
 
-int pv_raise_interrupt(struct vcpu *v, uint8_t vector)
+int pv_raise_nmi(struct vcpu *v)
 {
-    struct softirq_trap *st = &per_cpu(softirq_trap, smp_processor_id());
+    struct vcpu **v_ptr = &per_cpu(softirq_nmi_vcpu, smp_processor_id());
 
-    switch ( vector )
+    if ( cmpxchgptr(v_ptr, NULL, v) )
+        return -EBUSY;
+    if ( !test_and_set_bool(v->nmi_pending) )
     {
-    case TRAP_nmi:
-        if ( cmpxchgptr(&st->vcpu, NULL, v) )
-            return -EBUSY;
-        if ( !test_and_set_bool(v->nmi_pending) )
-        {
-            st->domain = v->domain;
-            st->processor = v->processor;
-
-            /* Not safe to wake up a vcpu here */
-            raise_softirq(NMI_MCE_SOFTIRQ);
-            return 0;
-        }
-        st->vcpu = NULL;
-        break;
-
-    case TRAP_machine_check:
-        if ( cmpxchgptr(&st->vcpu, NULL, v) )
-            return -EBUSY;
-
-        /*
-         * We are called by the machine check (exception or polling) handlers
-         * on the physical CPU that reported a machine check error.
-         */
-        if ( !test_and_set_bool(v->mce_pending) )
-        {
-            st->domain = v->domain;
-            st->processor = v->processor;
-
-            /* not safe to wake up a vcpu here */
-            raise_softirq(NMI_MCE_SOFTIRQ);
-            return 0;
-        }
-        st->vcpu = NULL;
-        break;
+        /* Not safe to wake up a vcpu here */
+        raise_softirq(NMI_SOFTIRQ);
+        return 0;
     }
+    *v_ptr = NULL;
 
     /* Delivery failed */
     return -EIO;
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 25b4b47e5e..08d7edc568 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1600,14 +1600,6 @@  void async_exception_cleanup(struct vcpu *curr)
     if ( !curr->async_exception_mask )
         return;
 
-    /* Restore affinity.  */
-    if ( !cpumask_empty(curr->cpu_hard_affinity_tmp) &&
-         !cpumask_equal(curr->cpu_hard_affinity_tmp, curr->cpu_hard_affinity) )
-    {
-        vcpu_set_hard_affinity(curr, curr->cpu_hard_affinity_tmp);
-        cpumask_clear(curr->cpu_hard_affinity_tmp);
-    }
-
     if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
         trap = __scanbit(curr->async_exception_mask, VCPU_TRAP_NONE);
     else
@@ -1634,7 +1626,7 @@  static void nmi_hwdom_report(unsigned int reason_idx)
 
     set_bit(reason_idx, nmi_reason(d));
 
-    pv_raise_interrupt(d->vcpu[0], TRAP_nmi);
+    pv_raise_nmi(d->vcpu[0]);
 }
 
 static void pci_serr_error(const struct cpu_user_regs *regs)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 55aa759b75..bc56a51815 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -133,7 +133,6 @@  static void vcpu_info_reset(struct vcpu *v)
 static void vcpu_destroy(struct vcpu *v)
 {
     free_cpumask_var(v->cpu_hard_affinity);
-    free_cpumask_var(v->cpu_hard_affinity_tmp);
     free_cpumask_var(v->cpu_hard_affinity_saved);
     free_cpumask_var(v->cpu_soft_affinity);
 
@@ -161,7 +160,6 @@  struct vcpu *vcpu_create(
     grant_table_init_vcpu(v);
 
     if ( !zalloc_cpumask_var(&v->cpu_hard_affinity) ||
-         !zalloc_cpumask_var(&v->cpu_hard_affinity_tmp) ||
          !zalloc_cpumask_var(&v->cpu_hard_affinity_saved) ||
          !zalloc_cpumask_var(&v->cpu_soft_affinity) )
         goto fail;
@@ -1269,7 +1267,6 @@  int vcpu_reset(struct vcpu *v)
     v->async_exception_mask = 0;
     memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
 #endif
-    cpumask_clear(v->cpu_hard_affinity_tmp);
     clear_bit(_VPF_blocked, &v->pause_flags);
     clear_bit(_VPF_in_reset, &v->pause_flags);
 
diff --git a/xen/include/asm-x86/pv/traps.h b/xen/include/asm-x86/pv/traps.h
index fcc75f5e9a..47d6cf5fc4 100644
--- a/xen/include/asm-x86/pv/traps.h
+++ b/xen/include/asm-x86/pv/traps.h
@@ -27,8 +27,8 @@ 
 
 void pv_trap_init(void);
 
-/* Deliver interrupt to PV guest. Return 0 on success. */
-int pv_raise_interrupt(struct vcpu *v, uint8_t vector);
+/* Deliver NMI to PV guest. Return 0 on success. */
+int pv_raise_nmi(struct vcpu *v);
 
 int pv_emulate_privileged_op(struct cpu_user_regs *regs);
 void pv_emulate_gate_op(struct cpu_user_regs *regs);
@@ -46,8 +46,8 @@  static inline bool pv_trap_callback_registered(const struct vcpu *v,
 
 static inline void pv_trap_init(void) {}
 
-/* Deliver interrupt to PV guest. Return 0 on success. */
-static inline int pv_raise_interrupt(struct vcpu *v, uint8_t vector) { return -EOPNOTSUPP; }
+/* Deliver NMI to PV guest. Return 0 on success. */
+static inline int pv_raise_nmi(struct vcpu *v) { return -EOPNOTSUPP; }
 
 static inline int pv_emulate_privileged_op(struct cpu_user_regs *regs) { return 0; }
 static inline void pv_emulate_gate_op(struct cpu_user_regs *regs) {}
diff --git a/xen/include/asm-x86/softirq.h b/xen/include/asm-x86/softirq.h
index 5c1a7db566..0b7a77f11f 100644
--- a/xen/include/asm-x86/softirq.h
+++ b/xen/include/asm-x86/softirq.h
@@ -1,7 +1,7 @@ 
 #ifndef __ASM_SOFTIRQ_H__
 #define __ASM_SOFTIRQ_H__
 
-#define NMI_MCE_SOFTIRQ        (NR_COMMON_SOFTIRQS + 0)
+#define NMI_SOFTIRQ            (NR_COMMON_SOFTIRQS + 0)
 #define TIME_CALIBRATE_SOFTIRQ (NR_COMMON_SOFTIRQS + 1)
 #define VCPU_KICK_SOFTIRQ      (NR_COMMON_SOFTIRQS + 2)
 
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index b40c8fd138..c197e93d73 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -245,8 +245,6 @@  struct vcpu
 
     /* Bitmask of CPUs on which this VCPU may run. */
     cpumask_var_t    cpu_hard_affinity;
-    /* Used to change affinity temporarily. */
-    cpumask_var_t    cpu_hard_affinity_tmp;
     /* Used to restore affinity across S3. */
     cpumask_var_t    cpu_hard_affinity_saved;