
[3/3] xen/x86: Rename and simplify async_event_* infrastructure

Message ID 20200217111740.7298-4-andrew.cooper3@citrix.com (mailing list archive)
State New, archived
Series xen: async_exception_* cleanup

Commit Message

Andrew Cooper Feb. 17, 2020, 11:17 a.m. UTC
The name async_exception isn't appropriate.  NMI isn't an exception at all,
and while MCE is classified as an exception (i.e. can occur at any point), the
mechanics of injecting it behave like other external interrupts.  Rename to
async_event_*, which is also a little shorter.

Drop VCPU_TRAP_NONE and renumber VCPU_TRAP_* to be 0-based, rather than
1-based, and remove async_exception_state(), which hides the -1 fixup
internally.
This shifts the bits used in async_event_mask along by one, but doesn't alter
the overall logic.

Drop the {nmi,mce}_{state,pending} defines, which obfuscate the data layout.
Instead, use an anonymous union to overlay names on the async_event[] array,
to retain the easy-to-follow v->arch.{nmi,mce}_pending logic.

No functional change.
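
For reference, the renumbering shifts the mask bits down by one, as the
xen/include/asm-x86/domain.h hunk at the end of this patch shows:

    /* Before: 1-based, with an unused zero value. */
    #define VCPU_TRAP_NONE         0
    #define VCPU_TRAP_NMI          1    /* mask bit: 1 << 1 == 0x2 */
    #define VCPU_TRAP_MCE          2    /* mask bit: 1 << 2 == 0x4 */
    #define VCPU_TRAP_LAST         VCPU_TRAP_MCE

    /* After: 0-based, with VCPU_TRAP_NONE dropped. */
    #define VCPU_TRAP_NMI          0    /* mask bit: 1 << 0 == 0x1 */
    #define VCPU_TRAP_MCE          1    /* mask bit: 1 << 1 == 0x2 */
    #define VCPU_TRAP_LAST         VCPU_TRAP_MCE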

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wl@xen.org>
CC: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/domain.c              |  5 ++---
 xen/arch/x86/nmi.c                 |  2 +-
 xen/arch/x86/pv/iret.c             | 15 +++++++--------
 xen/arch/x86/x86_64/asm-offsets.c  |  6 +++---
 xen/arch/x86/x86_64/compat/entry.S | 12 ++++++------
 xen/arch/x86/x86_64/entry.S        | 12 ++++++------
 xen/include/asm-x86/domain.h       | 33 ++++++++++++++++-----------------
 7 files changed, 41 insertions(+), 44 deletions(-)

Comments

Jan Beulich Feb. 18, 2020, 4:31 p.m. UTC | #1
On 17.02.2020 12:17, Andrew Cooper wrote:
> --- a/xen/arch/x86/pv/iret.c
> +++ b/xen/arch/x86/pv/iret.c
> @@ -27,15 +27,15 @@ static void async_exception_cleanup(struct vcpu *curr)
>  {
>      unsigned int trap;
>  
> -    if ( !curr->arch.async_exception_mask )
> +    if ( !curr->arch.async_event_mask )
>          return;
>  
> -    if ( !(curr->arch.async_exception_mask & (curr->arch.async_exception_mask - 1)) )
> -        trap = __scanbit(curr->arch.async_exception_mask, VCPU_TRAP_NONE);
> +    if ( !(curr->arch.async_event_mask & (curr->arch.async_event_mask - 1)) )
> +        trap = __scanbit(curr->arch.async_event_mask, 0);

The transformation just by itself is clearly not "no functional
change"; it only becomes one together with the prior if(), and it took
me a little while to convince myself of that.  I don't recall why
VCPU_TRAP_NONE was used here originally (possibly just because it is
zero), but now that 0 has an actual meaning, I think it would be better
to use VCPU_TRAP_LAST + 1 instead.
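
For reference, the equivalence hinges on __scanbit()'s behaviour: it
yields the index of the lowest set bit, falling back to its second
argument only when the input is zero.  A minimal behavioural model (a
sketch only, not the in-tree x86 inline):

    /* Model of __scanbit(val, max): lowest set bit of val, or max if val == 0. */
    static unsigned int scanbit_model(unsigned long val, unsigned int max)
    {
        return val ? (unsigned int)__builtin_ctzl(val) : max;
    }

Since async_exception_cleanup() returns early when the mask is zero, the
fallback argument is never actually consumed, which is why replacing
VCPU_TRAP_NONE (0) with a literal 0 is no functional change; the
question is only which defensive value to pass.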

> @@ -557,12 +546,22 @@ struct arch_vcpu
>  
>      struct vpmu_struct vpmu;
>  
> -    struct {
> -        bool    pending;
> -        uint8_t old_mask;
> -    } async_exception_state[VCPU_TRAP_LAST];
> -#define async_exception_state(t) async_exception_state[(t)-1]
> -    uint8_t async_exception_mask;
> +    union {
> +#define VCPU_TRAP_NMI          0
> +#define VCPU_TRAP_MCE          1
> +#define VCPU_TRAP_LAST         VCPU_TRAP_MCE
> +        struct {
> +            bool    pending;
> +            uint8_t old_mask;
> +        } async_event[VCPU_TRAP_LAST + 1];
> +        struct {
> +            bool    nmi_pending;
> +            uint8_t nmi_old_mask;
> +            bool    mce_pending;
> +            uint8_t mce_old_mask;
> +        };
> +    };

How about

    union {
#define VCPU_TRAP_NMI          0
#define VCPU_TRAP_MCE          1
#define VCPU_TRAP_LAST         VCPU_TRAP_MCE
        struct async_event_state {
            bool    pending;
            uint8_t old_mask;
        } async_event[VCPU_TRAP_LAST + 1];
        struct {
            struct async_event_state nmi;
            struct async_event_state mce;
        };
    };

(structure tag subject to improvement)?

Jan
Andrew Cooper Feb. 20, 2020, 6:24 p.m. UTC | #2
On 18/02/2020 16:31, Jan Beulich wrote:
> On 17.02.2020 12:17, Andrew Cooper wrote:
>> --- a/xen/arch/x86/pv/iret.c
>> +++ b/xen/arch/x86/pv/iret.c
>> @@ -27,15 +27,15 @@ static void async_exception_cleanup(struct vcpu *curr)
>>  {
>>      unsigned int trap;
>>  
>> -    if ( !curr->arch.async_exception_mask )
>> +    if ( !curr->arch.async_event_mask )
>>          return;
>>  
>> -    if ( !(curr->arch.async_exception_mask & (curr->arch.async_exception_mask - 1)) )
>> -        trap = __scanbit(curr->arch.async_exception_mask, VCPU_TRAP_NONE);
>> +    if ( !(curr->arch.async_event_mask & (curr->arch.async_event_mask - 1)) )
>> +        trap = __scanbit(curr->arch.async_event_mask, 0);
> The transformation just by itself is clearly not "no functional
> change"; it only becomes one together with the prior if(), and it
> took me a little while to convince myself of that.

Well... It is no functional change, even if the fact isn't terribly obvious.

> I don't recall why VCPU_TRAP_NONE was used here originally
> (possibly just because it is zero), but now that 0 has an actual
> meaning, I think it would be better to use VCPU_TRAP_LAST + 1
> instead.

Just outside the quoted context is:

    if ( unlikely(trap > VCPU_TRAP_LAST) )
    {
        ASSERT_UNREACHABLE();
        return;
    }

which would trigger on such an error path.  That said, the following:

    /* Restore previous asynchronous exception mask. */
    curr->arch.async_event_mask = curr->arch.async_event[trap].old_mask;

will just pick the NMI old_mask in the case of something going wrong.


I deliberately made no adjustment here.  I intend to replace it with
something which is correct, rather than spending time trying to figure
out how some clearly broken logic was intended to "work".
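
To make the paths under discussion easier to follow, here is the
post-patch body of async_exception_cleanup(), condensed from the hunk
below, with the contentious spots annotated (a reading aid, not new
code):

    static void async_exception_cleanup(struct vcpu *curr)
    {
        unsigned int trap;

        if ( !curr->arch.async_event_mask )
            return;                               /* nothing masked */

        if ( !(curr->arch.async_event_mask &
               (curr->arch.async_event_mask - 1)) )
            /*
             * Exactly one bit set: its index is the trap number.  The
             * fallback of 0 is unreachable, as the mask is non-zero.
             */
            trap = __scanbit(curr->arch.async_event_mask, 0);
        else
            /* Several bits set: find the trap whose delivery added its bit. */
            for ( trap = 0; trap <= VCPU_TRAP_LAST; ++trap )
                if ( (curr->arch.async_event_mask ^
                      curr->arch.async_event[trap].old_mask) == (1u << trap) )
                    break;

        if ( unlikely(trap > VCPU_TRAP_LAST) )    /* the loop found no match */
        {
            ASSERT_UNREACHABLE();
            return;
        }

        /*
         * Were the __scanbit() fallback ever returned (it cannot be,
         * given the early exit above), trap would be 0 and the NMI
         * old_mask would be restored here; a fallback of
         * VCPU_TRAP_LAST + 1 would instead trip the check above.
         */
        curr->arch.async_event_mask = curr->arch.async_event[trap].old_mask;
    }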

>
>> @@ -557,12 +546,22 @@ struct arch_vcpu
>>  
>>      struct vpmu_struct vpmu;
>>  
>> -    struct {
>> -        bool    pending;
>> -        uint8_t old_mask;
>> -    } async_exception_state[VCPU_TRAP_LAST];
>> -#define async_exception_state(t) async_exception_state[(t)-1]
>> -    uint8_t async_exception_mask;
>> +    union {
>> +#define VCPU_TRAP_NMI          0
>> +#define VCPU_TRAP_MCE          1
>> +#define VCPU_TRAP_LAST         VCPU_TRAP_MCE
>> +        struct {
>> +            bool    pending;
>> +            uint8_t old_mask;
>> +        } async_event[VCPU_TRAP_LAST + 1];
>> +        struct {
>> +            bool    nmi_pending;
>> +            uint8_t nmi_old_mask;
>> +            bool    mce_pending;
>> +            uint8_t mce_old_mask;
>> +        };
>> +    };
> How about
>
>     union {
> #define VCPU_TRAP_NMI          0
> #define VCPU_TRAP_MCE          1
> #define VCPU_TRAP_LAST         VCPU_TRAP_MCE
>         struct async_event_state {
>             bool    pending;
>             uint8_t old_mask;
>         } async_event[VCPU_TRAP_LAST + 1];
>         struct {
>             struct async_event_state nmi;
>             struct async_event_state mce;
>         };
>     };
>
> (structure tag subject to improvement)?

Can do.  I don't expect any of this to survive, but I also don't yet
have a clear idea what form the eventual solution is going to take.

~Andrew

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index fe63c23676..7ee6853522 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1246,9 +1246,8 @@  int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 int arch_vcpu_reset(struct vcpu *v)
 {
-    v->arch.async_exception_mask = 0;
-    memset(v->arch.async_exception_state, 0,
-           sizeof(v->arch.async_exception_state));
+    v->arch.async_event_mask = 0;
+    memset(v->arch.async_event, 0, sizeof(v->arch.async_event));
 
     if ( is_pv_vcpu(v) )
     {
diff --git a/xen/arch/x86/nmi.c b/xen/arch/x86/nmi.c
index 0390d9b0b4..44507cd86b 100644
--- a/xen/arch/x86/nmi.c
+++ b/xen/arch/x86/nmi.c
@@ -600,7 +600,7 @@  static void do_nmi_stats(unsigned char key)
         return;
 
     pend = v->arch.nmi_pending;
-    mask = v->arch.async_exception_mask & (1 << VCPU_TRAP_NMI);
+    mask = v->arch.async_event_mask & (1 << VCPU_TRAP_NMI);
     if ( pend || mask )
         printk("%pv: NMI%s%s\n",
                v, pend ? " pending" : "", mask ? " masked" : "");
diff --git a/xen/arch/x86/pv/iret.c b/xen/arch/x86/pv/iret.c
index 9e34b616f9..27bb39f162 100644
--- a/xen/arch/x86/pv/iret.c
+++ b/xen/arch/x86/pv/iret.c
@@ -27,15 +27,15 @@  static void async_exception_cleanup(struct vcpu *curr)
 {
     unsigned int trap;
 
-    if ( !curr->arch.async_exception_mask )
+    if ( !curr->arch.async_event_mask )
         return;
 
-    if ( !(curr->arch.async_exception_mask & (curr->arch.async_exception_mask - 1)) )
-        trap = __scanbit(curr->arch.async_exception_mask, VCPU_TRAP_NONE);
+    if ( !(curr->arch.async_event_mask & (curr->arch.async_event_mask - 1)) )
+        trap = __scanbit(curr->arch.async_event_mask, 0);
     else
-        for ( trap = VCPU_TRAP_NONE + 1; trap <= VCPU_TRAP_LAST; ++trap )
-            if ( (curr->arch.async_exception_mask ^
-                  curr->arch.async_exception_state(trap).old_mask) == (1u << trap) )
+        for ( trap = 0; trap <= VCPU_TRAP_LAST; ++trap )
+            if ( (curr->arch.async_event_mask ^
+                  curr->arch.async_event[trap].old_mask) == (1u << trap) )
                 break;
     if ( unlikely(trap > VCPU_TRAP_LAST) )
     {
@@ -44,8 +44,7 @@  static void async_exception_cleanup(struct vcpu *curr)
     }
 
     /* Restore previous asynchronous exception mask. */
-    curr->arch.async_exception_mask =
-        curr->arch.async_exception_state(trap).old_mask;
+    curr->arch.async_event_mask = curr->arch.async_event[trap].old_mask;
 }
 
 unsigned long do_iret(void)
diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
index b8e8510439..59b62649e2 100644
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -74,9 +74,9 @@  void __dummy__(void)
     OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
     OFFSET(VCPU_nmi_pending, struct vcpu, arch.nmi_pending);
     OFFSET(VCPU_mce_pending, struct vcpu, arch.mce_pending);
-    OFFSET(VCPU_nmi_old_mask, struct vcpu, arch.nmi_state.old_mask);
-    OFFSET(VCPU_mce_old_mask, struct vcpu, arch.mce_state.old_mask);
-    OFFSET(VCPU_async_exception_mask, struct vcpu, arch.async_exception_mask);
+    OFFSET(VCPU_nmi_old_mask, struct vcpu, arch.nmi_old_mask);
+    OFFSET(VCPU_mce_old_mask, struct vcpu, arch.mce_old_mask);
+    OFFSET(VCPU_async_event_mask, struct vcpu, arch.async_event_mask);
     DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
     DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
     DEFINE(_VGCF_syscall_disables_events,  _VGCF_syscall_disables_events);
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 3cd375bd48..17b1153f79 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -84,33 +84,33 @@  compat_process_softirqs:
 	ALIGN
 /* %rbx: struct vcpu */
 compat_process_mce:
-        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
+        testb $1 << VCPU_TRAP_MCE, VCPU_async_event_mask(%rbx)
         jnz   .Lcompat_test_guest_nmi
         sti
         movb  $0, VCPU_mce_pending(%rbx)
         call  set_guest_machinecheck_trapbounce
         test  %al, %al
         jz    compat_test_all_events
-        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+        movzbl VCPU_async_event_mask(%rbx), %edx    # save mask for the
         movb %dl,VCPU_mce_old_mask(%rbx)            # iret hypercall
         orl  $1 << VCPU_TRAP_MCE,%edx
-        movb %dl,VCPU_async_exception_mask(%rbx)
+        movb %dl, VCPU_async_event_mask(%rbx)
         jmp   compat_process_trap
 
 	ALIGN
 /* %rbx: struct vcpu */
 compat_process_nmi:
-        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
+        testb $1 << VCPU_TRAP_NMI, VCPU_async_event_mask(%rbx)
         jnz   compat_test_guest_events
         sti
         movb  $0, VCPU_nmi_pending(%rbx)
         call  set_guest_nmi_trapbounce
         test  %al, %al
         jz    compat_test_all_events
-        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+        movzbl VCPU_async_event_mask(%rbx), %edx    # save mask for the
         movb %dl,VCPU_nmi_old_mask(%rbx)            # iret hypercall
         orl  $1 << VCPU_TRAP_NMI,%edx
-        movb %dl,VCPU_async_exception_mask(%rbx)
+        movb %dl, VCPU_async_event_mask(%rbx)
         /* FALLTHROUGH */
 compat_process_trap:
         leaq  VCPU_trap_bounce(%rbx),%rdx
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 997c481ecb..7832ff6fda 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -96,33 +96,33 @@  process_softirqs:
         ALIGN
 /* %rbx: struct vcpu */
 process_mce:
-        testb $1 << VCPU_TRAP_MCE, VCPU_async_exception_mask(%rbx)
+        testb $1 << VCPU_TRAP_MCE, VCPU_async_event_mask(%rbx)
         jnz  .Ltest_guest_nmi
         sti
         movb $0, VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce
         test %al, %al
         jz   test_all_events
-        movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
+        movzbl VCPU_async_event_mask(%rbx), %edx     # save mask for the
         movb %dl, VCPU_mce_old_mask(%rbx)            # iret hypercall
         orl  $1 << VCPU_TRAP_MCE, %edx
-        movb %dl, VCPU_async_exception_mask(%rbx)
+        movb %dl, VCPU_async_event_mask(%rbx)
         jmp  process_trap
 
         ALIGN
 /* %rbx: struct vcpu */
 process_nmi:
-        testb $1 << VCPU_TRAP_NMI, VCPU_async_exception_mask(%rbx)
+        testb $1 << VCPU_TRAP_NMI, VCPU_async_event_mask(%rbx)
         jnz  test_guest_events
         sti
         movb $0, VCPU_nmi_pending(%rbx)
         call set_guest_nmi_trapbounce
         test %al, %al
         jz   test_all_events
-        movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
+        movzbl VCPU_async_event_mask(%rbx), %edx     # save mask for the
         movb %dl, VCPU_nmi_old_mask(%rbx)            # iret hypercall
         orl  $1 << VCPU_TRAP_NMI, %edx
-        movb %dl, VCPU_async_exception_mask(%rbx)
+        movb %dl, VCPU_async_event_mask(%rbx)
         /* FALLTHROUGH */
 process_trap:
         leaq VCPU_trap_bounce(%rbx), %rdx
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 105adf96eb..dd056360b4 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -19,17 +19,6 @@ 
 #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
 #define is_domain_direct_mapped(d) ((void)(d), 0)
 
-#define VCPU_TRAP_NONE         0
-#define VCPU_TRAP_NMI          1
-#define VCPU_TRAP_MCE          2
-#define VCPU_TRAP_LAST         VCPU_TRAP_MCE
-
-#define nmi_state              async_exception_state(VCPU_TRAP_NMI)
-#define mce_state              async_exception_state(VCPU_TRAP_MCE)
-
-#define nmi_pending            nmi_state.pending
-#define mce_pending            mce_state.pending
-
 struct trap_bounce {
     uint32_t      error_code;
     uint8_t       flags; /* TBF_ */
@@ -557,12 +546,22 @@  struct arch_vcpu
 
     struct vpmu_struct vpmu;
 
-    struct {
-        bool    pending;
-        uint8_t old_mask;
-    } async_exception_state[VCPU_TRAP_LAST];
-#define async_exception_state(t) async_exception_state[(t)-1]
-    uint8_t async_exception_mask;
+    union {
+#define VCPU_TRAP_NMI          0
+#define VCPU_TRAP_MCE          1
+#define VCPU_TRAP_LAST         VCPU_TRAP_MCE
+        struct {
+            bool    pending;
+            uint8_t old_mask;
+        } async_event[VCPU_TRAP_LAST + 1];
+        struct {
+            bool    nmi_pending;
+            uint8_t nmi_old_mask;
+            bool    mce_pending;
+            uint8_t mce_old_mask;
+        };
+    };
+    uint8_t async_event_mask;
 
     /* Virtual Machine Extensions */
     union {