@@ -1246,9 +1246,8 @@ int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 int arch_vcpu_reset(struct vcpu *v)
 {
-    v->arch.async_exception_mask = 0;
-    memset(v->arch.async_exception_state, 0,
-           sizeof(v->arch.async_exception_state));
+    v->arch.async_event_mask = 0;
+    memset(v->arch.async_event, 0, sizeof(v->arch.async_event));
 
     if ( is_pv_vcpu(v) )
     {
@@ -600,7 +600,7 @@ static void do_nmi_stats(unsigned char key)
         return;
 
     pend = v->arch.nmi_pending;
-    mask = v->arch.async_exception_mask & (1 << VCPU_TRAP_NMI);
+    mask = v->arch.async_event_mask & (1 << VCPU_TRAP_NMI);
     if ( pend || mask )
         printk("%pv: NMI%s%s\n",
                v, pend ? " pending" : "", mask ? " masked" : "");
@@ -27,15 +27,15 @@ static void async_exception_cleanup(struct vcpu *curr)
 {
     unsigned int trap;
 
-    if ( !curr->arch.async_exception_mask )
+    if ( !curr->arch.async_event_mask )
         return;
 
-    if ( !(curr->arch.async_exception_mask & (curr->arch.async_exception_mask - 1)) )
-        trap = __scanbit(curr->arch.async_exception_mask, VCPU_TRAP_NONE);
+    if ( !(curr->arch.async_event_mask & (curr->arch.async_event_mask - 1)) )
+        trap = __scanbit(curr->arch.async_event_mask, 0);
     else
-        for ( trap = VCPU_TRAP_NONE + 1; trap <= VCPU_TRAP_LAST; ++trap )
-            if ( (curr->arch.async_exception_mask ^
-                  curr->arch.async_exception_state(trap).old_mask) == (1u << trap) )
+        for ( trap = 0; trap <= VCPU_TRAP_LAST; ++trap )
+            if ( (curr->arch.async_event_mask ^
+                  curr->arch.async_event[trap].old_mask) == (1u << trap) )
                 break;
     if ( unlikely(trap > VCPU_TRAP_LAST) )
     {
@@ -44,8 +44,7 @@ static void async_exception_cleanup(struct vcpu *curr)
     }
 
     /* Restore previous asynchronous exception mask. */
-    curr->arch.async_exception_mask =
-        curr->arch.async_exception_state(trap).old_mask;
+    curr->arch.async_event_mask = curr->arch.async_event[trap].old_mask;
 }
 
 unsigned long do_iret(void)
@@ -74,9 +74,9 @@ void __dummy__(void)
     OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
     OFFSET(VCPU_nmi_pending, struct vcpu, arch.nmi_pending);
     OFFSET(VCPU_mce_pending, struct vcpu, arch.mce_pending);
-    OFFSET(VCPU_nmi_old_mask, struct vcpu, arch.nmi_state.old_mask);
-    OFFSET(VCPU_mce_old_mask, struct vcpu, arch.mce_state.old_mask);
-    OFFSET(VCPU_async_exception_mask, struct vcpu, arch.async_exception_mask);
+    OFFSET(VCPU_nmi_old_mask, struct vcpu, arch.nmi_old_mask);
+    OFFSET(VCPU_mce_old_mask, struct vcpu, arch.mce_old_mask);
+    OFFSET(VCPU_async_event_mask, struct vcpu, arch.async_event_mask);
     DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
     DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
     DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
@@ -84,33 +84,33 @@ compat_process_softirqs:
         ALIGN
 /* %rbx: struct vcpu */
 compat_process_mce:
-        testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
+        testb $1 << VCPU_TRAP_MCE, VCPU_async_event_mask(%rbx)
         jnz .Lcompat_test_guest_nmi
         sti
         movb $0, VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce
         test %al, %al
         jz compat_test_all_events
-        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+        movzbl VCPU_async_event_mask(%rbx), %edx # save mask for the
         movb %dl,VCPU_mce_old_mask(%rbx) # iret hypercall
         orl $1 << VCPU_TRAP_MCE,%edx
-        movb %dl,VCPU_async_exception_mask(%rbx)
+        movb %dl, VCPU_async_event_mask(%rbx)
         jmp compat_process_trap
 
         ALIGN
 /* %rbx: struct vcpu */
 compat_process_nmi:
-        testb $1 << VCPU_TRAP_NMI,VCPU_async_exception_mask(%rbx)
+        testb $1 << VCPU_TRAP_NMI, VCPU_async_event_mask(%rbx)
         jnz compat_test_guest_events
         sti
         movb $0, VCPU_nmi_pending(%rbx)
         call set_guest_nmi_trapbounce
         test %al, %al
         jz compat_test_all_events
-        movzbl VCPU_async_exception_mask(%rbx),%edx # save mask for the
+        movzbl VCPU_async_event_mask(%rbx), %edx # save mask for the
         movb %dl,VCPU_nmi_old_mask(%rbx) # iret hypercall
         orl $1 << VCPU_TRAP_NMI,%edx
-        movb %dl,VCPU_async_exception_mask(%rbx)
+        movb %dl, VCPU_async_event_mask(%rbx)
         /* FALLTHROUGH */
 compat_process_trap:
         leaq VCPU_trap_bounce(%rbx),%rdx
@@ -96,33 +96,33 @@ process_softirqs:
         ALIGN
 /* %rbx: struct vcpu */
 process_mce:
-        testb $1 << VCPU_TRAP_MCE, VCPU_async_exception_mask(%rbx)
+        testb $1 << VCPU_TRAP_MCE, VCPU_async_event_mask(%rbx)
         jnz .Ltest_guest_nmi
         sti
         movb $0, VCPU_mce_pending(%rbx)
         call set_guest_machinecheck_trapbounce
         test %al, %al
         jz test_all_events
-        movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
+        movzbl VCPU_async_event_mask(%rbx), %edx # save mask for the
         movb %dl, VCPU_mce_old_mask(%rbx) # iret hypercall
         orl $1 << VCPU_TRAP_MCE, %edx
-        movb %dl, VCPU_async_exception_mask(%rbx)
+        movb %dl, VCPU_async_event_mask(%rbx)
         jmp process_trap
 
         ALIGN
 /* %rbx: struct vcpu */
 process_nmi:
-        testb $1 << VCPU_TRAP_NMI, VCPU_async_exception_mask(%rbx)
+        testb $1 << VCPU_TRAP_NMI, VCPU_async_event_mask(%rbx)
         jnz test_guest_events
         sti
         movb $0, VCPU_nmi_pending(%rbx)
         call set_guest_nmi_trapbounce
         test %al, %al
         jz test_all_events
-        movzbl VCPU_async_exception_mask(%rbx), %edx # save mask for the
+        movzbl VCPU_async_event_mask(%rbx), %edx # save mask for the
         movb %dl, VCPU_nmi_old_mask(%rbx) # iret hypercall
         orl $1 << VCPU_TRAP_NMI, %edx
-        movb %dl, VCPU_async_exception_mask(%rbx)
+        movb %dl, VCPU_async_event_mask(%rbx)
         /* FALLTHROUGH */
 process_trap:
         leaq VCPU_trap_bounce(%rbx), %rdx
@@ -19,17 +19,6 @@
 #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
 #define is_domain_direct_mapped(d) ((void)(d), 0)
 
-#define VCPU_TRAP_NONE 0
-#define VCPU_TRAP_NMI 1
-#define VCPU_TRAP_MCE 2
-#define VCPU_TRAP_LAST VCPU_TRAP_MCE
-
-#define nmi_state async_exception_state(VCPU_TRAP_NMI)
-#define mce_state async_exception_state(VCPU_TRAP_MCE)
-
-#define nmi_pending nmi_state.pending
-#define mce_pending mce_state.pending
-
 struct trap_bounce {
     uint32_t error_code;
     uint8_t flags; /* TBF_ */
@@ -557,12 +546,22 @@ struct arch_vcpu
 
     struct vpmu_struct vpmu;
 
-    struct {
-        bool pending;
-        uint8_t old_mask;
-    } async_exception_state[VCPU_TRAP_LAST];
-#define async_exception_state(t) async_exception_state[(t)-1]
-    uint8_t async_exception_mask;
+    union {
+#define VCPU_TRAP_NMI  0
+#define VCPU_TRAP_MCE  1
+#define VCPU_TRAP_LAST VCPU_TRAP_MCE
+        struct {
+            bool pending;
+            uint8_t old_mask;
+        } async_event[VCPU_TRAP_LAST + 1];
+        struct {
+            bool nmi_pending;
+            uint8_t nmi_old_mask;
+            bool mce_pending;
+            uint8_t mce_old_mask;
+        };
+    };
+    uint8_t async_event_mask;
 
     /* Virtual Machine Extensions */
     union {
The name async_exception isn't appropriate.  NMI isn't an exception at all,
and while MCE is classified as an exception (i.e. can occur at any point),
the mechanics of injecting it behave like other external interrupts.  Rename
to async_event_* which is a little shorter.

Drop VCPU_TRAP_NONE and renumber VCPU_TRAP_* to be 0-based, rather than
1-based, and remove async_exception_state() which hides the fixup internally.
This shifts the bits used in async_event_mask along by one, but doesn't
alter the overall logic.

Drop the {nmi,mce}_{state,pending} defines which obfuscate the data layout.
Instead, use an anonymous union to overlay names on the async_event[] array,
to retain the easy-to-follow v->arch.{nmi,mce}_pending logic.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wl@xen.org>
CC: Roger Pau Monné <roger.pau@citrix.com>
---
 xen/arch/x86/domain.c              |  5 ++---
 xen/arch/x86/nmi.c                 |  2 +-
 xen/arch/x86/pv/iret.c             | 15 +++++++--------
 xen/arch/x86/x86_64/asm-offsets.c  |  6 +++---
 xen/arch/x86/x86_64/compat/entry.S | 12 ++++++------
 xen/arch/x86/x86_64/entry.S        | 12 ++++++------
 xen/include/asm-x86/domain.h       | 33 ++++++++++++++++-----------------
 7 files changed, 41 insertions(+), 44 deletions(-)
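
For reviewers unfamiliar with the anonymous-union overlay in the domain.h
hunk: it is only safe because both views of the union agree byte-for-byte on
the layout (bool and uint8_t are each 1 byte, so there is no padding).  Below
is a minimal standalone sketch, not part of the patch and not Xen code, which
demonstrates the aliasing and pins the layout down with _Static_assert() (the
name "union async_events" and the test program itself are made up for the
illustration):

/*
 * Standalone illustration: why overlaying {nmi,mce}_* names on the
 * async_event[] array is safe.  Build with e.g.: gcc -std=c11 overlay.c
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define VCPU_TRAP_NMI  0
#define VCPU_TRAP_MCE  1
#define VCPU_TRAP_LAST VCPU_TRAP_MCE

union async_events {
    struct {
        bool pending;
        uint8_t old_mask;
    } async_event[VCPU_TRAP_LAST + 1];
    struct {
        bool nmi_pending;
        uint8_t nmi_old_mask;
        bool mce_pending;
        uint8_t mce_old_mask;
    };
};

/* The named fields must alias the array elements exactly. */
_Static_assert(offsetof(union async_events, nmi_pending) ==
               offsetof(union async_events, async_event[VCPU_TRAP_NMI].pending),
               "nmi_pending misaligned");
_Static_assert(offsetof(union async_events, mce_old_mask) ==
               offsetof(union async_events, async_event[VCPU_TRAP_MCE].old_mask),
               "mce_old_mask misaligned");

int main(void)
{
    static union async_events e;          /* zero-initialised */

    e.nmi_pending = true;                 /* write via the named view... */
    /* ...and read it back via the array view: prints 1. */
    printf("%d\n", e.async_event[VCPU_TRAP_NMI].pending);

    return 0;
}

A compile-time check of this shape would catch any future reordering of the
fields that silently broke the overlay.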
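Similarly, the iret.c hunk's logic may be easier to follow in isolation: when
exactly one event type is masked, mask & (mask - 1) is zero and the bit index
is the trap number; otherwise the loop finds the trap whose saved old_mask
differs from the current mask in precisely its own bit.  A simplified sketch
under stated assumptions, with __builtin_ctz() standing in for Xen's
__scanbit() and find_unmasked_trap() a made-up name, not the hypervisor's
implementation:

#include <stdint.h>
#include <stdio.h>

#define VCPU_TRAP_NMI  0
#define VCPU_TRAP_MCE  1
#define VCPU_TRAP_LAST VCPU_TRAP_MCE

/* Precondition: mask != 0 (the real code returns early otherwise). */
static unsigned int find_unmasked_trap(uint8_t mask, const uint8_t old_mask[])
{
    unsigned int trap;

    if ( !(mask & (mask - 1)) )
        /* Power-of-two mask: exactly one event masked; bit index == trap. */
        return __builtin_ctz(mask);

    /*
     * Several events masked: the one being completed is the trap whose
     * saved old_mask differs from the current mask in just its own bit.
     */
    for ( trap = 0; trap <= VCPU_TRAP_LAST; ++trap )
        if ( (mask ^ old_mask[trap]) == (1u << trap) )
            break;

    return trap; /* > VCPU_TRAP_LAST corresponds to ASSERT_UNREACHABLE() */
}

int main(void)
{
    /* Model an NMI delivered while MCE was already masked. */
    uint8_t old_mask[VCPU_TRAP_LAST + 1] = {
        [VCPU_TRAP_NMI] = 1u << VCPU_TRAP_MCE,
    };
    uint8_t mask = (1u << VCPU_TRAP_NMI) | (1u << VCPU_TRAP_MCE);

    /* Prints 0: VCPU_TRAP_NMI is the event whose old mask gets restored. */
    printf("%u\n", find_unmasked_trap(mask, old_mask));

    return 0;
}

This also shows why the renumbering is safe: only relative bit positions
matter, so shifting every VCPU_TRAP_* down by one leaves the logic intact.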