@@ -412,7 +412,7 @@ int inject_vmce(struct domain *d, int vcpu)
if ( (is_hvm_domain(d) ||
pv_trap_callback_registered(v, TRAP_machine_check)) &&
- !test_and_set_bool(v->mce_pending) )
+ !test_and_set_bool(v->arch.mce_pending) )
{
mce_printk(MCE_VERBOSE, "MCE: inject vMCE to %pv\n", v);
vcpu_kick(v);
@@ -329,7 +329,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
vlapic_set_irq(vlapic, vlapic_lvtpc & APIC_VECTOR_MASK, 0);
break;
case APIC_MODE_NMI:
- sampling->nmi_pending = 1;
+ sampling->arch.nmi_pending = true;
break;
}
#endif
@@ -1246,6 +1246,10 @@ int arch_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
int arch_vcpu_reset(struct vcpu *v)
{
+ v->arch.async_exception_mask = 0;
+ memset(v->arch.async_exception_state, 0,
+ sizeof(v->arch.async_exception_state));
+
if ( is_pv_vcpu(v) )
{
pv_destroy_gdt(v);
@@ -1264,6 +1268,14 @@ arch_do_vcpu_op(
switch ( cmd )
{
+ case VCPUOP_send_nmi:
+ if ( !guest_handle_is_null(arg) )
+ return -EINVAL;
+
+ if ( !test_and_set_bool(v->arch.nmi_pending) )
+ vcpu_kick(v);
+ break;
+
case VCPUOP_register_vcpu_time_memory_area:
{
struct vcpu_register_time_memory_area area;
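
With the handler moved into arch_do_vcpu_op(), the guest-visible behaviour
is unchanged: VCPUOP_send_nmi still takes no argument structure, so the
handle must be NULL. A minimal sketch of the guest side, assuming
Linux-style hypercall wrappers (send_vnmi is a hypothetical helper):

    #include <xen/interface/vcpu.h>      /* VCPUOP_send_nmi */
    #include <asm/xen/hypercall.h>       /* HYPERVISOR_vcpu_op() */

    /*
     * Hypothetical helper: inject a virtual NMI into the given vCPU.
     * Anything other than a NULL extra-args handle is rejected with
     * -EINVAL by the handler above.
     */
    static int send_vnmi(unsigned int vcpu_id)
    {
        return HYPERVISOR_vcpu_op(VCPUOP_send_nmi, vcpu_id, NULL);
    }
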
@@ -614,7 +614,7 @@ long arch_do_domctl(
{
case XEN_DOMCTL_SENDTRIGGER_NMI:
ret = 0;
- if ( !test_and_set_bool(v->nmi_pending) )
+ if ( !test_and_set_bool(v->arch.nmi_pending) )
vcpu_kick(v);
break;
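
For context, this trigger is driven from the toolstack, e.g. via
"xl trigger <domid> nmi". A sketch of the equivalent libxc call, assuming
an open xch handle and targeting vCPU 0:

    #include <xenctrl.h>

    /* Ask Xen to mark an NMI pending on vCPU 0 of the given domain. */
    static int trigger_nmi(xc_interface *xch, uint32_t domid)
    {
        return xc_domain_send_trigger(xch, domid,
                                      XEN_DOMCTL_SENDTRIGGER_NMI, 0);
    }
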
@@ -526,10 +526,10 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
*/
vlapic_sync_pir_to_irr(v);
- if ( unlikely(v->nmi_pending) )
+ if ( unlikely(v->arch.nmi_pending) )
return hvm_intack_nmi;
- if ( unlikely(v->mce_pending) )
+ if ( unlikely(v->arch.mce_pending) )
return hvm_intack_mce;
if ( (plat->irq->callback_via_type == HVMIRQ_callback_vector)
@@ -554,11 +554,11 @@ struct hvm_intack hvm_vcpu_ack_pending_irq(
switch ( intack.source )
{
case hvm_intsrc_nmi:
- if ( !test_and_clear_bool(v->nmi_pending) )
+ if ( !test_and_clear_bool(v->arch.nmi_pending) )
intack = hvm_intack_none;
break;
case hvm_intsrc_mce:
- if ( !test_and_clear_bool(v->mce_pending) )
+ if ( !test_and_clear_bool(v->arch.mce_pending) )
intack = hvm_intack_none;
break;
case hvm_intsrc_pic:
@@ -469,7 +469,7 @@ static void vioapic_deliver(struct hvm_vioapic *vioapic, unsigned int pin)
for_each_vcpu ( d, v )
if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
0, dest, dest_mode) &&
- !test_and_set_bool(v->nmi_pending) )
+ !test_and_set_bool(v->arch.nmi_pending) )
vcpu_kick(v);
break;
}
@@ -355,7 +355,7 @@ static void vlapic_accept_irq(struct vcpu *v, uint32_t icr_low)
break;
case APIC_DM_NMI:
- if ( !test_and_set_bool(v->nmi_pending) )
+ if ( !test_and_set_bool(v->arch.nmi_pending) )
{
bool_t wake = 0;
domain_lock(v->domain);
@@ -599,8 +599,8 @@ static void do_nmi_stats(unsigned char key)
!(v = hardware_domain->vcpu[0]) )
return;
- pend = v->nmi_pending;
- mask = v->async_exception_mask & (1 << VCPU_TRAP_NMI);
+ pend = v->arch.nmi_pending;
+ mask = v->arch.async_exception_mask & (1 << VCPU_TRAP_NMI);
if ( pend || mask )
printk("%pv: NMI%s%s\n",
v, pend ? " pending" : "", mask ? " masked" : "");
@@ -93,7 +93,7 @@ static int nmi_callback(const struct cpu_user_regs *regs, int cpu)
send_guest_vcpu_virq(current, VIRQ_XENOPROF);
if ( ovf == 2 )
- current->nmi_pending = 1;
+ current->arch.nmi_pending = true;
return 1;
}
@@ -52,7 +52,7 @@ static int register_guest_nmi_callback(unsigned long address)
* now.
*/
if ( curr->vcpu_id == 0 && arch_get_nmi_reason(d) != 0 )
- curr->nmi_pending = 1;
+ curr->arch.nmi_pending = true;
return 0;
}
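
The callback checked for here is registered by the guest via
CALLBACKOP_register. A sketch for a 64-bit PV guest, assuming Linux-style
wrappers; nmi_entry is a hypothetical assembly entry stub:

    #include <xen/interface/callback.h>  /* struct callback_register */

    extern void nmi_entry(void);         /* hypothetical entry stub */

    static void register_nmi_handler(void)
    {
        struct callback_register cb = {
            .type    = CALLBACKTYPE_nmi,
            .address = (unsigned long)nmi_entry,
        };

        /* On vCPU 0 this may immediately latch nmi_pending, as above. */
        HYPERVISOR_callback_op(CALLBACKOP_register, &cb);
    }
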
@@ -27,15 +27,15 @@ static void async_exception_cleanup(struct vcpu *curr)
{
unsigned int trap;
- if ( !curr->async_exception_mask )
+ if ( !curr->arch.async_exception_mask )
return;
- if ( !(curr->async_exception_mask & (curr->async_exception_mask - 1)) )
- trap = __scanbit(curr->async_exception_mask, VCPU_TRAP_NONE);
+ if ( !(curr->arch.async_exception_mask & (curr->arch.async_exception_mask - 1)) )
+ trap = __scanbit(curr->arch.async_exception_mask, VCPU_TRAP_NONE);
else
for ( trap = VCPU_TRAP_NONE + 1; trap <= VCPU_TRAP_LAST; ++trap )
- if ( (curr->async_exception_mask ^
- curr->async_exception_state(trap).old_mask) == (1u << trap) )
+ if ( (curr->arch.async_exception_mask ^
+ curr->arch.async_exception_state(trap).old_mask) == (1u << trap) )
break;
if ( unlikely(trap > VCPU_TRAP_LAST) )
{
@@ -44,7 +44,8 @@ static void async_exception_cleanup(struct vcpu *curr)
}
/* Restore previous asynchronous exception mask. */
- curr->async_exception_mask = curr->async_exception_state(trap).old_mask;
+ curr->arch.async_exception_mask =
+ curr->arch.async_exception_state(trap).old_mask;
}
unsigned long do_iret(void)
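
A note on the logic being re-pointered here: mask & (mask - 1) clears the
lowest set bit, so the first test is zero exactly when at most one
exception type is pending, and __scanbit() (find-first-set, with a
fallback for a zero mask) recovers the trap number directly. A standalone
illustration in plain C, with scanbit() standing in for Xen's __scanbit():

    #include <assert.h>

    #define VCPU_TRAP_NONE 0
    #define VCPU_TRAP_NMI  1
    #define VCPU_TRAP_MCE  2

    /* Stand-in for __scanbit(): first set bit, or 'fallback' if none. */
    static unsigned int scanbit(unsigned long mask, unsigned int fallback)
    {
        return mask ? __builtin_ctzl(mask) : fallback;
    }

    int main(void)
    {
        unsigned long mask = 1UL << VCPU_TRAP_MCE;  /* only MCE pending */

        assert(!(mask & (mask - 1)));               /* at most one bit set */
        assert(scanbit(mask, VCPU_TRAP_NONE) == VCPU_TRAP_MCE);
        return 0;
    }
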
@@ -176,7 +176,7 @@ int pv_raise_nmi(struct vcpu *v)
if ( cmpxchgptr(v_ptr, NULL, v) )
return -EBUSY;
- if ( !test_and_set_bool(v->nmi_pending) )
+ if ( !test_and_set_bool(v->arch.nmi_pending) )
{
/* Not safe to wake up a vcpu here */
raise_softirq(NMI_SOFTIRQ);
@@ -72,11 +72,11 @@ void __dummy__(void)
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.pv.vgc_flags);
OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
- OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
- OFFSET(VCPU_mce_pending, struct vcpu, mce_pending);
- OFFSET(VCPU_nmi_old_mask, struct vcpu, nmi_state.old_mask);
- OFFSET(VCPU_mce_old_mask, struct vcpu, mce_state.old_mask);
- OFFSET(VCPU_async_exception_mask, struct vcpu, async_exception_mask);
+ OFFSET(VCPU_nmi_pending, struct vcpu, arch.nmi_pending);
+ OFFSET(VCPU_mce_pending, struct vcpu, arch.mce_pending);
+ OFFSET(VCPU_nmi_old_mask, struct vcpu, arch.nmi_state.old_mask);
+ OFFSET(VCPU_mce_old_mask, struct vcpu, arch.mce_state.old_mask);
+ OFFSET(VCPU_async_exception_mask, struct vcpu, arch.async_exception_mask);
DEFINE(VCPU_TRAP_NMI, VCPU_TRAP_NMI);
DEFINE(VCPU_TRAP_MCE, VCPU_TRAP_MCE);
DEFINE(_VGCF_syscall_disables_events, _VGCF_syscall_disables_events);
@@ -1199,10 +1199,6 @@ int vcpu_reset(struct vcpu *v)
v->fpu_initialised = 0;
v->fpu_dirtied = 0;
v->is_initialised = 0;
-#ifdef VCPU_TRAP_LAST
- v->async_exception_mask = 0;
- memset(v->async_exception_state, 0, sizeof(v->async_exception_state));
-#endif
if ( v->affinity_broken & VCPU_AFFINITY_OVERRIDE )
vcpu_temporary_affinity(v, NR_CPUS, VCPU_AFFINITY_OVERRIDE);
if ( v->affinity_broken & VCPU_AFFINITY_WAIT )
@@ -1511,17 +1507,6 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
-#ifdef VCPU_TRAP_NMI
- case VCPUOP_send_nmi:
- if ( !guest_handle_is_null(arg) )
- return -EINVAL;
-
- if ( !test_and_set_bool(v->nmi_pending) )
- vcpu_kick(v);
-
- break;
-#endif
-
default:
rc = arch_do_vcpu_op(cmd, v, arg);
break;
@@ -19,6 +19,7 @@
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
#define is_domain_direct_mapped(d) ((void)(d), 0)
+#define VCPU_TRAP_NONE 0
#define VCPU_TRAP_NMI 1
#define VCPU_TRAP_MCE 2
#define VCPU_TRAP_LAST VCPU_TRAP_MCE
@@ -556,6 +557,13 @@ struct arch_vcpu
struct vpmu_struct vpmu;
+ struct {
+ bool pending;
+ uint8_t old_mask;
+ } async_exception_state[VCPU_TRAP_LAST];
+#define async_exception_state(t) async_exception_state[(t)-1]
+ uint8_t async_exception_mask;
+
/* Virtual Machine Extensions */
union {
struct pv_vcpu pv;
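
The function-like macro shadows the array of the same name: only uses
followed by an opening parenthesis expand, so the declaration itself is
untouched, while each use site has its 1-based trap number rewritten into
a 0-based index. The same trick in isolation (demo/trap_state are
hypothetical names):

    #include <assert.h>

    #define TRAP_NMI  1
    #define TRAP_MCE  2
    #define TRAP_LAST TRAP_MCE

    struct demo {
        struct {
            int pending;
        } trap_state[TRAP_LAST];          /* not expanded: no '(' follows */
    #define trap_state(t) trap_state[(t) - 1]
    };

    int main(void)
    {
        struct demo d = { 0 };

        d.trap_state(TRAP_MCE).pending = 1;       /* -> d.trap_state[1] */
        assert(d.trap_state[TRAP_MCE - 1].pending);
        return 0;
    }
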
@@ -191,17 +191,6 @@ struct vcpu
bool is_urgent;
/* VCPU must context_switch without scheduling unit. */
bool force_context_switch;
-
-#ifdef VCPU_TRAP_LAST
-#define VCPU_TRAP_NONE 0
- struct {
- bool pending;
- uint8_t old_mask;
- } async_exception_state[VCPU_TRAP_LAST];
-#define async_exception_state(t) async_exception_state[(t)-1]
- uint8_t async_exception_mask;
-#endif
-
/* Require shutdown to be deferred for some asynchronous operation? */
bool defer_shutdown;
/* VCPU is paused following shutdown request (d->is_shutting_down)? */

The async_exception_{state,mask} infrastructure is implemented in common
code, but is limited to x86 because of the VCPU_TRAP_LAST ifdef-ary. The
internals are very x86 specific (and even then, in need of correction),
and won't be of interest to other architectures. Move it all into x86
specific code.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wl@xen.org>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Julien Grall <julien@xen.org>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com>
---
 xen/arch/x86/cpu/mcheck/vmce.c    |  2 +-
 xen/arch/x86/cpu/vpmu.c           |  2 +-
 xen/arch/x86/domain.c             | 12 ++++++++++++
 xen/arch/x86/domctl.c             |  2 +-
 xen/arch/x86/hvm/irq.c            |  8 ++++----
 xen/arch/x86/hvm/vioapic.c        |  2 +-
 xen/arch/x86/hvm/vlapic.c         |  2 +-
 xen/arch/x86/nmi.c                |  4 ++--
 xen/arch/x86/oprofile/nmi_int.c   |  2 +-
 xen/arch/x86/pv/callback.c        |  2 +-
 xen/arch/x86/pv/iret.c            | 13 +++++++------
 xen/arch/x86/pv/traps.c           |  2 +-
 xen/arch/x86/x86_64/asm-offsets.c | 10 +++++-----
 xen/common/domain.c               | 15 ---------------
 xen/include/asm-x86/domain.h      |  8 ++++++++
 xen/include/xen/sched.h           | 11 -----------
 16 files changed, 46 insertions(+), 51 deletions(-)