@@ -1,3 +1,4 @@
+#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
@@ -568,12 +568,9 @@ static int hvmemul_virtual_to_linear(
return X86EMUL_UNHANDLEABLE;
/* This is a singleton operation: fail it with an exception. */
- hvmemul_ctxt->exn_pending = 1;
- hvmemul_ctxt->trap.vector =
- (seg == x86_seg_ss) ? TRAP_stack_error : TRAP_gp_fault;
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_HW_EXCEPTION;
- hvmemul_ctxt->trap.error_code = 0;
- hvmemul_ctxt->trap.insn_len = 0;
+ x86_emul_hw_exception((seg == x86_seg_ss)
+ ? TRAP_stack_error
+ : TRAP_gp_fault, 0, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
}
@@ -1562,59 +1559,6 @@ int hvmemul_cpuid(
return X86EMUL_OKAY;
}
-static int hvmemul_inject_hw_exception(
- uint8_t vector,
- int32_t error_code,
- struct x86_emulate_ctxt *ctxt)
-{
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-
- hvmemul_ctxt->exn_pending = 1;
- hvmemul_ctxt->trap.vector = vector;
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_HW_EXCEPTION;
- hvmemul_ctxt->trap.error_code = error_code;
- hvmemul_ctxt->trap.insn_len = 0;
-
- return X86EMUL_OKAY;
-}
-
-static int hvmemul_inject_sw_interrupt(
- enum x86_swint_type type,
- uint8_t vector,
- uint8_t insn_len,
- struct x86_emulate_ctxt *ctxt)
-{
- struct hvm_emulate_ctxt *hvmemul_ctxt =
- container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-
- switch ( type )
- {
- case x86_swint_icebp:
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
- break;
-
- case x86_swint_int3:
- case x86_swint_into:
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_SW_EXCEPTION;
- break;
-
- case x86_swint_int:
- hvmemul_ctxt->trap.type = X86_EVENTTYPE_SW_INTERRUPT;
- break;
-
- default:
- return X86EMUL_UNHANDLEABLE;
- }
-
- hvmemul_ctxt->exn_pending = 1;
- hvmemul_ctxt->trap.vector = vector;
- hvmemul_ctxt->trap.error_code = X86_EVENT_NO_EC;
- hvmemul_ctxt->trap.insn_len = insn_len;
-
- return X86EMUL_OKAY;
-}
-
static int hvmemul_get_fpu(
void (*exception_callback)(void *, struct cpu_user_regs *),
void *exception_callback_arg,
@@ -1678,8 +1622,7 @@ static int hvmemul_invlpg(
* hvmemul_virtual_to_linear() raises exceptions for type/limit
* violations, so squash them.
*/
- hvmemul_ctxt->exn_pending = 0;
- hvmemul_ctxt->trap = (struct x86_event){};
+ x86_emul_reset_event(ctxt);
rc = X86EMUL_OKAY;
}
@@ -1696,7 +1639,7 @@ static int hvmemul_vmfunc(
rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
if ( rc != X86EMUL_OKAY )
- hvmemul_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
+ x86_emul_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC, ctxt);
return rc;
}
@@ -1720,8 +1663,6 @@ static const struct x86_emulate_ops hvm_emulate_ops = {
.write_msr = hvmemul_write_msr,
.wbinvd = hvmemul_wbinvd,
.cpuid = hvmemul_cpuid,
- .inject_hw_exception = hvmemul_inject_hw_exception,
- .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
.invlpg = hvmemul_invlpg,
@@ -1747,8 +1688,6 @@ static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
.write_msr = hvmemul_write_msr_discard,
.wbinvd = hvmemul_wbinvd_discard,
.cpuid = hvmemul_cpuid,
- .inject_hw_exception = hvmemul_inject_hw_exception,
- .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
.get_fpu = hvmemul_get_fpu,
.put_fpu = hvmemul_put_fpu,
.invlpg = hvmemul_invlpg,
@@ -1771,6 +1710,19 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
rc = x86_emulate(&hvmemul_ctxt->ctxt, ops);
+ /*
+ * TODO: Make this true:
+ *
+ ASSERT(hvmemul_ctxt->ctxt.event_pending == (rc == X86EMUL_EXCEPTION));
+ *
+ * Some codepaths still raise exceptions behind the back of the
+ * emulator. (i.e. return X86EMUL_EXCEPTION but without
+ * event_pending being set). In the meantime, use a slightly
+ * relaxed check...
+ */
+ if ( hvmemul_ctxt->ctxt.event_pending )
+ ASSERT(rc == X86EMUL_EXCEPTION);
+
if ( rc == X86EMUL_OKAY && vio->mmio_retry )
rc = X86EMUL_RETRY;
if ( rc != X86EMUL_RETRY )
@@ -1867,8 +1819,8 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
hvm_dump_emulation_state(XENLOG_G_WARNING "MMCFG", &ctxt);
break;
case X86EMUL_EXCEPTION:
- if ( ctxt.exn_pending )
- hvm_inject_event(&ctxt.trap);
+ if ( ctxt.ctxt.event_pending )
+ hvm_inject_event(&ctxt.ctxt.event);
/* fallthrough */
default:
hvm_emulate_writeback(&ctxt);
@@ -1927,8 +1879,8 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
hvm_inject_hw_exception(trapnr, errcode);
break;
case X86EMUL_EXCEPTION:
- if ( ctx.exn_pending )
- hvm_inject_event(&ctx.trap);
+ if ( ctx.ctxt.event_pending )
+ hvm_inject_event(&ctx.ctxt.event);
break;
}
@@ -2003,8 +1955,6 @@ void hvm_emulate_init_per_insn(
hvmemul_ctxt->insn_buf_bytes = insn_bytes;
memcpy(hvmemul_ctxt->insn_buf, insn_buf, insn_bytes);
}
-
- hvmemul_ctxt->exn_pending = 0;
}
void hvm_emulate_writeback(
@@ -4076,8 +4076,8 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
break;
case X86EMUL_EXCEPTION:
- if ( ctxt.exn_pending )
- hvm_inject_event(&ctxt.trap);
+ if ( ctxt.ctxt.event_pending )
+ hvm_inject_event(&ctxt.ctxt.event);
/* fall through */
default:
hvm_emulate_writeback(&ctxt);
@@ -102,8 +102,8 @@ int handle_mmio(void)
hvm_dump_emulation_state(XENLOG_G_WARNING "MMIO", &ctxt);
return 0;
case X86EMUL_EXCEPTION:
- if ( ctxt.exn_pending )
- hvm_inject_event(&ctxt.trap);
+ if ( ctxt.ctxt.event_pending )
+ hvm_inject_event(&ctxt.ctxt.event);
break;
default:
break;
@@ -122,7 +122,7 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
if ( rc == X86EMUL_EXCEPTION )
{
- if ( !hvmemul_ctxt->exn_pending )
+ if ( !hvmemul_ctxt->ctxt.event_pending )
{
unsigned long intr_info;
@@ -133,27 +133,27 @@ void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
goto fail;
}
- hvmemul_ctxt->trap.vector = (uint8_t)intr_info;
- hvmemul_ctxt->trap.insn_len = 0;
+ hvmemul_ctxt->ctxt.event.vector = (uint8_t)intr_info;
+ hvmemul_ctxt->ctxt.event.insn_len = 0;
}
if ( unlikely(curr->domain->debugger_attached) &&
- ((hvmemul_ctxt->trap.vector == TRAP_debug) ||
- (hvmemul_ctxt->trap.vector == TRAP_int3)) )
+ ((hvmemul_ctxt->ctxt.event.vector == TRAP_debug) ||
+ (hvmemul_ctxt->ctxt.event.vector == TRAP_int3)) )
{
domain_pause_for_debugger();
}
else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
{
gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
- hvmemul_ctxt->trap.vector);
+ hvmemul_ctxt->ctxt.event.vector);
goto fail;
}
else
{
realmode_deliver_exception(
- hvmemul_ctxt->trap.vector,
- hvmemul_ctxt->trap.insn_len,
+ hvmemul_ctxt->ctxt.event.vector,
+ hvmemul_ctxt->ctxt.event.insn_len,
hvmemul_ctxt);
}
}
@@ -5379,7 +5379,20 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
page_unlock(page);
put_page(page);
- if ( rc == X86EMUL_UNHANDLEABLE )
+ /*
+ * TODO: Make this true:
+ *
+ ASSERT(ptwr_ctxt.ctxt.event_pending == (rc == X86EMUL_EXCEPTION));
+ *
+ * Some codepaths still raise exceptions behind the back of the
+ * emulator. (i.e. return X86EMUL_EXCEPTION but without
+ * event_pending being set). In the meantime, use a slightly
+ * relaxed check...
+ */
+ if ( ptwr_ctxt.ctxt.event_pending )
+ ASSERT(rc == X86EMUL_EXCEPTION);
+
+ if ( rc == X86EMUL_UNHANDLEABLE || ptwr_ctxt.ctxt.event_pending )
goto bail;
perfc_incr(ptwr_emulations);
@@ -5503,7 +5516,21 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
else
rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
- return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
+ /*
+ * TODO: Make this true:
+ *
+ ASSERT(ctxt.event_pending == (rc == X86EMUL_EXCEPTION));
+ *
+ * Some codepaths still raise exceptions behind the back of the
+ * emulator. (i.e. return X86EMUL_EXCEPTION but without
+ * event_pending being set). In the meantime, use a slightly
+ * relaxed check...
+ */
+ if ( ctxt.event_pending )
+ ASSERT(rc == X86EMUL_EXCEPTION);
+
+ return ((rc != X86EMUL_UNHANDLEABLE && !ctxt.event_pending)
+ ? EXCRET_fault_fixed : 0);
}
void *alloc_xen_pagetable(void)
@@ -3374,11 +3374,23 @@ static int sh_page_fault(struct vcpu *v,
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
/*
+ * TODO: Make this true:
+ *
+     ASSERT(emul_ctxt.ctxt.event_pending == (r == X86EMUL_EXCEPTION));
+ *
+ * Some codepaths still raise exceptions behind the back of the
+ * emulator. (i.e. return X86EMUL_EXCEPTION but without event_pending
+ * being set). In the meantime, use a slightly relaxed check...
+ */
+ if ( emul_ctxt.ctxt.event_pending )
+ ASSERT(r == X86EMUL_EXCEPTION);
+
+ /*
* NB. We do not unshadow on X86EMUL_EXCEPTION. It's not clear that it
* would be a good unshadow hint. If we *do* decide to unshadow-on-fault
* then it must be 'failable': we cannot require the unshadow to succeed.
*/
- if ( r == X86EMUL_UNHANDLEABLE )
+ if ( r == X86EMUL_UNHANDLEABLE || emul_ctxt.ctxt.event_pending )
{
perfc_incr(shadow_fault_emulate_failed);
#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
@@ -3433,6 +3445,20 @@ static int sh_page_fault(struct vcpu *v,
shadow_continue_emulation(&emul_ctxt, regs);
v->arch.paging.last_write_was_pt = 0;
r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
+
+ /*
+ * TODO: Make this true:
+ *
+     ASSERT(emul_ctxt.ctxt.event_pending == (r == X86EMUL_EXCEPTION));
+ *
+ * Some codepaths still raise exceptions behind the back of the
+ * emulator. (i.e. return X86EMUL_EXCEPTION but without
+ * event_pending being set). In the meantime, use a slightly
+ * relaxed check...
+ */
+ if ( emul_ctxt.ctxt.event_pending )
+ ASSERT(r == X86EMUL_EXCEPTION);
+
if ( r == X86EMUL_OKAY )
{
emulation_count++;
@@ -680,9 +680,8 @@ static inline int mkec(uint8_t e, int32_t ec, ...)
#define generate_exception_if(p, e, ec...) \
({ if ( (p) ) { \
- fail_if(ops->inject_hw_exception == NULL); \
- rc = ops->inject_hw_exception(e, mkec(e, ##ec, 0), ctxt) \
- ? : X86EMUL_EXCEPTION; \
+ x86_emul_hw_exception(e, mkec(e, ##ec, 0), ctxt); \
+ rc = X86EMUL_EXCEPTION; \
goto done; \
} \
})
@@ -1604,9 +1603,6 @@ static int inject_swint(enum x86_swint_type type,
{
int rc, error_code, fault_type = EXC_GP;
- fail_if(ops->inject_sw_interrupt == NULL);
- fail_if(ops->inject_hw_exception == NULL);
-
/*
* Without hardware support, injecting software interrupts/exceptions is
* problematic.
@@ -1703,7 +1699,8 @@ static int inject_swint(enum x86_swint_type type,
ctxt->regs->eip += insn_len;
}
- rc = ops->inject_sw_interrupt(type, vector, insn_len, ctxt);
+ x86_emul_software_event(type, vector, insn_len, ctxt);
+ rc = X86EMUL_OKAY;
done:
return rc;
@@ -1911,6 +1908,7 @@ x86_decode(
/* Initialise output state in x86_emulate_ctxt */
ctxt->retire.byte = 0;
+ x86_emul_reset_event(ctxt);
op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8;
if ( op_bytes == 8 )
@@ -62,9 +62,31 @@ enum x86_swint_type {
/* How much help is required with software event injection? */
enum x86_swint_emulation {
- x86_swint_emulate_none, /* Hardware supports all software injection properly */
- x86_swint_emulate_icebp,/* Help needed with `icebp` (0xf1) */
- x86_swint_emulate_all, /* Help needed with all software events */
+ /*
+ * Hardware supports all software injection properly. All traps exit the
+ * emulator with an unmodified %eip, and a suitable x86_event pending.
+ *
+     * Callers not wishing to deal with trap semantics should choose this
+     * option, as it causes all software traps to have fault semantics
+ * (i.e. no movement of %eip).
+ */
+ x86_swint_emulate_none,
+
+ /*
+ * Help needed with `icebp` (0xf1). The emulator will emulate injection
+ * of `icebp` only. If all checks are successful, %eip will be moved
+ * forwards to the end of the instruction, and a suitable x86_event will
+ * be pending.
+ */
+ x86_swint_emulate_icebp,
+
+ /*
+ * Help needed with all software events. The emulator will emulate
+ * injection of all software events. If all checks are successful, %eip
+ * will be moved forwards to the end of the instruction, and a suitable
+ * x86_event will be pending.
+ */
+ x86_swint_emulate_all,
};
/*
@@ -386,19 +408,6 @@ struct x86_emulate_ops
unsigned int *edx,
struct x86_emulate_ctxt *ctxt);
- /* inject_hw_exception */
- int (*inject_hw_exception)(
- uint8_t vector,
- int32_t error_code,
- struct x86_emulate_ctxt *ctxt);
-
- /* inject_sw_interrupt */
- int (*inject_sw_interrupt)(
- enum x86_swint_type type,
- uint8_t vector,
- uint8_t insn_len,
- struct x86_emulate_ctxt *ctxt);
-
/*
* get_fpu: Load emulated environment's FPU state onto processor.
* @exn_callback: On any FPU or SIMD exception, pass control to
@@ -475,6 +484,9 @@ struct x86_emulate_ctxt
} flags;
uint8_t byte;
} retire;
+
+ bool event_pending;
+ struct x86_event event;
};
/*
@@ -596,4 +608,55 @@ void x86_emulate_free_state(struct x86_emulate_state *state);
#endif
+#ifndef ASSERT
+#define ASSERT assert
+#endif
+
+static inline void x86_emul_hw_exception(
+ unsigned int vector, int error_code, struct x86_emulate_ctxt *ctxt)
+{
+ ASSERT(!ctxt->event_pending);
+
+ ctxt->event.vector = vector;
+ ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
+ ctxt->event.error_code = error_code;
+
+ ctxt->event_pending = true;
+}
+
+static inline void x86_emul_software_event(
+ enum x86_swint_type type, uint8_t vector, uint8_t insn_len,
+ struct x86_emulate_ctxt *ctxt)
+{
+ ASSERT(!ctxt->event_pending);
+
+ switch ( type )
+ {
+ case x86_swint_icebp:
+ ctxt->event.type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
+ break;
+
+ case x86_swint_int3:
+ case x86_swint_into:
+ ctxt->event.type = X86_EVENTTYPE_SW_EXCEPTION;
+ break;
+
+ case x86_swint_int:
+ ctxt->event.type = X86_EVENTTYPE_SW_INTERRUPT;
+ break;
+ }
+
+ ctxt->event.vector = vector;
+ ctxt->event.error_code = X86_EVENT_NO_EC;
+ ctxt->event.insn_len = insn_len;
+
+ ctxt->event_pending = true;
+}
+
+static inline void x86_emul_reset_event(struct x86_emulate_ctxt *ctxt)
+{
+ ctxt->event_pending = false;
+ ctxt->event = (struct x86_event){};
+}
+
#endif /* __X86_EMULATE_H__ */
@@ -29,9 +29,6 @@ struct hvm_emulate_ctxt {
unsigned long seg_reg_accessed;
unsigned long seg_reg_dirty;
- bool_t exn_pending;
- struct x86_event trap;
-
uint32_t intr_shadow;
bool_t set_context;