@@ -12,9 +12,11 @@
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
+#include <xen/monitor.h>
#include <xen/paging.h>
#include <xen/trace.h>
#include <xen/vm_event.h>
+#include <asm/altp2m.h>
#include <asm/event.h>
#include <asm/i387.h>
#include <asm/xstate.h>
@@ -530,6 +532,64 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
     return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
 }
 
+/*
+ * Send a MEM_ACCESS vm_event to the monitor if the emulated access at
+ * gla/gfn clashes with the page's p2m access rights.  pfec describes the
+ * attempted access (PFEC_write_access / PFEC_insn_fetch); send_event gates
+ * the whole check.  Returns true if an event was delivered via
+ * monitor_traps(), in which case the caller must abort/retry the emulation
+ * so the monitor can respond before the vCPU resumes.
+ */
+bool hvm_emulate_send_vm_event(unsigned long gla, gfn_t gfn,
+                               uint32_t pfec, bool send_event)
+{
+    xenmem_access_t access;
+    vm_event_request_t req = {};
+    paddr_t gpa = gfn_to_gaddr(gfn) | (gla & ~PAGE_MASK);
+
+    if ( !send_event || !pfec )
+        return false;
+
+    /* Per-vCPU altp2m view (index 0 when altp2m is inactive). */
+    if ( p2m_get_mem_access(current->domain, gfn, &access,
+                            altp2m_vcpu_idx(current)) != 0 )
+        return false;
+
+    switch ( access )
+    {
+    case XENMEM_access_x:
+    case XENMEM_access_rx:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags = MEM_ACCESS_R | MEM_ACCESS_W;
+        break;
+
+    case XENMEM_access_w:
+    case XENMEM_access_rw:
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags = MEM_ACCESS_X;
+        break;
+
+    case XENMEM_access_r:
+    case XENMEM_access_n:
+        if ( pfec & PFEC_write_access )
+            req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
+        if ( pfec & PFEC_insn_fetch )
+            req.u.mem_access.flags |= MEM_ACCESS_X;
+        break;
+
+    default:
+        return false;
+    }
+
+    if ( !req.u.mem_access.flags )
+        return false; /* no violation */
+
+    req.reason = VM_EVENT_REASON_MEM_ACCESS;
+    req.u.mem_access.gfn = gfn_x(gfn);
+    req.u.mem_access.flags |= MEM_ACCESS_FAULT_WITH_GLA | MEM_ACCESS_GLA_VALID;
+    req.u.mem_access.gla = gla;
+    req.u.mem_access.offset = gpa & ~PAGE_MASK;
+
+    return monitor_traps(current, true, &req) >= 0;
+}
+
/*
* Map the frame(s) covering an individual linear access, for writeable
* access. May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
@@ -547,6 +600,7 @@ static void *hvmemul_map_linear_addr(
unsigned int nr_frames = ((linear + bytes - !!bytes) >> PAGE_SHIFT) -
(linear >> PAGE_SHIFT) + 1;
unsigned int i;
+ gfn_t gfn;
/*
* mfn points to the next free slot. All used slots have a page reference
@@ -585,7 +639,7 @@ static void *hvmemul_map_linear_addr(
ASSERT(mfn_x(*mfn) == 0);
res = hvm_translate_get_page(curr, addr, true, pfec,
- &pfinfo, &page, NULL, &p2mt);
+ &pfinfo, &page, &gfn, &p2mt);
switch ( res )
{
@@ -615,6 +669,13 @@ static void *hvmemul_map_linear_addr(
if ( pfec & PFEC_write_access )
{
+ if ( hvm_emulate_send_vm_event(addr, gfn, pfec,
+ hvmemul_ctxt->send_event) )
+ {
+ err = ERR_PTR(~X86EMUL_RETRY);
+ goto out;
+ }
+
if ( p2m_is_discard_write(p2mt) )
{
err = ERR_PTR(~X86EMUL_OKAY);
@@ -1115,7 +1176,8 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
* clean up any interim state.
*/
if ( !hvmemul_find_mmio_cache(vio, addr, IOREQ_READ, false) )
- rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+ rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo,
+ hvmemul_ctxt->send_event);
switch ( rc )
{
@@ -2509,12 +2571,13 @@ int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
}
void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
- unsigned int errcode)
+ unsigned int errcode, bool send_event)
{
struct hvm_emulate_ctxt ctx = {{ 0 }};
int rc;
hvm_emulate_init_once(&ctx, NULL, guest_cpu_user_regs());
+ ctx.send_event = send_event;
switch ( kind )
{
@@ -2629,7 +2692,7 @@ void hvm_emulate_init_per_insn(
hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
sizeof(hvmemul_ctxt->insn_buf),
pfec | PFEC_insn_fetch,
- NULL) == HVMTRANS_okay) ?
+ NULL, false) == HVMTRANS_okay) ?
sizeof(hvmemul_ctxt->insn_buf) : 0;
}
else
@@ -2942,7 +2942,7 @@ void hvm_task_switch(
}
rc = hvm_copy_from_guest_linear(
- &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+ &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo, false);
if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMTRANS_okay )
@@ -2989,7 +2989,7 @@ void hvm_task_switch(
goto out;
rc = hvm_copy_from_guest_linear(
- &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+ &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo, false);
if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
/*
@@ -3180,7 +3180,7 @@ enum hvm_translation_result hvm_translate_get_page(
#define HVMCOPY_linear (1u<<2)
static enum hvm_translation_result __hvm_copy(
void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
- uint32_t pfec, pagefault_info_t *pfinfo)
+ uint32_t pfec, pagefault_info_t *pfinfo, bool send_event)
{
gfn_t gfn;
struct page_info *page;
@@ -3224,6 +3224,12 @@ static enum hvm_translation_result __hvm_copy(
return HVMTRANS_bad_gfn_to_mfn;
}
+ if ( hvm_emulate_send_vm_event(addr, gfn, pfec, send_event) )
+ {
+ put_page(page);
+ return HVMTRANS_gfn_paged_out;
+ }
+
p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
if ( flags & HVMCOPY_to_guest )
@@ -3267,14 +3273,14 @@ enum hvm_translation_result hvm_copy_to_guest_phys(
paddr_t paddr, void *buf, int size, struct vcpu *v)
{
return __hvm_copy(buf, paddr, size, v,
- HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL);
+ HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL, false);
}
enum hvm_translation_result hvm_copy_from_guest_phys(
void *buf, paddr_t paddr, int size)
{
return __hvm_copy(buf, paddr, size, current,
- HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
+ HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL, false);
}
enum hvm_translation_result hvm_copy_to_guest_linear(
@@ -3283,16 +3289,17 @@ enum hvm_translation_result hvm_copy_to_guest_linear(
{
return __hvm_copy(buf, addr, size, current,
HVMCOPY_to_guest | HVMCOPY_linear,
- PFEC_page_present | PFEC_write_access | pfec, pfinfo);
+ PFEC_page_present | PFEC_write_access | pfec, pfinfo,
+ false);
}
enum hvm_translation_result hvm_copy_from_guest_linear(
void *buf, unsigned long addr, int size, uint32_t pfec,
- pagefault_info_t *pfinfo)
+ pagefault_info_t *pfinfo, bool send_event)
{
return __hvm_copy(buf, addr, size, current,
HVMCOPY_from_guest | HVMCOPY_linear,
- PFEC_page_present | pfec, pfinfo);
+ PFEC_page_present | pfec, pfinfo, send_event);
}
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
@@ -3333,7 +3340,7 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
return 0;
}
- rc = hvm_copy_from_guest_linear(to, (unsigned long)from, len, 0, NULL);
+ rc = hvm_copy_from_guest_linear(to, (unsigned long)from, len, 0, NULL, false);
return rc ? len : 0; /* fake a copy_from_user() return code */
}
@@ -3707,7 +3714,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
sizeof(sig), hvm_access_insn_fetch,
cs, &addr) &&
(hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
- walk, NULL) == HVMTRANS_okay) &&
+ walk, NULL, false) == HVMTRANS_okay) &&
(memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
{
regs->rip += sizeof(sig);
@@ -1255,7 +1255,7 @@ static void svm_emul_swint_injection(struct x86_event *event)
goto raise_exception;
rc = hvm_copy_from_guest_linear(&idte, idte_linear_addr, idte_size,
- PFEC_implicit, &pfinfo);
+ PFEC_implicit, &pfinfo, false);
if ( rc )
{
if ( rc == HVMTRANS_bad_linear_to_gfn )
@@ -87,7 +87,7 @@ void hvm_vm_event_do_resume(struct vcpu *v)
kind = EMUL_KIND_SET_CONTEXT_INSN;
hvm_emulate_one_vm_event(kind, TRAP_invalid_op,
- X86_EVENT_NO_EC);
+ X86_EVENT_NO_EC, false);
v->arch.vm_event->emulate_flags = 0;
}
@@ -426,7 +426,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
{
pagefault_info_t pfinfo;
int rc = hvm_copy_from_guest_linear(poperandS, base, size,
- 0, &pfinfo);
+ 0, &pfinfo, false);
if ( rc == HVMTRANS_bad_linear_to_gfn )
hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
@@ -214,7 +214,8 @@ bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
d->arch.monitor.inguest_pagefault_disabled &&
npfec.kind != npfec_kind_with_gla ) /* don't send a mem_event */
{
- hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op, X86_EVENT_NO_EC);
+ hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op,
+ X86_EVENT_NO_EC, true);
return true;
}
@@ -166,7 +166,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
hvm_access_insn_fetch, sh_ctxt, &addr) &&
!hvm_copy_from_guest_linear(
sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
- PFEC_insn_fetch, NULL))
+ PFEC_insn_fetch, NULL, false))
? sizeof(sh_ctxt->insn_buf) : 0;
return &hvm_shadow_emulator_ops;
@@ -201,7 +201,7 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
hvm_access_insn_fetch, sh_ctxt, &addr) &&
!hvm_copy_from_guest_linear(
sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
- PFEC_insn_fetch, NULL))
+ PFEC_insn_fetch, NULL, false))
? sizeof(sh_ctxt->insn_buf) : 0;
sh_ctxt->insn_buf_eip = regs->rip;
}
@@ -125,7 +125,7 @@ hvm_read(enum x86_segment seg,
rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
(access_type == hvm_access_insn_fetch
? PFEC_insn_fetch : 0),
- &pfinfo);
+ &pfinfo, false);
switch ( rc )
{
@@ -47,6 +47,7 @@ struct hvm_emulate_ctxt {
uint32_t intr_shadow;
bool_t set_context;
+ bool send_event;
};
enum emul_kind {
@@ -63,7 +64,8 @@ int hvm_emulate_one(
struct hvm_emulate_ctxt *hvmemul_ctxt);
void hvm_emulate_one_vm_event(enum emul_kind kind,
unsigned int trapnr,
- unsigned int errcode);
+ unsigned int errcode,
+ bool send_event);
/* Must be called once to set up hvmemul state. */
void hvm_emulate_init_once(
struct hvm_emulate_ctxt *hvmemul_ctxt,
@@ -80,6 +82,11 @@ struct segment_register *hvmemul_get_seg_reg(
enum x86_segment seg,
struct hvm_emulate_ctxt *hvmemul_ctxt);
int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla);
+bool hvm_emulate_send_vm_event(
+ unsigned long gla,
+ gfn_t gfn,
+ uint32_t pfec,
+ bool send_event);
static inline bool handle_mmio(void)
{
@@ -99,7 +99,7 @@ enum hvm_translation_result hvm_copy_to_guest_linear(
pagefault_info_t *pfinfo);
enum hvm_translation_result hvm_copy_from_guest_linear(
void *buf, unsigned long addr, int size, uint32_t pfec,
- pagefault_info_t *pfinfo);
+ pagefault_info_t *pfinfo, bool send_event);
/*
* Get a reference on the page under an HVM physical or linear address. If