diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -799,6 +799,7 @@ static int __hvmemul_read(
case HVMCOPY_okay:
break;
case HVMCOPY_bad_gva_to_gfn:
+ x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
case HVMCOPY_bad_gfn_to_mfn:
if ( access_type == hvm_access_insn_fetch )
@@ -905,6 +906,7 @@ static int hvmemul_write(
case HVMCOPY_okay:
break;
case HVMCOPY_bad_gva_to_gfn:
+ x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
return X86EMUL_EXCEPTION;
case HVMCOPY_bad_gfn_to_mfn:
return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2927,6 +2927,8 @@ void hvm_task_switch(
rc = hvm_copy_from_guest_linear(
&tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMCOPY_okay )
goto out;
@@ -2965,11 +2967,15 @@ void hvm_task_switch(
rc = hvm_copy_to_guest_linear(prev_tr.base, &tss,
offsetof(typeof(tss), trace) -
offsetof(typeof(tss), eip),
PFEC_page_present, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMCOPY_okay )
goto out;
rc = hvm_copy_from_guest_linear(
&tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
/*
* Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
* functions knew we want RO access.
@@ -3012,7 +3018,10 @@ void hvm_task_switch(
rc = hvm_copy_to_guest_linear(tr.base,
&tss.back_link, sizeof(tss.back_link), 0,
&pfinfo);
if ( rc == HVMCOPY_bad_gva_to_gfn )
+ {
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
exn_raised = 1;
+ }
else if ( rc != HVMCOPY_okay )
goto out;
}
@@ -3114,8 +3123,6 @@ static enum hvm_copy_result __hvm_copy(
{
pfinfo->linear = addr;
pfinfo->ec = pfec;
-
- hvm_inject_page_fault(pfec, addr);
}
return HVMCOPY_bad_gva_to_gfn;
}
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -396,7 +396,6 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
struct vcpu *v = current;
union vmx_inst_info info;
struct segment_register seg;
- pagefault_info_t pfinfo;
unsigned long base, index, seg_base, disp, offset;
int scale, size;

@@ -451,10 +450,17 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
offset + size - 1 > seg.limit) )
goto gp_fault;

- if ( poperandS != NULL &&
- hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
- != HVMCOPY_okay )
- return X86EMUL_EXCEPTION;
+ if ( poperandS != NULL )
+ {
+ pagefault_info_t pfinfo;
+ int rc = hvm_copy_from_guest_linear(poperandS, base, size,
+ 0, &pfinfo);
+
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
+ if ( rc != HVMCOPY_okay )
+ return X86EMUL_EXCEPTION;
+ }
decode->mem = base;
decode->len = size;
}
@@ -1623,6 +1629,8 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
gpa = nvcpu->nv_vvmcxaddr;
rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
@@ -1694,6 +1702,8 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
switch ( decode.type ) {
case VMX_INST_MEMREG_TYPE_MEMORY:
rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
+ if ( rc == HVMCOPY_bad_gva_to_gfn )
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
if ( rc != HVMCOPY_okay )
return X86EMUL_EXCEPTION;
break;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -198,6 +198,7 @@ hvm_read(enum x86_segment seg,
case HVMCOPY_okay:
return X86EMUL_OKAY;
case HVMCOPY_bad_gva_to_gfn:
+ hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
return X86EMUL_EXCEPTION;
case HVMCOPY_bad_gfn_to_mfn:
case HVMCOPY_unhandleable:
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -85,9 +85,7 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
* HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
* mapping to a guest physical address. The
* pagefault_info_t structure will be filled in if
- * provided, and a page fault exception is
- * automatically queued for injection into the
- * current HVM VCPU.
+ * provided.
*/
typedef struct pagefault_info
{
Drop the call to hvm_inject_page_fault() in __hvm_copy(), and require
callers to inject the pagefault themselves.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Paul Durrant <paul.durrant@citrix.com>
CC: Tim Deegan <tim@xen.org>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/hvm/emulate.c        |  2 ++
 xen/arch/x86/hvm/hvm.c            | 11 +++++++++--
 xen/arch/x86/hvm/vmx/vvmx.c       | 20 +++++++++++++-----
 xen/arch/x86/mm/shadow/common.c   |  1 +
 xen/include/asm-x86/hvm/support.h |  4 +---
 5 files changed, 28 insertions(+), 10 deletions(-)
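
For reference, the contract change can be modelled as the standalone C
sketch below. The enum, the pagefault_info layout and the model_* names
are illustrative stand-ins rather than the real Xen declarations; the
point is that the copy routine now only fills in pfinfo on
HVMCOPY_bad_gva_to_gfn, and each caller decides whether and how to raise
the fault (hvm_inject_page_fault() in hvm.c/vvmx.c/shadow,
x86_emul_pagefault() in the emulator paths).

/*
 * Standalone model of the new contract (illustrative only).
 * Compile with: gcc -Wall sketch.c
 */
#include <stdint.h>
#include <stdio.h>

enum hvm_copy_result {
    HVMCOPY_okay,
    HVMCOPY_bad_gva_to_gfn,
    HVMCOPY_bad_gfn_to_mfn,
};

typedef struct pagefault_info {
    unsigned long linear;   /* faulting linear address */
    int ec;                 /* pagefault error code */
} pagefault_info_t;

/*
 * Model of __hvm_copy() after this patch: on a failed gva->gfn
 * translation it only fills in *pfinfo (if provided) and returns;
 * it no longer injects the pagefault itself.
 */
static enum hvm_copy_result model_copy_from_guest_linear(
    void *buf, unsigned long addr, unsigned int size,
    uint32_t pfec, pagefault_info_t *pfinfo)
{
    (void)buf; (void)size;

    if ( addr >= 0xffff800000000000UL )     /* pretend this is unmapped */
    {
        if ( pfinfo )
        {
            pfinfo->linear = addr;
            pfinfo->ec = pfec;
        }
        return HVMCOPY_bad_gva_to_gfn;
    }
    return HVMCOPY_okay;
}

static void model_inject_page_fault(int ec, unsigned long linear)
{
    printf("inject #PF: ec=%#x, cr2=%#lx\n", (unsigned)ec, linear);
}

/* The caller-side pattern this patch introduces at each call site. */
static int model_caller(unsigned long addr)
{
    char buf[8];
    pagefault_info_t pfinfo;
    int rc = model_copy_from_guest_linear(buf, addr, sizeof(buf),
                                          0, &pfinfo);

    if ( rc == HVMCOPY_bad_gva_to_gfn )
        model_inject_page_fault(pfinfo.ec, pfinfo.linear);
    if ( rc != HVMCOPY_okay )
        return 1;                   /* X86EMUL_EXCEPTION in the real code */

    return 0;
}

int main(void)
{
    model_caller(0x1000);                   /* ok: no fault injected */
    model_caller(0xffff812345678000UL);     /* caller injects the #PF */
    return 0;
}

Running the model prints a single injected #PF for the unmapped address,
mirroring e.g. the hvm_task_switch() hunks above, where injection is
followed by the existing rc != HVMCOPY_okay bail-out.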