[v3,22/24] x86/hvm: Avoid __hvm_copy() raising #PF behind the emulator's back

Message ID 1480513841-7565-23-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived

Commit Message

Andrew Cooper Nov. 30, 2016, 1:50 p.m. UTC
Drop the call to hvm_inject_page_fault() in __hvm_copy(), and require callers
to inject the page fault themselves.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Acked-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
CC: Paul Durrant <paul.durrant@citrix.com>

v3:
 * Correct patch description
 * Fix rebasing error over previous TSS series
---
 xen/arch/x86/hvm/emulate.c        |  2 ++
 xen/arch/x86/hvm/hvm.c            | 14 ++++++++++++--
 xen/arch/x86/hvm/vmx/vvmx.c       | 20 +++++++++++++++-----
 xen/arch/x86/mm/shadow/common.c   |  1 +
 xen/include/asm-x86/hvm/support.h |  4 +---
 5 files changed, 31 insertions(+), 10 deletions(-)
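
The net effect, visible in each hunk below, is that on HVMCOPY_bad_gva_to_gfn
the caller now raises the fault itself from the pagefault_info_t it passed in.
A minimal sketch of the call-site pattern, modelled on the decode_vmx_inst()
hunk (the wrapper name copy_example is illustrative, not part of the patch):

    /* Sketch only: relies on Xen-internal helpers from asm/hvm/support.h. */
    static int copy_example(void *buf, unsigned long addr, unsigned int size)
    {
        pagefault_info_t pfinfo;
        enum hvm_copy_result rc =
            hvm_copy_from_guest_linear(buf, addr, size, 0, &pfinfo);

        /* __hvm_copy() no longer injects #PF; the caller must do so. */
        if ( rc == HVMCOPY_bad_gva_to_gfn )
            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
        if ( rc != HVMCOPY_okay )
            return X86EMUL_EXCEPTION;

        return X86EMUL_OKAY;
    }

Emulator paths (hvm/emulate.c and mm/shadow/common.c) instead record the
fault on the emulation context via x86_emul_pagefault(), so injection stays
under the emulator's control; a sketch of that variant follows the patch.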

Comments

Paul Durrant Nov. 30, 2016, 2:29 p.m. UTC | #1
> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: 30 November 2016 13:51
> To: Xen-devel <xen-devel@lists.xen.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>
> Subject: [PATCH v3 22/24] x86/hvm: Avoid __hvm_copy() raising #PF behind
> the emulator's back
> 
> Drop the call to hvm_inject_page_fault() in __hvm_copy(), and require
> callers to inject the page fault themselves.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Acked-by: Tim Deegan <tim@xen.org>
> Acked-by: Kevin Tian <kevin.tian@intel.com>
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> ---
> CC: Paul Durrant <paul.durrant@citrix.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

Patch

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 035b654..ccf3aa2 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -799,6 +799,7 @@  static int __hvmemul_read(
     case HVMCOPY_okay:
         break;
     case HVMCOPY_bad_gva_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
@@ -905,6 +906,7 @@  static int hvmemul_write(
     case HVMCOPY_okay:
         break;
     case HVMCOPY_bad_gva_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
         return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 37eaee2..3596f2c 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2927,6 +2927,8 @@  void hvm_task_switch(
 
     rc = hvm_copy_from_guest_linear(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMCOPY_okay )
         goto out;
 
@@ -2965,11 +2967,15 @@  void hvm_task_switch(
                                   offsetof(typeof(tss), trace) -
                                   offsetof(typeof(tss), eip),
                                   PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMCOPY_okay )
         goto out;
 
     rc = hvm_copy_from_guest_linear(
         &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     /*
      * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
      * functions knew we want RO access.
@@ -3012,7 +3018,10 @@  void hvm_task_switch(
                                       &tss.back_link, sizeof(tss.back_link), 0,
                                       &pfinfo);
         if ( rc == HVMCOPY_bad_gva_to_gfn )
+        {
+            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
             exn_raised = 1;
+        }
         else if ( rc != HVMCOPY_okay )
             goto out;
     }
@@ -3050,7 +3059,10 @@  void hvm_task_switch(
             rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
                                           &pfinfo);
             if ( rc == HVMCOPY_bad_gva_to_gfn )
+            {
+                hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
                 exn_raised = 1;
+            }
             else if ( rc != HVMCOPY_okay )
                 goto out;
         }
@@ -3114,8 +3126,6 @@  static enum hvm_copy_result __hvm_copy(
                 {
                     pfinfo->linear = addr;
                     pfinfo->ec = pfec;
-
-                    hvm_inject_page_fault(pfec, addr);
                 }
                 return HVMCOPY_bad_gva_to_gfn;
             }
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index fd7ea0a..e6e9ebd 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -396,7 +396,6 @@  static int decode_vmx_inst(struct cpu_user_regs *regs,
     struct vcpu *v = current;
     union vmx_inst_info info;
     struct segment_register seg;
-    pagefault_info_t pfinfo;
     unsigned long base, index, seg_base, disp, offset;
     int scale, size;
 
@@ -451,10 +450,17 @@  static int decode_vmx_inst(struct cpu_user_regs *regs,
               offset + size - 1 > seg.limit) )
             goto gp_fault;
 
-        if ( poperandS != NULL &&
-             hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
-                  != HVMCOPY_okay )
-            return X86EMUL_EXCEPTION;
+        if ( poperandS != NULL )
+        {
+            pagefault_info_t pfinfo;
+            int rc = hvm_copy_from_guest_linear(poperandS, base, size,
+                                                0, &pfinfo);
+
+            if ( rc == HVMCOPY_bad_gva_to_gfn )
+                hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
+            if ( rc != HVMCOPY_okay )
+                return X86EMUL_EXCEPTION;
+        }
         decode->mem = base;
         decode->len = size;
     }
@@ -1623,6 +1629,8 @@  int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
     gpa = nvcpu->nv_vvmcxaddr;
 
     rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
+    if ( rc == HVMCOPY_bad_gva_to_gfn )
+        hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     if ( rc != HVMCOPY_okay )
         return X86EMUL_EXCEPTION;
 
@@ -1694,6 +1702,8 @@  int nvmx_handle_vmread(struct cpu_user_regs *regs)
     switch ( decode.type ) {
     case VMX_INST_MEMREG_TYPE_MEMORY:
         rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
+        if ( rc == HVMCOPY_bad_gva_to_gfn )
+            hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
         if ( rc != HVMCOPY_okay )
             return X86EMUL_EXCEPTION;
         break;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 0760e76..fbe49e1 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -198,6 +198,7 @@  hvm_read(enum x86_segment seg,
     case HVMCOPY_okay:
         return X86EMUL_OKAY;
     case HVMCOPY_bad_gva_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &sh_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
     case HVMCOPY_unhandleable:
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 78349f8..3d767d7 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -85,9 +85,7 @@  enum hvm_copy_result hvm_copy_from_guest_phys(
  *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
  *                          mapping to a guest physical address.  The
  *                          pagefault_info_t structure will be filled in if
- *                          provided, and a page fault exception is
- *                          automatically queued for injection into the
- *                          current HVM VCPU.
+ *                          provided.
  */
 typedef struct pagefault_info
 {
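
For comparison, a minimal sketch of the emulation-side variant of the same
contract, modelled on the hvm_read() hunk above (read_example is an
illustrative name, not part of the patch): the fault is queued on the
emulation context rather than injected directly.

    /* Sketch only: ctxt is the x86_emulate_ctxt the emulator is running. */
    static int read_example(void *buf, unsigned long addr, unsigned int size,
                            struct x86_emulate_ctxt *ctxt)
    {
        pagefault_info_t pfinfo;

        switch ( hvm_copy_from_guest_linear(buf, addr, size, 0, &pfinfo) )
        {
        case HVMCOPY_okay:
            return X86EMUL_OKAY;
        case HVMCOPY_bad_gva_to_gfn:
            /* Record the #PF for the emulator; no direct injection here. */
            x86_emul_pagefault(pfinfo.ec, pfinfo.linear, ctxt);
            return X86EMUL_EXCEPTION;
        default:
            return X86EMUL_UNHANDLEABLE;
        }
    }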