diff mbox

[v2,16/19] x86/hvm: Rename hvm_copy_*_guest_virt() to hvm_copy_*_guest_linear()

Message ID 1480331616-6165-17-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Andrew Cooper Nov. 28, 2016, 11:13 a.m. UTC
The functions use linear addresses, not virtual addresses, as no segmentation
is used.  (Lots of other code in Xen makes this mistake.)

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
CC: Paul Durrant <paul.durrant@citrix.com>
---
 xen/arch/x86/hvm/emulate.c        | 12 ++++----
 xen/arch/x86/hvm/hvm.c            | 60 +++++++++++++++++++--------------------
 xen/arch/x86/hvm/vmx/vvmx.c       |  6 ++--
 xen/arch/x86/mm/shadow/common.c   |  8 +++---
 xen/include/asm-x86/hvm/support.h | 14 ++++-----
 5 files changed, 50 insertions(+), 50 deletions(-)

Comments

Paul Durrant Nov. 28, 2016, 11:59 a.m. UTC | #1
> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: 28 November 2016 11:14
> To: Xen-devel <xen-devel@lists.xen.org>
> Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant
> <Paul.Durrant@citrix.com>
> Subject: [PATCH v2 16/19] x86/hvm: Rename hvm_copy_*_guest_virt() to
> hvm_copy_*_guest_linear()
> 
> The functions use linear addresses, not virtual addresses, as no
> segmentation
> is used.  (Lots of other code in Xen makes this mistake.)
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Acked-by: Tim Deegan <tim@xen.org>
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
> ---
> CC: Paul Durrant <paul.durrant@citrix.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> ---
>  xen/arch/x86/hvm/emulate.c        | 12 ++++----
>  xen/arch/x86/hvm/hvm.c            | 60 +++++++++++++++++++--------------------
>  xen/arch/x86/hvm/vmx/vvmx.c       |  6 ++--
>  xen/arch/x86/mm/shadow/common.c   |  8 +++---
>  xen/include/asm-x86/hvm/support.h | 14 ++++-----
>  5 files changed, 50 insertions(+), 50 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 5165bb2..efd6d32 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -791,8 +791,8 @@ static int __hvmemul_read(
>          pfec |= PFEC_user_mode;
> 
>      rc = ((access_type == hvm_access_insn_fetch) ?
> -          hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo) :
> -          hvm_copy_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo));
> +          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
> +          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
> 
>      switch ( rc )
>      {
> @@ -898,7 +898,7 @@ static int hvmemul_write(
>           (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
>          pfec |= PFEC_user_mode;
> 
> -    rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec, &pfinfo);
> +    rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
> 
>      switch ( rc )
>      {
> @@ -1947,9 +1947,9 @@ void hvm_emulate_init_per_insn(
>                                          hvm_access_insn_fetch,
>                                          hvmemul_ctxt->ctxt.addr_size,
>                                          &addr) &&
> -             hvm_fetch_from_guest_virt(hvmemul_ctxt->insn_buf, addr,
> -                                       sizeof(hvmemul_ctxt->insn_buf),
> -                                       pfec, NULL) == HVMCOPY_okay) ?
> +             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
> +                                         sizeof(hvmemul_ctxt->insn_buf),
> +                                         pfec, NULL) == HVMCOPY_okay) ?
>              sizeof(hvmemul_ctxt->insn_buf) : 0;
>      }
>      else
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 5eae06a..37eaee2 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2925,7 +2925,7 @@ void hvm_task_switch(
>          goto out;
>      }
> 
> -    rc = hvm_copy_from_guest_virt(
> +    rc = hvm_copy_from_guest_linear(
>          &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
>      if ( rc != HVMCOPY_okay )
>          goto out;
> @@ -2960,15 +2960,15 @@ void hvm_task_switch(
>      hvm_get_segment_register(v, x86_seg_ldtr, &segr);
>      tss.ldt = segr.sel;
> 
> -    rc = hvm_copy_to_guest_virt(prev_tr.base + offsetof(typeof(tss), eip),
> -                                &tss.eip,
> -                                offsetof(typeof(tss), trace) -
> -                                offsetof(typeof(tss), eip),
> -                                PFEC_page_present, &pfinfo);
> +    rc = hvm_copy_to_guest_linear(prev_tr.base + offsetof(typeof(tss), eip),
> +                                  &tss.eip,
> +                                  offsetof(typeof(tss), trace) -
> +                                  offsetof(typeof(tss), eip),
> +                                  PFEC_page_present, &pfinfo);
>      if ( rc != HVMCOPY_okay )
>          goto out;
> 
> -    rc = hvm_copy_from_guest_virt(
> +    rc = hvm_copy_from_guest_linear(
>          &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
>      /*
>       * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
> @@ -3008,9 +3008,9 @@ void hvm_task_switch(
>          regs->eflags |= X86_EFLAGS_NT;
>          tss.back_link = prev_tr.sel;
> 
> -        rc = hvm_copy_to_guest_virt(tr.base + offsetof(typeof(tss), back_link),
> -                                    &tss.back_link, sizeof(tss.back_link), 0,
> -                                    &pfinfo);
> +        rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
> +                                      &tss.back_link, sizeof(tss.back_link), 0,
> +                                      &pfinfo);
>          if ( rc == HVMCOPY_bad_gva_to_gfn )
>              exn_raised = 1;
>          else if ( rc != HVMCOPY_okay )
> @@ -3047,8 +3047,8 @@ void hvm_task_switch(
>                                          16 << segr.attr.fields.db,
>                                          &linear_addr) )
>          {
> -            rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0,
> -                                        &pfinfo);
> +            rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
> +                                          &pfinfo);
>              if ( rc == HVMCOPY_bad_gva_to_gfn )
>                  exn_raised = 1;
>              else if ( rc != HVMCOPY_okay )
> @@ -3067,7 +3067,7 @@ void hvm_task_switch(
>  #define HVMCOPY_from_guest (0u<<0)
>  #define HVMCOPY_to_guest   (1u<<0)
>  #define HVMCOPY_phys       (0u<<2)
> -#define HVMCOPY_virt       (1u<<2)
> +#define HVMCOPY_linear     (1u<<2)
>  static enum hvm_copy_result __hvm_copy(
>      void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
>      pagefault_info_t *pfinfo)
> @@ -3101,7 +3101,7 @@ static enum hvm_copy_result __hvm_copy(
> 
>          count = min_t(int, PAGE_SIZE - gpa, todo);
> 
> -        if ( flags & HVMCOPY_virt )
> +        if ( flags & HVMCOPY_linear )
>          {
>              gfn = paging_gva_to_gfn(curr, addr, &pfec);
>              if ( gfn == gfn_x(INVALID_GFN) )
> @@ -3295,30 +3295,30 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
>                        HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
>  }
> 
> -enum hvm_copy_result hvm_copy_to_guest_virt(
> -    unsigned long vaddr, void *buf, int size, uint32_t pfec,
> +enum hvm_copy_result hvm_copy_to_guest_linear(
> +    unsigned long addr, void *buf, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> -                      HVMCOPY_to_guest | HVMCOPY_virt,
> +    return __hvm_copy(buf, addr, size,
> +                      HVMCOPY_to_guest | HVMCOPY_linear,
>                        PFEC_page_present | PFEC_write_access | pfec, pfinfo);
>  }
> 
> -enum hvm_copy_result hvm_copy_from_guest_virt(
> -    void *buf, unsigned long vaddr, int size, uint32_t pfec,
> +enum hvm_copy_result hvm_copy_from_guest_linear(
> +    void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> -                      HVMCOPY_from_guest | HVMCOPY_virt,
> +    return __hvm_copy(buf, addr, size,
> +                      HVMCOPY_from_guest | HVMCOPY_linear,
>                        PFEC_page_present | pfec, pfinfo);
>  }
> 
> -enum hvm_copy_result hvm_fetch_from_guest_virt(
> -    void *buf, unsigned long vaddr, int size, uint32_t pfec,
> +enum hvm_copy_result hvm_fetch_from_guest_linear(
> +    void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> -    return __hvm_copy(buf, vaddr, size,
> -                      HVMCOPY_from_guest | HVMCOPY_virt,
> +    return __hvm_copy(buf, addr, size,
> +                      HVMCOPY_from_guest | HVMCOPY_linear,
>                        PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
>  }
> 
> @@ -3333,7 +3333,7 @@ unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
>          return 0;
>      }
> 
> -    rc = hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len, 0, NULL);
> +    rc = hvm_copy_to_guest_linear((unsigned long)to, (void *)from, len, 0, NULL);
>      return rc ? len : 0; /* fake a copy_to_user() return code */
>  }
> 
> @@ -3363,7 +3363,7 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
>          return 0;
>      }
> 
> -    rc = hvm_copy_from_guest_virt(to, (unsigned long)from, len, 0, NULL);
> +    rc = hvm_copy_from_guest_linear(to, (unsigned long)from, len, 0, NULL);
>      return rc ? len : 0; /* fake a copy_from_user() return code */
>  }
> 
> @@ -4038,8 +4038,8 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
>                                          (hvm_long_mode_enabled(cur) &&
>                                           cs->attr.fields.l) ? 64 :
>                                          cs->attr.fields.db ? 32 : 16, &addr) &&
> -             (hvm_fetch_from_guest_virt(sig, addr, sizeof(sig),
> -                                        walk, NULL) == HVMCOPY_okay) &&
> +             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
> +                                          walk, NULL) == HVMCOPY_okay) &&
>               (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
>          {
>              regs->eip += sizeof(sig);
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index 7342d12..fd7ea0a 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -452,7 +452,7 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
>              goto gp_fault;
> 
>          if ( poperandS != NULL &&
> -             hvm_copy_from_guest_virt(poperandS, base, size, 0, &pfinfo)
> +             hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
>                    != HVMCOPY_okay )
>              return X86EMUL_EXCEPTION;
>          decode->mem = base;
> @@ -1622,7 +1622,7 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
> 
>      gpa = nvcpu->nv_vvmcxaddr;
> 
> -    rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0, &pfinfo);
> +    rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
>      if ( rc != HVMCOPY_okay )
>          return X86EMUL_EXCEPTION;
> 
> @@ -1693,7 +1693,7 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
> 
>      switch ( decode.type ) {
>      case VMX_INST_MEMREG_TYPE_MEMORY:
> -        rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0, &pfinfo);
> +        rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
>          if ( rc != HVMCOPY_okay )
>              return X86EMUL_EXCEPTION;
>          break;
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index b659324..0760e76 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -189,9 +189,9 @@ hvm_read(enum x86_segment seg,
>          return rc;
> 
>      if ( access_type == hvm_access_insn_fetch )
> -        rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
> +        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
>      else
> -        rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
> +        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
> 
>      switch ( rc )
>      {
> @@ -419,7 +419,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
>          (!hvm_translate_linear_addr(
>              x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
>              hvm_access_insn_fetch, sh_ctxt, &addr) &&
> -         !hvm_fetch_from_guest_virt(
> +         !hvm_fetch_from_guest_linear(
>               sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
>          ? sizeof(sh_ctxt->insn_buf) : 0;
> 
> @@ -447,7 +447,7 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
>                  (!hvm_translate_linear_addr(
>                      x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
>                      hvm_access_insn_fetch, sh_ctxt, &addr) &&
> -                 !hvm_fetch_from_guest_virt(
> +                 !hvm_fetch_from_guest_linear(
>                       sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
>                  ? sizeof(sh_ctxt->insn_buf) : 0;
>              sh_ctxt->insn_buf_eip = regs->eip;
> diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> index 114aa04..78349f8 100644
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -73,7 +73,7 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size);
> 
>  /*
> - * Copy to/from a guest virtual address. @pfec should include PFEC_user_mode
> + * Copy to/from a guest linear address. @pfec should include PFEC_user_mode
>   * if emulating a user-mode access (CPL=3). All other flags in @pfec are
>   * managed by the called function: it is therefore optional for the caller
>   * to set them.
> @@ -95,14 +95,14 @@ typedef struct pagefault_info
>      int ec;
>  } pagefault_info_t;
> 
> -enum hvm_copy_result hvm_copy_to_guest_virt(
> -    unsigned long vaddr, void *buf, int size, uint32_t pfec,
> +enum hvm_copy_result hvm_copy_to_guest_linear(
> +    unsigned long addr, void *buf, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> -enum hvm_copy_result hvm_copy_from_guest_virt(
> -    void *buf, unsigned long vaddr, int size, uint32_t pfec,
> +enum hvm_copy_result hvm_copy_from_guest_linear(
> +    void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> -enum hvm_copy_result hvm_fetch_from_guest_virt(
> -    void *buf, unsigned long vaddr, int size, uint32_t pfec,
> +enum hvm_copy_result hvm_fetch_from_guest_linear(
> +    void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> 
>  #define HVM_HCALL_completed  0 /* hypercall completed - no further action */
> --
> 2.1.4
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 5165bb2..efd6d32 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -791,8 +791,8 @@  static int __hvmemul_read(
         pfec |= PFEC_user_mode;
 
     rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo) :
-          hvm_copy_from_guest_virt(p_data, addr, bytes, pfec, &pfinfo));
+          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
+          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
 
     switch ( rc )
     {
@@ -898,7 +898,7 @@  static int hvmemul_write(
          (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
         pfec |= PFEC_user_mode;
 
-    rc = hvm_copy_to_guest_virt(addr, p_data, bytes, pfec, &pfinfo);
+    rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
 
     switch ( rc )
     {
@@ -1947,9 +1947,9 @@  void hvm_emulate_init_per_insn(
                                         hvm_access_insn_fetch,
                                         hvmemul_ctxt->ctxt.addr_size,
                                         &addr) &&
-             hvm_fetch_from_guest_virt(hvmemul_ctxt->insn_buf, addr,
-                                       sizeof(hvmemul_ctxt->insn_buf),
-                                       pfec, NULL) == HVMCOPY_okay) ?
+             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+                                         sizeof(hvmemul_ctxt->insn_buf),
+                                         pfec, NULL) == HVMCOPY_okay) ?
             sizeof(hvmemul_ctxt->insn_buf) : 0;
     }
     else
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5eae06a..37eaee2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2925,7 +2925,7 @@  void hvm_task_switch(
         goto out;
     }
 
-    rc = hvm_copy_from_guest_virt(
+    rc = hvm_copy_from_guest_linear(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
     if ( rc != HVMCOPY_okay )
         goto out;
@@ -2960,15 +2960,15 @@  void hvm_task_switch(
     hvm_get_segment_register(v, x86_seg_ldtr, &segr);
     tss.ldt = segr.sel;
 
-    rc = hvm_copy_to_guest_virt(prev_tr.base + offsetof(typeof(tss), eip),
-                                &tss.eip,
-                                offsetof(typeof(tss), trace) -
-                                offsetof(typeof(tss), eip),
-                                PFEC_page_present, &pfinfo);
+    rc = hvm_copy_to_guest_linear(prev_tr.base + offsetof(typeof(tss), eip),
+                                  &tss.eip,
+                                  offsetof(typeof(tss), trace) -
+                                  offsetof(typeof(tss), eip),
+                                  PFEC_page_present, &pfinfo);
     if ( rc != HVMCOPY_okay )
         goto out;
 
-    rc = hvm_copy_from_guest_virt(
+    rc = hvm_copy_from_guest_linear(
         &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
     /*
      * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
@@ -3008,9 +3008,9 @@  void hvm_task_switch(
         regs->eflags |= X86_EFLAGS_NT;
         tss.back_link = prev_tr.sel;
 
-        rc = hvm_copy_to_guest_virt(tr.base + offsetof(typeof(tss), back_link),
-                                    &tss.back_link, sizeof(tss.back_link), 0,
-                                    &pfinfo);
+        rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
+                                      &tss.back_link, sizeof(tss.back_link), 0,
+                                      &pfinfo);
         if ( rc == HVMCOPY_bad_gva_to_gfn )
             exn_raised = 1;
         else if ( rc != HVMCOPY_okay )
@@ -3047,8 +3047,8 @@  void hvm_task_switch(
                                         16 << segr.attr.fields.db,
                                         &linear_addr) )
         {
-            rc = hvm_copy_to_guest_virt(linear_addr, &errcode, opsz, 0,
-                                        &pfinfo);
+            rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
+                                          &pfinfo);
             if ( rc == HVMCOPY_bad_gva_to_gfn )
                 exn_raised = 1;
             else if ( rc != HVMCOPY_okay )
@@ -3067,7 +3067,7 @@  void hvm_task_switch(
 #define HVMCOPY_from_guest (0u<<0)
 #define HVMCOPY_to_guest   (1u<<0)
 #define HVMCOPY_phys       (0u<<2)
-#define HVMCOPY_virt       (1u<<2)
+#define HVMCOPY_linear     (1u<<2)
 static enum hvm_copy_result __hvm_copy(
     void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
     pagefault_info_t *pfinfo)
@@ -3101,7 +3101,7 @@  static enum hvm_copy_result __hvm_copy(
 
         count = min_t(int, PAGE_SIZE - gpa, todo);
 
-        if ( flags & HVMCOPY_virt )
+        if ( flags & HVMCOPY_linear )
         {
             gfn = paging_gva_to_gfn(curr, addr, &pfec);
             if ( gfn == gfn_x(INVALID_GFN) )
@@ -3295,30 +3295,30 @@  enum hvm_copy_result hvm_copy_from_guest_phys(
                       HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
 }
 
-enum hvm_copy_result hvm_copy_to_guest_virt(
-    unsigned long vaddr, void *buf, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_to_guest_linear(
+    unsigned long addr, void *buf, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
-    return __hvm_copy(buf, vaddr, size,
-                      HVMCOPY_to_guest | HVMCOPY_virt,
+    return __hvm_copy(buf, addr, size,
+                      HVMCOPY_to_guest | HVMCOPY_linear,
                       PFEC_page_present | PFEC_write_access | pfec, pfinfo);
 }
 
-enum hvm_copy_result hvm_copy_from_guest_virt(
-    void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_from_guest_linear(
+    void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
-    return __hvm_copy(buf, vaddr, size,
-                      HVMCOPY_from_guest | HVMCOPY_virt,
+    return __hvm_copy(buf, addr, size,
+                      HVMCOPY_from_guest | HVMCOPY_linear,
                       PFEC_page_present | pfec, pfinfo);
 }
 
-enum hvm_copy_result hvm_fetch_from_guest_virt(
-    void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_fetch_from_guest_linear(
+    void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
-    return __hvm_copy(buf, vaddr, size,
-                      HVMCOPY_from_guest | HVMCOPY_virt,
+    return __hvm_copy(buf, addr, size,
+                      HVMCOPY_from_guest | HVMCOPY_linear,
                       PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
 }
 
@@ -3333,7 +3333,7 @@  unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
         return 0;
     }
 
-    rc = hvm_copy_to_guest_virt((unsigned long)to, (void *)from, len, 0, NULL);
+    rc = hvm_copy_to_guest_linear((unsigned long)to, (void *)from, len, 0, NULL);
     return rc ? len : 0; /* fake a copy_to_user() return code */
 }
 
@@ -3363,7 +3363,7 @@  unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
         return 0;
     }
 
-    rc = hvm_copy_from_guest_virt(to, (unsigned long)from, len, 0, NULL);
+    rc = hvm_copy_from_guest_linear(to, (unsigned long)from, len, 0, NULL);
     return rc ? len : 0; /* fake a copy_from_user() return code */
 }
 
@@ -4038,8 +4038,8 @@  void hvm_ud_intercept(struct cpu_user_regs *regs)
                                         (hvm_long_mode_enabled(cur) &&
                                          cs->attr.fields.l) ? 64 :
                                         cs->attr.fields.db ? 32 : 16, &addr) &&
-             (hvm_fetch_from_guest_virt(sig, addr, sizeof(sig),
-                                        walk, NULL) == HVMCOPY_okay) &&
+             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
+                                          walk, NULL) == HVMCOPY_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
         {
             regs->eip += sizeof(sig);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 7342d12..fd7ea0a 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -452,7 +452,7 @@  static int decode_vmx_inst(struct cpu_user_regs *regs,
             goto gp_fault;
 
         if ( poperandS != NULL &&
-             hvm_copy_from_guest_virt(poperandS, base, size, 0, &pfinfo)
+             hvm_copy_from_guest_linear(poperandS, base, size, 0, &pfinfo)
                   != HVMCOPY_okay )
             return X86EMUL_EXCEPTION;
         decode->mem = base;
@@ -1622,7 +1622,7 @@  int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
 
     gpa = nvcpu->nv_vvmcxaddr;
 
-    rc = hvm_copy_to_guest_virt(decode.mem, &gpa, decode.len, 0, &pfinfo);
+    rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
     if ( rc != HVMCOPY_okay )
         return X86EMUL_EXCEPTION;
 
@@ -1693,7 +1693,7 @@  int nvmx_handle_vmread(struct cpu_user_regs *regs)
 
     switch ( decode.type ) {
     case VMX_INST_MEMREG_TYPE_MEMORY:
-        rc = hvm_copy_to_guest_virt(decode.mem, &value, decode.len, 0, &pfinfo);
+        rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
         if ( rc != HVMCOPY_okay )
             return X86EMUL_EXCEPTION;
         break;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index b659324..0760e76 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -189,9 +189,9 @@  hvm_read(enum x86_segment seg,
         return rc;
 
     if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
+        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
     else
-        rc = hvm_copy_from_guest_virt(p_data, addr, bytes, 0, &pfinfo);
+        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
 
     switch ( rc )
     {
@@ -419,7 +419,7 @@  const struct x86_emulate_ops *shadow_init_emulation(
         (!hvm_translate_linear_addr(
             x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_fetch_from_guest_virt(
+         !hvm_fetch_from_guest_linear(
              sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
@@ -447,7 +447,7 @@  void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
                 (!hvm_translate_linear_addr(
                     x86_seg_cs, regs->eip, sizeof(sh_ctxt->insn_buf),
                     hvm_access_insn_fetch, sh_ctxt, &addr) &&
-                 !hvm_fetch_from_guest_virt(
+                 !hvm_fetch_from_guest_linear(
                      sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
                 ? sizeof(sh_ctxt->insn_buf) : 0;
             sh_ctxt->insn_buf_eip = regs->eip;
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index 114aa04..78349f8 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -73,7 +73,7 @@  enum hvm_copy_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size);
 
 /*
- * Copy to/from a guest virtual address. @pfec should include PFEC_user_mode
+ * Copy to/from a guest linear address. @pfec should include PFEC_user_mode
  * if emulating a user-mode access (CPL=3). All other flags in @pfec are
  * managed by the called function: it is therefore optional for the caller
  * to set them.
@@ -95,14 +95,14 @@  typedef struct pagefault_info
     int ec;
 } pagefault_info_t;
 
-enum hvm_copy_result hvm_copy_to_guest_virt(
-    unsigned long vaddr, void *buf, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_to_guest_linear(
+    unsigned long addr, void *buf, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
-enum hvm_copy_result hvm_copy_from_guest_virt(
-    void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_copy_from_guest_linear(
+    void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
-enum hvm_copy_result hvm_fetch_from_guest_virt(
-    void *buf, unsigned long vaddr, int size, uint32_t pfec,
+enum hvm_copy_result hvm_fetch_from_guest_linear(
+    void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
 
 #define HVM_HCALL_completed  0 /* hypercall completed - no further action */