x86/HVM: rename mmio_gva field to mmio_gla

Message ID 5760348B02000078000F4DC9@prv-mh.provo.novell.com (mailing list archive)
State New, archived

Commit Message

Jan Beulich June 14, 2016, 2:44 p.m. UTC
... to correctly reflect its purpose. To make things consistent also
rename handle_mmio_with_translation()'s respective parameter (but don't
touch sh_page_fault(), as renaming its parameter would require quite a
few more changes there).

Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
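
For background on the rename: x86 segmentation turns a virtual (effective)
address into a linear address, and paging then translates the linear address
to a physical one. The field caches the input to that paging step, so
"linear" is the accurate term; in 64-bit mode the two only differ for
FS-/GS-relative accesses. A minimal C sketch of the distinction (the FS base
value below is purely illustrative, not taken from the patch or from Xen):

#include <stdint.h>

/* Segmentation: effective ("virtual") address -> linear address.
 * Paging then maps the linear address to a physical one. */
static uint64_t effective_to_linear(uint64_t seg_base, uint64_t offset)
{
    return seg_base + offset;
}

int main(void)
{
    uint64_t fs_base = 0x7f0000000000ULL; /* hypothetical FS base */
    uint64_t gva = 0x1234;                /* guest virtual (effective) */
    uint64_t gla = effective_to_linear(fs_base, gva);
    return gla != gva;                    /* 1: they differ when base != 0 */
}
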
Comments

Andrew Cooper June 14, 2016, 2:48 p.m. UTC | #1
On 14/06/16 15:44, Jan Beulich wrote:
> ... to correctly reflect its purpose. To make things consistent also
> rename handle_mmio_with_translation()'s respective parameter (but don't
> touch sh_page_fault(), as renaming its parameter would require quite a
> few more changes there).
>
> Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Paul Durrant June 14, 2016, 2:50 p.m. UTC | #2
> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 14 June 2016 15:45
> To: xen-devel
> Cc: Andrew Cooper; Paul Durrant
> Subject: [PATCH] x86/HVM: rename mmio_gva field to mmio_gla
> 
> ... to correctly reflect its purpose. To make things consistent also
> rename handle_mmio_with_translation()'s respective parameter (but don't
> touch sh_page_fault(), as renaming its parameter would require quite a
> few more changes there).
> 
> Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -684,7 +684,7 @@ static void latch_linear_to_phys(struct
>      if ( vio->mmio_access.gla_valid )
>          return;
> 
> -    vio->mmio_gva = gla & PAGE_MASK;
> +    vio->mmio_gla = gla & PAGE_MASK;
>      vio->mmio_gpfn = PFN_DOWN(gpa);
>      vio->mmio_access = (struct npfec){ .gla_valid = 1,
>                                         .read_access = 1,
> @@ -782,7 +782,7 @@ static int __hvmemul_read(
>      if ( ((access_type != hvm_access_insn_fetch
>             ? vio->mmio_access.read_access
>             : vio->mmio_access.insn_fetch)) &&
> -         (vio->mmio_gva == (addr & PAGE_MASK)) )
> +         (vio->mmio_gla == (addr & PAGE_MASK)) )
>          return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
> 
>      if ( (seg != x86_seg_none) &&
> @@ -889,7 +889,7 @@ static int hvmemul_write(
>          return rc;
> 
>      if ( vio->mmio_access.write_access &&
> -         (vio->mmio_gva == (addr & PAGE_MASK)) )
> +         (vio->mmio_gla == (addr & PAGE_MASK)) )
>          return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
> 
>      if ( (seg != x86_seg_none) &&
> @@ -1181,7 +1181,7 @@ static int hvmemul_rep_movs(
> 
>      bytes = PAGE_SIZE - (saddr & ~PAGE_MASK);
>      if ( vio->mmio_access.read_access &&
> -         (vio->mmio_gva == (saddr & PAGE_MASK)) &&
> +         (vio->mmio_gla == (saddr & PAGE_MASK)) &&
>           bytes >= bytes_per_rep )
>      {
>          sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
> @@ -1200,7 +1200,7 @@ static int hvmemul_rep_movs(
> 
>      bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
>      if ( vio->mmio_access.write_access &&
> -         (vio->mmio_gva == (daddr & PAGE_MASK)) &&
> +         (vio->mmio_gla == (daddr & PAGE_MASK)) &&
>           bytes >= bytes_per_rep )
>      {
>          dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
> @@ -1320,7 +1320,7 @@ static int hvmemul_rep_stos(
> 
>      bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
>      if ( vio->mmio_access.write_access &&
> -         (vio->mmio_gva == (addr & PAGE_MASK)) &&
> +         (vio->mmio_gla == (addr & PAGE_MASK)) &&
>           bytes >= bytes_per_rep )
>      {
>          gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -114,7 +114,7 @@ int handle_mmio(void)
>      return 1;
>  }
> 
> -int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
> +int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
>                                   struct npfec access)
>  {
>      struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
> @@ -122,7 +122,7 @@ int handle_mmio_with_translation(unsigne
>      vio->mmio_access = access.gla_valid &&
>                         access.kind == npfec_kind_with_gla
>                         ? access : (struct npfec){};
> -    vio->mmio_gva = gva & PAGE_MASK;
> +    vio->mmio_gla = gla & PAGE_MASK;
>      vio->mmio_gpfn = gpfn;
>      return handle_mmio();
>  }
> --- a/xen/include/asm-x86/hvm/io.h
> +++ b/xen/include/asm-x86/hvm/io.h
> @@ -119,7 +119,7 @@ void relocate_portio_handler(
>  void send_timeoffset_req(unsigned long timeoff);
>  void send_invalidate_req(void);
>  int handle_mmio(void);
> -int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
> +int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
>                                   struct npfec);
>  int handle_pio(uint16_t port, unsigned int size, int dir);
>  void hvm_interrupt_post(struct vcpu *v, int vector, int type);
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -60,12 +60,12 @@ struct hvm_vcpu_io {
> 
>      /*
>       * HVM emulation:
> -     *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
> +     *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
>       *  The latter is known to be an MMIO frame (not RAM).
>       *  This translation is only valid for accesses as per @mmio_access.
>       */
>      struct npfec        mmio_access;
> -    unsigned long       mmio_gva;
> +    unsigned long       mmio_gla;
>      unsigned long       mmio_gpfn;
> 
>      /*
>

Patch

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -684,7 +684,7 @@ static void latch_linear_to_phys(struct
     if ( vio->mmio_access.gla_valid )
         return;
 
-    vio->mmio_gva = gla & PAGE_MASK;
+    vio->mmio_gla = gla & PAGE_MASK;
     vio->mmio_gpfn = PFN_DOWN(gpa);
     vio->mmio_access = (struct npfec){ .gla_valid = 1,
                                        .read_access = 1,
@@ -782,7 +782,7 @@ static int __hvmemul_read(
     if ( ((access_type != hvm_access_insn_fetch
            ? vio->mmio_access.read_access
            : vio->mmio_access.insn_fetch)) &&
-         (vio->mmio_gva == (addr & PAGE_MASK)) )
+         (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
     if ( (seg != x86_seg_none) &&
@@ -889,7 +889,7 @@ static int hvmemul_write(
         return rc;
 
     if ( vio->mmio_access.write_access &&
-         (vio->mmio_gva == (addr & PAGE_MASK)) )
+         (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
     if ( (seg != x86_seg_none) &&
@@ -1181,7 +1181,7 @@ static int hvmemul_rep_movs(
 
     bytes = PAGE_SIZE - (saddr & ~PAGE_MASK);
     if ( vio->mmio_access.read_access &&
-         (vio->mmio_gva == (saddr & PAGE_MASK)) &&
+         (vio->mmio_gla == (saddr & PAGE_MASK)) &&
          bytes >= bytes_per_rep )
     {
         sgpa = pfn_to_paddr(vio->mmio_gpfn) | (saddr & ~PAGE_MASK);
@@ -1200,7 +1200,7 @@ static int hvmemul_rep_movs(
 
     bytes = PAGE_SIZE - (daddr & ~PAGE_MASK);
     if ( vio->mmio_access.write_access &&
-         (vio->mmio_gva == (daddr & PAGE_MASK)) &&
+         (vio->mmio_gla == (daddr & PAGE_MASK)) &&
          bytes >= bytes_per_rep )
     {
         dgpa = pfn_to_paddr(vio->mmio_gpfn) | (daddr & ~PAGE_MASK);
@@ -1320,7 +1320,7 @@ static int hvmemul_rep_stos(
 
     bytes = PAGE_SIZE - (addr & ~PAGE_MASK);
     if ( vio->mmio_access.write_access &&
-         (vio->mmio_gva == (addr & PAGE_MASK)) &&
+         (vio->mmio_gla == (addr & PAGE_MASK)) &&
          bytes >= bytes_per_rep )
     {
         gpa = pfn_to_paddr(vio->mmio_gpfn) | (addr & ~PAGE_MASK);
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -114,7 +114,7 @@ int handle_mmio(void)
     return 1;
 }
 
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
                                  struct npfec access)
 {
     struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
@@ -122,7 +122,7 @@ int handle_mmio_with_translation(unsigne
     vio->mmio_access = access.gla_valid &&
                        access.kind == npfec_kind_with_gla
                        ? access : (struct npfec){};
-    vio->mmio_gva = gva & PAGE_MASK;
+    vio->mmio_gla = gla & PAGE_MASK;
     vio->mmio_gpfn = gpfn;
     return handle_mmio();
 }
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -119,7 +119,7 @@ void relocate_portio_handler(
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
 int handle_mmio(void);
-int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
+int handle_mmio_with_translation(unsigned long gla, unsigned long gpfn,
                                  struct npfec);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -60,12 +60,12 @@ struct hvm_vcpu_io {
 
     /*
      * HVM emulation:
-     *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+     *  Linear address @mmio_gla maps to MMIO physical frame @mmio_gpfn.
      *  The latter is known to be an MMIO frame (not RAM).
      *  This translation is only valid for accesses as per @mmio_access.
      */
     struct npfec        mmio_access;
-    unsigned long       mmio_gva;
+    unsigned long       mmio_gla;
     unsigned long       mmio_gpfn;
 
     /*
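
The rename does not change the caching protocol itself: latch_linear_to_phys()
records a single page-aligned linear-to-physical translation, and the
emulation paths above reuse it only when both the page (mmio_gla ==
(addr & PAGE_MASK)) and the recorded access kind (mmio_access) match. A
simplified, self-contained C sketch of that pattern follows; the names and
PAGE_* definitions here are stand-ins for illustration, not Xen's real ones.

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~(((uint64_t)1 << PAGE_SHIFT) - 1))

/* One-entry translation cache, modelled on struct hvm_vcpu_io. */
struct mmio_cache {
    bool     read_valid;  /* stand-in for mmio_access.read_access */
    uint64_t mmio_gla;    /* page-aligned guest linear address */
    uint64_t mmio_gpfn;   /* guest physical frame it maps to */
};

/* Record one translation (cf. latch_linear_to_phys() above). */
static void latch(struct mmio_cache *c, uint64_t gla, uint64_t gpa)
{
    c->mmio_gla   = gla & PAGE_MASK;
    c->mmio_gpfn  = gpa >> PAGE_SHIFT;
    c->read_valid = true;
}

/* Reuse it only on a same-page hit, mirroring the
 * "vio->mmio_gla == (addr & PAGE_MASK)" checks in the patch. */
static bool lookup(const struct mmio_cache *c, uint64_t addr, uint64_t *gpa)
{
    if ( !c->read_valid || c->mmio_gla != (addr & PAGE_MASK) )
        return false;
    *gpa = (c->mmio_gpfn << PAGE_SHIFT) | (addr & ~PAGE_MASK);
    return true;
}

int main(void)
{
    struct mmio_cache c = { 0 };
    uint64_t gpa;

    latch(&c, 0xfee00040, 0xfee00040);           /* a LAPIC-like MMIO page */
    return lookup(&c, 0xfee00044, &gpa) ? 0 : 1; /* same page -> cache hit */
}

As the hvmemul_rep_movs()/hvmemul_rep_stos() hunks show, a single entry
suffices because the cache only needs to cover the access currently being
emulated, and it is re-validated against both page and access kind on every
use.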