
[v4,1/2] x86/emulate: Move hvmemul_linear_to_phys

Message ID 20190520125454.14805-1-aisaila@bitdefender.com (mailing list archive)
State New, archived
Series [v4,1/2] x86/emulate: Move hvmemul_linear_to_phys

Commit Message

Alexandru Stefan ISAILA May 20, 2019, 12:55 p.m. UTC
This is done so hvmemul_linear_to_phys() can be called from
hvmemul_send_vm_event().

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
---
 xen/arch/x86/hvm/emulate.c | 181 ++++++++++++++++++-------------------
 1 file changed, 90 insertions(+), 91 deletions(-)

Comments

Paul Durrant May 22, 2019, 1:13 p.m. UTC | #1
> -----Original Message-----
> From: Alexandru Stefan ISAILA [mailto:aisaila@bitdefender.com]
> Sent: 20 May 2019 13:55
> To: xen-devel@lists.xenproject.org
> Cc: Paul Durrant <Paul.Durrant@citrix.com>; jbeulich@suse.com; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Wei Liu <wei.liu2@citrix.com>; Roger Pau Monne <roger.pau@citrix.com>;
> rcojocaru@bitdefender.com; tamas@tklengyel.com; George Dunlap <George.Dunlap@citrix.com>; Alexandru
> Stefan ISAILA <aisaila@bitdefender.com>
> Subject: [PATCH v4 1/2] x86/emulate: Move hvmemul_linear_to_phys
> 
> This is done so hvmemul_linear_to_phys() can be called from
> hvmemul_send_vm_event().
> 
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> ---
>  xen/arch/x86/hvm/emulate.c | 181 ++++++++++++++++++-------------------
>  1 file changed, 90 insertions(+), 91 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 8659c89862..254ff6515d 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -530,6 +530,95 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
>      return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
>  }
> 

I know this is code movement, but it would probably be good to do a bit of tidying...

> +/*
> + * Convert addr from linear to physical form, valid over the range
> + * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
> + * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
> + * @pfec indicates the access checks to be performed during page-table walks.
> + */
> +static int hvmemul_linear_to_phys(
> +    unsigned long addr,
> +    paddr_t *paddr,
> +    unsigned int bytes_per_rep,
> +    unsigned long *reps,
> +    uint32_t pfec,
> +    struct hvm_emulate_ctxt *hvmemul_ctxt)

This can all be re-flowed since arg-per-line is not really canonical style.
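For instance, the declaration could be re-flowed along these lines (just a sketch of the style, with the parameters unchanged):

    static int hvmemul_linear_to_phys(unsigned long addr, paddr_t *paddr,
                                      unsigned int bytes_per_rep,
                                      unsigned long *reps, uint32_t pfec,
                                      struct hvm_emulate_ctxt *hvmemul_ctxt)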

> +{
> +    struct vcpu *curr = current;
> +    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
> +    int reverse;

Looks like this should be bool.
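I.e. something like (a sketch only; the expression already yields a boolean value, so only the type changes):

    bool reverse;
    ...
    /* Reverse mode if this is a backwards multi-iteration string operation. */
    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);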

> +
> +    /*
> +     * Clip repetitions to a sensible maximum. This avoids extensive looping in
> +     * this function while still amortising the cost of I/O trap-and-emulate.
> +     */
> +    *reps = min_t(unsigned long, *reps, 4096);
> +
> +    /* With no paging it's easy: linear == physical. */
> +    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
> +    {
> +        *paddr = addr;
> +        return X86EMUL_OKAY;
> +    }
> +
> +    /* Reverse mode if this is a backwards multi-iteration string operation. */
> +    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
> +
> +    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
> +    {
> +        /* Do page-straddling first iteration forwards via recursion. */
> +        paddr_t _paddr;
> +        unsigned long one_rep = 1;
> +        int rc = hvmemul_linear_to_phys(
> +            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);

Blank line here.

> +        if ( rc != X86EMUL_OKAY )
> +            return rc;
> +        pfn = _paddr >> PAGE_SHIFT;

paddr_to_pfn()
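I.e. (sketch, using the existing helper rather than the open-coded shift):

    pfn = paddr_to_pfn(_paddr);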

> +    }
> +    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
> +    {
> +        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
> +            return X86EMUL_RETRY;
> +        *reps = 0;
> +        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
> +        return X86EMUL_EXCEPTION;
> +    }
> +
> +    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
> +    todo = *reps * bytes_per_rep;
> +    for ( i = 1; done < todo; i++ )
> +    {
> +        /* Get the next PFN in the range. */
> +        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
> +        npfn = paging_gva_to_gfn(curr, addr, &pfec);
> +
> +        /* Is it contiguous with the preceding PFNs? If not then we're done. */
> +        if ( (npfn == gfn_x(INVALID_GFN)) ||
> +             (npfn != (pfn + (reverse ? -i : i))) )
> +        {
> +            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
> +                return X86EMUL_RETRY;
> +            done /= bytes_per_rep;
> +            if ( done == 0 )
> +            {
> +                ASSERT(!reverse);
> +                if ( npfn != gfn_x(INVALID_GFN) )
> +                    return X86EMUL_UNHANDLEABLE;
> +                *reps = 0;
> +                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
> +                return X86EMUL_EXCEPTION;
> +            }
> +            *reps = done;
> +            break;
> +        }
> +
> +        done += PAGE_SIZE;
> +    }
> +
> +    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;

pfn_to_paddr() and a blank line before the return.
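I.e. something like (sketch):

    *paddr = pfn_to_paddr(pfn) | offset;

    return X86EMUL_OKAY;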

> +    return X86EMUL_OKAY;
> +}
> +

Thanks,

  Paul
George Dunlap May 22, 2019, 1:55 p.m. UTC | #2
On 5/22/19 2:13 PM, Paul Durrant wrote:
>> -----Original Message-----
>> From: Alexandru Stefan ISAILA [mailto:aisaila@bitdefender.com]
>> Sent: 20 May 2019 13:55
>> To: xen-devel@lists.xenproject.org
>> Cc: Paul Durrant <Paul.Durrant@citrix.com>; jbeulich@suse.com; Andrew Cooper
>> <Andrew.Cooper3@citrix.com>; Wei Liu <wei.liu2@citrix.com>; Roger Pau Monne <roger.pau@citrix.com>;
>> rcojocaru@bitdefender.com; tamas@tklengyel.com; George Dunlap <George.Dunlap@citrix.com>; Alexandru
>> Stefan ISAILA <aisaila@bitdefender.com>
>> Subject: [PATCH v4 1/2] x86/emulate: Move hvmemul_linear_to_phys
>>
>> This is done so hvmemul_linear_to_phys() can be called from
>> hvmemul_send_vm_event().
>>
>> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
>> ---
>>  xen/arch/x86/hvm/emulate.c | 181 ++++++++++++++++++-------------------
>>  1 file changed, 90 insertions(+), 91 deletions(-)
>>
>> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
>> index 8659c89862..254ff6515d 100644
>> --- a/xen/arch/x86/hvm/emulate.c
>> +++ b/xen/arch/x86/hvm/emulate.c
>> @@ -530,6 +530,95 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
>>      return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
>>  }
>>
> 
> I know this is code movement, but it would probably be good to do a bit of tidying...

I think there are different minds on this; I *generally* prefer pure
code movement to be done with as few changes as possible, to make sure
the actual changes are easy to call out.

The changes you've asked for are pretty minor (and you're the maintainer
of the file it's being moved to), so I won't argue about it in this
particular case.  Just want to counter the idea that move + change is
the norm. :-)

 -George

Patch

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 8659c89862..254ff6515d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -530,6 +530,95 @@  static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
     return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
 }
 
+/*
+ * Convert addr from linear to physical form, valid over the range
+ * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
+ * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
+ * @pfec indicates the access checks to be performed during page-table walks.
+ */
+static int hvmemul_linear_to_phys(
+    unsigned long addr,
+    paddr_t *paddr,
+    unsigned int bytes_per_rep,
+    unsigned long *reps,
+    uint32_t pfec,
+    struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    struct vcpu *curr = current;
+    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
+    int reverse;
+
+    /*
+     * Clip repetitions to a sensible maximum. This avoids extensive looping in
+     * this function while still amortising the cost of I/O trap-and-emulate.
+     */
+    *reps = min_t(unsigned long, *reps, 4096);
+
+    /* With no paging it's easy: linear == physical. */
+    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
+    {
+        *paddr = addr;
+        return X86EMUL_OKAY;
+    }
+
+    /* Reverse mode if this is a backwards multi-iteration string operation. */
+    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
+
+    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
+    {
+        /* Do page-straddling first iteration forwards via recursion. */
+        paddr_t _paddr;
+        unsigned long one_rep = 1;
+        int rc = hvmemul_linear_to_phys(
+            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
+        if ( rc != X86EMUL_OKAY )
+            return rc;
+        pfn = _paddr >> PAGE_SHIFT;
+    }
+    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
+    {
+        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
+            return X86EMUL_RETRY;
+        *reps = 0;
+        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+    }
+
+    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
+    todo = *reps * bytes_per_rep;
+    for ( i = 1; done < todo; i++ )
+    {
+        /* Get the next PFN in the range. */
+        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
+        npfn = paging_gva_to_gfn(curr, addr, &pfec);
+
+        /* Is it contiguous with the preceding PFNs? If not then we're done. */
+        if ( (npfn == gfn_x(INVALID_GFN)) ||
+             (npfn != (pfn + (reverse ? -i : i))) )
+        {
+            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
+                return X86EMUL_RETRY;
+            done /= bytes_per_rep;
+            if ( done == 0 )
+            {
+                ASSERT(!reverse);
+                if ( npfn != gfn_x(INVALID_GFN) )
+                    return X86EMUL_UNHANDLEABLE;
+                *reps = 0;
+                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
+                return X86EMUL_EXCEPTION;
+            }
+            *reps = done;
+            break;
+        }
+
+        done += PAGE_SIZE;
+    }
+
+    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
+    return X86EMUL_OKAY;
+}
+
 /*
  * Map the frame(s) covering an individual linear access, for writeable
  * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
@@ -692,97 +781,7 @@  static void hvmemul_unmap_linear_addr(
         *mfn++ = _mfn(0);
     }
 #endif
-}
-
-/*
- * Convert addr from linear to physical form, valid over the range
- * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
- * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
- * @pfec indicates the access checks to be performed during page-table walks.
- */
-static int hvmemul_linear_to_phys(
-    unsigned long addr,
-    paddr_t *paddr,
-    unsigned int bytes_per_rep,
-    unsigned long *reps,
-    uint32_t pfec,
-    struct hvm_emulate_ctxt *hvmemul_ctxt)
-{
-    struct vcpu *curr = current;
-    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
-    int reverse;
-
-    /*
-     * Clip repetitions to a sensible maximum. This avoids extensive looping in
-     * this function while still amortising the cost of I/O trap-and-emulate.
-     */
-    *reps = min_t(unsigned long, *reps, 4096);
-
-    /* With no paging it's easy: linear == physical. */
-    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
-    {
-        *paddr = addr;
-        return X86EMUL_OKAY;
-    }
-
-    /* Reverse mode if this is a backwards multi-iteration string operation. */
-    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
-
-    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
-    {
-        /* Do page-straddling first iteration forwards via recursion. */
-        paddr_t _paddr;
-        unsigned long one_rep = 1;
-        int rc = hvmemul_linear_to_phys(
-            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
-        if ( rc != X86EMUL_OKAY )
-            return rc;
-        pfn = _paddr >> PAGE_SHIFT;
-    }
-    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
-    {
-        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
-            return X86EMUL_RETRY;
-        *reps = 0;
-        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    }
-
-    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
-    todo = *reps * bytes_per_rep;
-    for ( i = 1; done < todo; i++ )
-    {
-        /* Get the next PFN in the range. */
-        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
-        npfn = paging_gva_to_gfn(curr, addr, &pfec);
-
-        /* Is it contiguous with the preceding PFNs? If not then we're done. */
-        if ( (npfn == gfn_x(INVALID_GFN)) ||
-             (npfn != (pfn + (reverse ? -i : i))) )
-        {
-            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
-                return X86EMUL_RETRY;
-            done /= bytes_per_rep;
-            if ( done == 0 )
-            {
-                ASSERT(!reverse);
-                if ( npfn != gfn_x(INVALID_GFN) )
-                    return X86EMUL_UNHANDLEABLE;
-                *reps = 0;
-                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
-                return X86EMUL_EXCEPTION;
-            }
-            *reps = done;
-            break;
-        }
-
-        done += PAGE_SIZE;
-    }
-
-    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
-    return X86EMUL_OKAY;
-}
-    
+}  
 
 static int hvmemul_virtual_to_linear(
     enum x86_segment seg,