[v2,3/4] x86/hvm: Break out __hvm_copy()'s translation logic

Message ID: 1504886736-1823-4-git-send-email-aisaila@bitdefender.com (mailing list archive)
State: New, archived

Commit Message

Alexandru Stefan ISAILA Sept. 8, 2017, 4:05 p.m. UTC
From: Andrew Cooper <andrew.cooper3@citrix.com>

Break the translation logic out of __hvm_copy() into a new helper, hvm_translate_get_page(), which will be reused by later changes.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
CC: Jan Beulich <JBeulich@suse.com>

Changes since V1:
	- Changed param name
	- Added ASSERT
---
 xen/arch/x86/hvm/hvm.c            | 144 +++++++++++++++++++++++---------------
 xen/include/asm-x86/hvm/support.h |  12 ++++
 2 files changed, 98 insertions(+), 58 deletions(-)

Comments

Jan Beulich Sept. 18, 2017, 1:18 p.m. UTC | #1
>>> On 08.09.17 at 18:05, <aisaila@bitdefender.com> wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3069,6 +3069,83 @@ void hvm_task_switch(
>      hvm_unmap_entry(nptss_desc);
>  }
>  
> +enum hvm_translation_result hvm_translate_get_page(
> +    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
> +    pagefault_info_t *pfinfo, struct page_info **page_p,
> +    gfn_t *gfn_p, p2m_type_t *p2mt_p)
> +{
> +    struct page_info *page;
> +    p2m_type_t p2mt;
> +    gfn_t gfn;
> +
> +    if ( linear )
> +    {
> +        gfn = _gfn(paging_gva_to_gfn(v, addr, &pfec));
> +
> +        if ( gfn_eq(gfn, INVALID_GFN) )
> +        {
> +            if ( pfec & PFEC_page_paged )
> +                return HVMTRANS_gfn_paged_out;
> +
> +            if ( pfec & PFEC_page_shared )
> +                return HVMTRANS_gfn_shared;
> +
> +            if ( pfinfo )
> +            {
> +                pfinfo->linear = addr;
> +                pfinfo->ec = pfec & ~PFEC_implicit;
> +            }
> +
> +            return HVMTRANS_bad_linear_to_gfn;
> +        }
> +    }
> +    else
> +    {
> +        gfn = _gfn(addr >> PAGE_SHIFT);

This wants to become gaddr_to_gfn(), now that we have it, and ...

> +        ASSERT(!pfinfo);
> +    }
> +
> +    /*
> +     * No need to do the P2M lookup for internally handled MMIO, benefiting
> +     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
> +     * - newer Windows (like Server 2012) for HPET accesses.
> +     */
> +    if ( v == current
> +         && !nestedhvm_vcpu_in_guestmode(v)
> +         && hvm_mmio_internal(gfn_x(gfn) << PAGE_SHIFT) )

... this one gfn_to_gaddr().
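
I.e., roughly the following (a sketch only, with the recently introduced
helpers in place of the open-coded shifts):

    gfn = gaddr_to_gfn(addr);                  /* was: _gfn(addr >> PAGE_SHIFT) */

and

    && hvm_mmio_internal(gfn_to_gaddr(gfn)) )  /* was: gfn_x(gfn) << PAGE_SHIFT */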

> +        return HVMTRANS_bad_gfn_to_mfn;
> +
> +    page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
> +
> +    if ( !page )
> +        return HVMTRANS_bad_gfn_to_mfn;
> +
> +    if ( p2m_is_paging(p2mt) )
> +    {
> +        put_page(page);
> +        p2m_mem_paging_populate(v->domain, gfn_x(gfn));
> +        return HVMTRANS_gfn_paged_out;
> +    }
> +    if ( p2m_is_shared(p2mt) )
> +    {
> +        put_page(page);
> +        return HVMTRANS_gfn_shared;
> +    }
> +    if ( p2m_is_grant(p2mt) )
> +    {
> +        put_page(page);
> +        return HVMTRANS_unhandleable;
> +    }
> +
> +    *page_p = page;
> +    if ( gfn_p )
> +        *gfn_p = gfn;
> +    if ( p2mt_p )
> +        *p2mt_p = p2mt;

Considering the use in patch 4, is it really useful for p2mt_p to be an
optional output? I.e. do you foresee (in the near future) a case where
p2m_is_discard_write() doesn't need handling by the caller?
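
For reference, the calling pattern under discussion is the one __hvm_copy()
itself switches to further down (sketch):

    res = hvm_translate_get_page(v, addr, flags & HVMCOPY_linear,
                                 pfec, pfinfo, &page, &gfn, &p2mt);
    if ( res != HVMTRANS_okay )
        return res;

    if ( p2m_is_discard_write(p2mt) )
        /* ... the write is silently dropped ... */;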

> @@ -103,6 +104,17 @@ enum hvm_translation_result hvm_fetch_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
>  
> +/*
> + * Get a reference on the page under an HVM physical or linear address.  If
> + * linear, a pagewalk is performed using pfec (fault details optionally in
> + * pfinfo).
> + * On success, returns HVMTRANS_okay with a reference taken on *page_p.
> + */
> +enum hvm_translation_result hvm_translate_get_page(
> +    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
> +    pagefault_info_t *pfinfo, struct page_info **page_p,
> +    gfn_t *gfn_p, p2m_type_t *p2mt_p);

The optional nature of whichever outputs we decide to be optional
should probably be called out in the comment, just like it is for pfinfo.
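
E.g. something along these lines (illustrative wording only, not a
committed form):

    /*
     * Get a reference on the page under an HVM physical or linear address.  If
     * linear, a pagewalk is performed using pfec (fault details optionally
     * reported through pfinfo, if non-NULL).
     * On success, returns HVMTRANS_okay with a reference taken on *page_p;
     * gfn_p and p2mt_p, when non-NULL, are filled with the translated gfn
     * and its p2m type respectively.
     */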

None of the points brought up would really require another version; they
could all easily be dealt with while committing. But of course we first
need to agree on at least this last one.

Jan

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 488acbf..a60149a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3069,6 +3069,83 @@ void hvm_task_switch(
     hvm_unmap_entry(nptss_desc);
 }
 
+enum hvm_translation_result hvm_translate_get_page(
+    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
+    pagefault_info_t *pfinfo, struct page_info **page_p,
+    gfn_t *gfn_p, p2m_type_t *p2mt_p)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+    gfn_t gfn;
+
+    if ( linear )
+    {
+        gfn = _gfn(paging_gva_to_gfn(v, addr, &pfec));
+
+        if ( gfn_eq(gfn, INVALID_GFN) )
+        {
+            if ( pfec & PFEC_page_paged )
+                return HVMTRANS_gfn_paged_out;
+
+            if ( pfec & PFEC_page_shared )
+                return HVMTRANS_gfn_shared;
+
+            if ( pfinfo )
+            {
+                pfinfo->linear = addr;
+                pfinfo->ec = pfec & ~PFEC_implicit;
+            }
+
+            return HVMTRANS_bad_linear_to_gfn;
+        }
+    }
+    else
+    {
+        gfn = _gfn(addr >> PAGE_SHIFT);
+        ASSERT(!pfinfo);
+    }
+
+    /*
+     * No need to do the P2M lookup for internally handled MMIO, benefiting
+     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
+     * - newer Windows (like Server 2012) for HPET accesses.
+     */
+    if ( v == current
+         && !nestedhvm_vcpu_in_guestmode(v)
+         && hvm_mmio_internal(gfn_x(gfn) << PAGE_SHIFT) )
+        return HVMTRANS_bad_gfn_to_mfn;
+
+    page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
+
+    if ( !page )
+        return HVMTRANS_bad_gfn_to_mfn;
+
+    if ( p2m_is_paging(p2mt) )
+    {
+        put_page(page);
+        p2m_mem_paging_populate(v->domain, gfn_x(gfn));
+        return HVMTRANS_gfn_paged_out;
+    }
+    if ( p2m_is_shared(p2mt) )
+    {
+        put_page(page);
+        return HVMTRANS_gfn_shared;
+    }
+    if ( p2m_is_grant(p2mt) )
+    {
+        put_page(page);
+        return HVMTRANS_unhandleable;
+    }
+
+    *page_p = page;
+    if ( gfn_p )
+        *gfn_p = gfn;
+    if ( p2mt_p )
+        *p2mt_p = p2mt;
+
+    return HVMTRANS_okay;
+}
+
 #define HVMCOPY_from_guest (0u<<0)
 #define HVMCOPY_to_guest   (1u<<0)
 #define HVMCOPY_phys       (0u<<2)
@@ -3077,7 +3154,7 @@ static enum hvm_translation_result __hvm_copy(
     void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
     uint32_t pfec, pagefault_info_t *pfinfo)
 {
-    unsigned long gfn;
+    gfn_t gfn;
     struct page_info *page;
     p2m_type_t p2mt;
     char *p;
@@ -3103,65 +3180,15 @@ static enum hvm_translation_result __hvm_copy(
 
     while ( todo > 0 )
     {
+        enum hvm_translation_result res;
         paddr_t gpa = addr & ~PAGE_MASK;
 
         count = min_t(int, PAGE_SIZE - gpa, todo);
 
-        if ( flags & HVMCOPY_linear )
-        {
-            gfn = paging_gva_to_gfn(v, addr, &pfec);
-            if ( gfn == gfn_x(INVALID_GFN) )
-            {
-                if ( pfec & PFEC_page_paged )
-                    return HVMTRANS_gfn_paged_out;
-                if ( pfec & PFEC_page_shared )
-                    return HVMTRANS_gfn_shared;
-                if ( pfinfo )
-                {
-                    pfinfo->linear = addr;
-                    pfinfo->ec = pfec & ~PFEC_implicit;
-                }
-                return HVMTRANS_bad_linear_to_gfn;
-            }
-            gpa |= (paddr_t)gfn << PAGE_SHIFT;
-        }
-        else
-        {
-            gfn = addr >> PAGE_SHIFT;
-            gpa = addr;
-        }
-
-        /*
-         * No need to do the P2M lookup for internally handled MMIO, benefiting
-         * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
-         * - newer Windows (like Server 2012) for HPET accesses.
-         */
-        if ( v == current
-             && !nestedhvm_vcpu_in_guestmode(v)
-             && hvm_mmio_internal(gpa) )
-            return HVMTRANS_bad_gfn_to_mfn;
-
-        page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
-
-        if ( !page )
-            return HVMTRANS_bad_gfn_to_mfn;
-
-        if ( p2m_is_paging(p2mt) )
-        {
-            put_page(page);
-            p2m_mem_paging_populate(v->domain, gfn);
-            return HVMTRANS_gfn_paged_out;
-        }
-        if ( p2m_is_shared(p2mt) )
-        {
-            put_page(page);
-            return HVMTRANS_gfn_shared;
-        }
-        if ( p2m_is_grant(p2mt) )
-        {
-            put_page(page);
-            return HVMTRANS_unhandleable;
-        }
+        res = hvm_translate_get_page(v, addr, flags & HVMCOPY_linear,
+                                     pfec, pfinfo, &page, &gfn, &p2mt);
+        if ( res != HVMTRANS_okay )
+            return res;
 
         p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
 
@@ -3170,10 +3197,11 @@ static enum hvm_translation_result __hvm_copy(
             if ( p2m_is_discard_write(p2mt) )
             {
                 static unsigned long lastpage;
-                if ( xchg(&lastpage, gfn) != gfn )
+
+                if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) )
                     dprintk(XENLOG_G_DEBUG,
                             "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
-                            v, gfn, page_to_mfn(page));
+                            v, gfn_x(gfn), page_to_mfn(page));
             }
             else
             {
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index e3b035d..d784fc1 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -24,6 +24,7 @@
 #include <xen/sched.h>
 #include <asm/hvm/save.h>
 #include <asm/processor.h>
+#include <asm/p2m.h>
 
 #ifndef NDEBUG
 #define DBG_LEVEL_0                 (1 << 0)
@@ -103,6 +104,17 @@ enum hvm_translation_result hvm_fetch_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
 
+/*
+ * Get a reference on the page under an HVM physical or linear address.  If
+ * linear, a pagewalk is performed using pfec (fault details optionally in
+ * pfinfo).
+ * On success, returns HVMTRANS_okay with a reference taken on *page_p.
+ */
+enum hvm_translation_result hvm_translate_get_page(
+    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
+    pagefault_info_t *pfinfo, struct page_info **page_p,
+    gfn_t *gfn_p, p2m_type_t *p2mt_p);
+
 #define HVM_HCALL_completed  0 /* hypercall completed - no further action */
 #define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
 int hvm_hypercall(struct cpu_user_regs *regs);