
[RFC,09/12] x86/np2m: improve nestedhvm_hap_nested_page_fault()

Message ID: 20170718103429.25020-10-sergey.dyasli@citrix.com (mailing list archive)
State: New, archived

Commit Message

Sergey Dyasli July 18, 2017, 10:34 a.m. UTC
There is a possibility for nested_p2m to become stale between
nestedhvm_hap_nested_page_fault() and nestedhap_fix_p2m(). Simply
use p2m_get_nestedp2m_locked() to guarantee that the correct np2m is used.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
 xen/arch/x86/mm/hap/nested_hap.c | 29 +++++++++++------------------
 1 file changed, 11 insertions(+), 18 deletions(-)
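
Background for the change below: with the old flow, the np2m returned by
p2m_get_nestedp2m() could be flushed and recycled for a different L1 p2m base
before nestedhap_fix_p2m() took its lock, which is why the callee had to
re-check np2m_base and silently bail out. Returning the np2m with its lock
already held removes that window. The following is a minimal standalone sketch
of the two orderings, not Xen code: struct np2m, fix_p2m_old(),
get_np2m_locked() and fix_p2m_new() are made-up names that model the pattern
with a plain pthread mutex.

/* Simplified model of the race; illustrative only, not Xen code. */
#include <pthread.h>
#include <stdint.h>

struct np2m {
    pthread_mutex_t lock;
    uint64_t np2m_base;   /* which L1 p2m base this np2m currently shadows */
};

/* Old scheme: the np2m was looked up earlier and is only locked here. */
static void fix_p2m_old(struct np2m *p2m, uint64_t cur_base)
{
    pthread_mutex_lock(&p2m->lock);
    /*
     * A concurrent flush may have recycled p2m for another p2m base
     * between the lookup and this point, so the stale case has to be
     * detected and skipped.
     */
    if ( p2m->np2m_base == cur_base )
    {
        /* ... p2m_set_entry() ... */
    }
    pthread_mutex_unlock(&p2m->lock);
}

/* New scheme: the np2m is handed out with its lock already held. */
static struct np2m *get_np2m_locked(struct np2m *p2m)
{
    pthread_mutex_lock(&p2m->lock);
    /* Until the caller unlocks, p2m->np2m_base cannot change. */
    return p2m;
}

static void fix_p2m_new(struct np2m *p2m)
{
    /* The caller passed a locked np2m: no re-check is needed. */
    /* ... p2m_set_entry() ... */
    pthread_mutex_unlock(&p2m->lock);
}

This mirrors the patch: nestedhap_fix_p2m() now asserts p2m_locked_by_me(p2m)
instead of re-checking np2m_base, and nestedhvm_hap_nested_page_fault()
fetches the np2m with p2m_get_nestedp2m_locked() only when it is about to fix
the entry.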

Patch

diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index ed137fa784..96afe632b5 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -101,28 +101,21 @@  nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     int rc = 0;
+    unsigned long gfn, mask;
+    mfn_t mfn;
+
     ASSERT(p2m);
     ASSERT(p2m->set_entry);
+    ASSERT(p2m_locked_by_me(p2m));
 
-    p2m_lock(p2m);
-
-    /* If this p2m table has been flushed or recycled under our feet, 
-     * leave it alone.  We'll pick up the right one as we try to 
-     * vmenter the guest. */
-    if ( p2m->np2m_base == nhvm_vcpu_p2m_base(v) )
-    {
-        unsigned long gfn, mask;
-        mfn_t mfn;
-
-        /* If this is a superpage mapping, round down both addresses
-         * to the start of the superpage. */
-        mask = ~((1UL << page_order) - 1);
+    /* If this is a superpage mapping, round down both addresses
+     * to the start of the superpage. */
+    mask = ~((1UL << page_order) - 1);
 
-        gfn = (L2_gpa >> PAGE_SHIFT) & mask;
-        mfn = _mfn((L0_gpa >> PAGE_SHIFT) & mask);
+    gfn = (L2_gpa >> PAGE_SHIFT) & mask;
+    mfn = _mfn((L0_gpa >> PAGE_SHIFT) & mask);
 
-        rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
-    }
+    rc = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
 
     p2m_unlock(p2m);
 
@@ -212,7 +205,6 @@  nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     uint8_t p2ma_21 = p2m_access_rwx;
 
     p2m = p2m_get_hostp2m(d); /* L0 p2m */
-    nested_p2m = p2m_get_nestedp2m(v);
 
     /* walk the L1 P2M table */
     rv = nestedhap_walk_L1_p2m(v, *L2_gpa, &L1_gpa, &page_order_21, &p2ma_21,
@@ -278,6 +270,7 @@  nestedhvm_hap_nested_page_fault(struct vcpu *v, paddr_t *L2_gpa,
     p2ma_10 &= (p2m_access_t)p2ma_21;
 
     /* fix p2m_get_pagetable(nested_p2m) */
+    nested_p2m = p2m_get_nestedp2m_locked(v);
     nestedhap_fix_p2m(v, nested_p2m, *L2_gpa, L0_gpa, page_order_20,
         p2mt_10, p2ma_10);
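
A side note on the nestedhap_fix_p2m() hunk: mask rounds both the L2 gfn and
the L0 mfn down to the first frame of the superpage before the entry is
written. Below is a standalone sketch of that arithmetic, with assumed example
values (page_order 9, i.e. a 2MiB superpage on x86, and an arbitrary L2_gpa):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned int page_order = 9;           /* 2MiB superpage: 2^9 4KiB pages */
    unsigned long L2_gpa = 0x12345678UL;   /* arbitrary faulting guest address */
    unsigned long mask = ~((1UL << page_order) - 1);   /* == ~0x1ff */
    unsigned long gfn = (L2_gpa >> PAGE_SHIFT) & mask;

    /* The low 9 bits of the frame number are cleared, so frame 0x12345
     * rounds down to 0x12200, the first frame of its 2MiB superpage. */
    printf("mask=%#lx gfn=%#lx\n", mask, gfn);
    return 0;
}

The same mask is applied to L0_gpa >> PAGE_SHIFT, so the resulting gfn->mfn
mapping starts at matching superpage boundaries.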