[v3,2/9] x86/np2m: flush all np2m objects on nested INVEPT

Message ID 20171003152104.1432-3-sergey.dyasli@citrix.com
State New, archived

Commit Message

Sergey Dyasli Oct. 3, 2017, 3:20 p.m. UTC
At the moment, nvmx_handle_invept() updates the current np2m just to
flush it.  Instead, introduce a function, np2m_flush_base(), which
looks up the np2m base pointer and calls p2m_flush_table() directly.

Unfortunately, since we don't know which np2m a given vcpu is using,
we must flush all np2ms that share that base pointer.
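
The scan itself is simple (a minimal sketch of the approach; the
complete function is np2m_flush_base() in the diff below).  One
detail worth noting: the low 12 bits of an EPTP are control fields
(memory type, page-walk length, A/D-bit enable) rather than address
bits, so they are masked off before comparing against the stored
np2m_base:

    np2m_base &= ~(0xfffull);        /* strip EPTP control bits */
    nestedp2m_lock(d);
    for ( i = 0; i < MAX_NESTEDP2M; i++ )
    {
        struct p2m_domain *p2m = d->arch.nested_p2m[i];

        p2m_lock(p2m);
        if ( p2m->np2m_base == np2m_base )
            p2m_flush_table_locked(p2m);
        p2m_unlock(p2m);
    }
    nestedp2m_unlock(d);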

Convert p2m_flush_table() into p2m_flush_table_locked() so that the
p2m lock does not have to be released after the np2m_base check.
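
The split follows the common locked-helper pattern: the body asserts
that the caller holds the lock (so its early return no longer needs
to drop it), while a thin wrapper preserves the old behaviour for
existing callers.  In outline:

    /* Caller must already hold the p2m lock. */
    static void p2m_flush_table_locked(struct p2m_domain *p2m)
    {
        ASSERT(p2m_locked_by_me(p2m));
        /* ... the actual flush; a bare "return" on an empty np2m ... */
    }

    /* Reset this p2m table to be empty */
    static void p2m_flush_table(struct p2m_domain *p2m)
    {
        p2m_lock(p2m);
        p2m_flush_table_locked(p2m);
        p2m_unlock(p2m);
    }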

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
v2 --> v3:
- Commit message update
---
 xen/arch/x86/hvm/vmx/vvmx.c |  7 +------
 xen/arch/x86/mm/p2m.c       | 35 +++++++++++++++++++++++++++++------
 xen/include/asm-x86/p2m.h   |  2 ++
 3 files changed, 32 insertions(+), 12 deletions(-)

Comments

George Dunlap Oct. 4, 2017, 2:12 p.m. UTC | #1
On 10/03/2017 04:20 PM, Sergey Dyasli wrote:
> At the moment, nvmx_handle_invept() updates the current np2m just to
> flush it.  Instead, introduce a function, np2m_flush_base(), which
> looks up the np2m base pointer and calls p2m_flush_table() directly.
> 
> [snip]

This needs a VMX maintainer's Ack.

 -George
Patch

diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index cd0ee0a307..d333aa6d78 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1910,12 +1910,7 @@  int nvmx_handle_invept(struct cpu_user_regs *regs)
     {
     case INVEPT_SINGLE_CONTEXT:
     {
-        struct p2m_domain *p2m = p2m_get_nestedp2m(current, eptp);
-        if ( p2m )
-        {
-            p2m_flush(current, p2m);
-            ept_sync_domain(p2m);
-        }
+        np2m_flush_base(current, eptp);
         break;
     }
     case INVEPT_ALL_CONTEXT:
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 27b90eb815..b7588b2ec1 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1711,15 +1711,14 @@  p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
     return p2m;
 }
 
-/* Reset this p2m table to be empty */
 static void
-p2m_flush_table(struct p2m_domain *p2m)
+p2m_flush_table_locked(struct p2m_domain *p2m)
 {
     struct page_info *top, *pg;
     struct domain *d = p2m->domain;
     mfn_t mfn;
 
-    p2m_lock(p2m);
+    ASSERT(p2m_locked_by_me(p2m));
 
     /*
      * "Host" p2m tables can have shared entries &c that need a bit more care
@@ -1732,10 +1731,7 @@  p2m_flush_table(struct p2m_domain *p2m)
 
     /* No need to flush if it's already empty */
     if ( p2m_is_nestedp2m(p2m) && p2m->np2m_base == P2M_BASE_EADDR )
-    {
-        p2m_unlock(p2m);
         return;
-    }
 
     /* This is no longer a valid nested p2m for any address space */
     p2m->np2m_base = P2M_BASE_EADDR;
@@ -1755,7 +1751,14 @@  p2m_flush_table(struct p2m_domain *p2m)
             d->arch.paging.free_page(d, pg);
     }
     page_list_add(top, &p2m->pages);
+}
 
+/* Reset this p2m table to be empty */
+static void
+p2m_flush_table(struct p2m_domain *p2m)
+{
+    p2m_lock(p2m);
+    p2m_flush_table_locked(p2m);
     p2m_unlock(p2m);
 }
 
@@ -1776,6 +1779,26 @@  p2m_flush_nestedp2m(struct domain *d)
         p2m_flush_table(d->arch.nested_p2m[i]);
 }
 
+void np2m_flush_base(struct vcpu *v, unsigned long np2m_base)
+{
+    struct domain *d = v->domain;
+    struct p2m_domain *p2m;
+    unsigned int i;
+
+    np2m_base &= ~(0xfffull);
+
+    nestedp2m_lock(d);
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+    {
+        p2m = d->arch.nested_p2m[i];
+        p2m_lock(p2m);
+        if ( p2m->np2m_base == np2m_base )
+            p2m_flush_table_locked(p2m);
+        p2m_unlock(p2m);
+    }
+    nestedp2m_unlock(d);
+}
+
 static void assign_np2m(struct vcpu *v, struct p2m_domain *p2m)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 338317a782..ce50e37f46 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -772,6 +772,8 @@  int p2m_pt_handle_deferred_changes(uint64_t gpa);
 void p2m_flush(struct vcpu *v, struct p2m_domain *p2m);
 /* Flushes all nested p2m tables */
 void p2m_flush_nestedp2m(struct domain *d);
+/* Flushes all np2m objects with the specified np2m_base */
+void np2m_flush_base(struct vcpu *v, unsigned long np2m_base);
 
 void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
     l1_pgentry_t *p, l1_pgentry_t new, unsigned int level);