[v1,02/14] x86/np2m: add np2m_flush_base()

Message ID 20170904081452.12960-3-sergey.dyasli@citrix.com (mailing list archive)
State New, archived

Commit Message

Sergey Dyasli Sept. 4, 2017, 8:14 a.m. UTC
The new function finds all np2m objects with the specified np2m_base
and flushes them.

Convert p2m_flush_table() into p2m_flush_table_locked() so that the
p2m lock does not need to be released after the np2m_base check.

Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
RFC --> v1:
- p2m_unlock(p2m) is moved from p2m_flush_table_locked() to
  p2m_flush_table() for balanced lock/unlock
- np2m_flush_eptp() is renamed to np2m_flush_base()

 xen/arch/x86/mm/p2m.c     | 35 +++++++++++++++++++++++++++++------
 xen/include/asm-x86/p2m.h |  2 ++
 2 files changed, 31 insertions(+), 6 deletions(-)
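
For context, a sketch of the intended call pattern. The function name and
call site below are hypothetical and not part of this patch; the real
callers of np2m_flush_base() are introduced later in the series.

/* Hypothetical call site, for illustration only.  When L1 invalidates
 * a nested p2m context, every np2m object still tagged with that base
 * must be flushed. */
static void example_invalidate_np2m(struct vcpu *v, unsigned long eptp)
{
    /*
     * np2m_flush_base() masks off the low 12 bits itself, so the raw
     * EPTP value (whose bits 0-11 hold control fields, not address
     * bits) can be passed unmodified.
     */
    np2m_flush_base(v, eptp);
}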

Comments

George Dunlap Sept. 28, 2017, 2:01 p.m. UTC | #1
On 09/04/2017 09:14 AM, Sergey Dyasli wrote:
> The new function finds all np2m objects with the specified np2m_base
> and flushes them.
> 
> Convert p2m_flush_table() into p2m_flush_table_locked() so that the
> p2m lock does not need to be released after the np2m_base check.
> 
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>

Reviewed-by: George Dunlap <george.dunlap@citrix.com>


Patch

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index b8c8bba421..94a42400ad 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1708,15 +1708,14 @@ p2m_getlru_nestedp2m(struct domain *d, struct p2m_domain *p2m)
     return p2m;
 }
 
-/* Reset this p2m table to be empty */
 static void
-p2m_flush_table(struct p2m_domain *p2m)
+p2m_flush_table_locked(struct p2m_domain *p2m)
 {
     struct page_info *top, *pg;
     struct domain *d = p2m->domain;
     mfn_t mfn;
 
-    p2m_lock(p2m);
+    ASSERT(p2m_locked_by_me(p2m));
 
     /*
      * "Host" p2m tables can have shared entries &c that need a bit more care
@@ -1729,10 +1728,7 @@ p2m_flush_table(struct p2m_domain *p2m)
 
     /* No need to flush if it's already empty */
     if ( p2m_is_nestedp2m(p2m) && p2m->np2m_base == P2M_BASE_EADDR )
-    {
-        p2m_unlock(p2m);
         return;
-    }
 
     /* This is no longer a valid nested p2m for any address space */
     p2m->np2m_base = P2M_BASE_EADDR;
@@ -1752,7 +1748,14 @@ p2m_flush_table(struct p2m_domain *p2m)
             d->arch.paging.free_page(d, pg);
     }
     page_list_add(top, &p2m->pages);
+}
 
+/* Reset this p2m table to be empty */
+static void
+p2m_flush_table(struct p2m_domain *p2m)
+{
+    p2m_lock(p2m);
+    p2m_flush_table_locked(p2m);
     p2m_unlock(p2m);
 }
 
@@ -1773,6 +1776,26 @@ p2m_flush_nestedp2m(struct domain *d)
         p2m_flush_table(d->arch.nested_p2m[i]);
 }
 
+void np2m_flush_base(struct vcpu *v, unsigned long np2m_base)
+{
+    struct domain *d = v->domain;
+    struct p2m_domain *p2m;
+    unsigned int i;
+
+    np2m_base &= ~(0xfffull);
+
+    nestedp2m_lock(d);
+    for ( i = 0; i < MAX_NESTEDP2M; i++ )
+    {
+        p2m = d->arch.nested_p2m[i];
+        p2m_lock(p2m);
+        if ( p2m->np2m_base == np2m_base )
+            p2m_flush_table_locked(p2m);
+        p2m_unlock(p2m);
+    }
+    nestedp2m_unlock(d);
+}
+
 static void assign_np2m(struct vcpu *v, struct p2m_domain *p2m)
 {
     struct nestedvcpu *nv = &vcpu_nestedhvm(v);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 9086bb35dc..cfb00591cd 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -779,6 +779,8 @@ int p2m_pt_handle_deferred_changes(uint64_t gpa);
 void p2m_flush(struct vcpu *v, struct p2m_domain *p2m);
 /* Flushes all nested p2m tables */
 void p2m_flush_nestedp2m(struct domain *d);
+/* Flushes all np2m objects with the specified np2m_base */
+void np2m_flush_base(struct vcpu *v, unsigned long np2m_base);
 
 void nestedp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
     l1_pgentry_t *p, l1_pgentry_t new, unsigned int level);
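
A note on the masking at the top of np2m_flush_base(): on VMX the
np2m_base is the L1 EPTP, whose low 12 bits hold control fields rather
than address bits, so they are cleared before the value is compared with
the stored (already masked) p2m->np2m_base. An illustrative decomposition
of the EPTP layout per the Intel SDM (the macro names below are not Xen
identifiers):

/* Illustrative only -- these are not Xen identifiers. */
#define EPTP_MEMTYPE(e)    ((e) & 0x7)          /* bits 2:0, memory type     */
#define EPTP_WALK_LEN(e)   (((e) >> 3) & 0x7)   /* bits 5:3, walk length - 1 */
#define EPTP_AD_ENABLE(e)  (((e) >> 6) & 0x1)   /* bit 6, enable A/D bits    */
#define EPTP_PML4_ADDR(e)  ((e) & ~0xfffull)    /* bits 51:12, PML4 address  */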