[RFC,60/84] x86/domain_page: use PMAP when d/vcache is not ready.

Message ID: 4b8e2d635bedc8a51281cac0eb93b6761c1eec5c.1569489002.git.hongyax@amazon.com (mailing list archive)
State: New, archived
Series: Remove direct map from Xen

Commit Message

Xia, Hongyan Sept. 26, 2019, 9:46 a.m. UTC
From: Hongyan Xia <hongyax@amazon.com>

Also fix a place in create_perdomain_mapping() where unmap_domain_page()
should only be called conditionally: the L1 table may have come straight
from the direct map (the alloc_xenheap_page() case), in which case there
is nothing to unmap.

Signed-off-by: Hongyan Xia <hongyax@amazon.com>
---
 xen/arch/x86/domain_page.c | 27 ++++++++++++++++++++++++---
 xen/arch/x86/mm.c          |  3 ++-
 2 files changed, 26 insertions(+), 4 deletions(-)

Comments

Wei Liu Sept. 26, 2019, 1:30 p.m. UTC | #1
On Thu, Sep 26, 2019 at 10:46:23AM +0100, hongyax@amazon.com wrote:
> From: Hongyan Xia <hongyax@amazon.com>
> 
> Also fix a place in create_perdomain_mapping() where unmap_domain_page()
> should only be called conditionally: the L1 table may have come straight
> from the direct map (the alloc_xenheap_page() case), in which case there
> is nothing to unmap.
> 
> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
> ---
>  xen/arch/x86/domain_page.c | 27 ++++++++++++++++++++++++---
>  xen/arch/x86/mm.c          |  3 ++-
>  2 files changed, 26 insertions(+), 4 deletions(-)
> 
> diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
> index 9ea74b456c..bece9d8cd0 100644
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -17,6 +17,8 @@
>  #include <asm/flushtlb.h>
>  #include <asm/hardirq.h>
>  #include <asm/setup.h>
> +#include <asm/pmap.h>
> +#include <asm/fixmap.h>
>  
>  static DEFINE_PER_CPU(struct vcpu *, override);
>  
> @@ -83,12 +85,26 @@ void *map_domain_page(mfn_t mfn)
>  
>      v = mapcache_current_vcpu();
>      if ( !v || !is_pv_vcpu(v) )
> -        return mfn_to_virt(mfn_x(mfn));
> +    {
> +        void *ret;
> +        pmap_lock();
> +        ret = pmap_map(mfn);
> +        pmap_unlock();
> +        flush_tlb_one_local(ret);

Oh this is a side effect of manipulating PTEs directly, right? I would
prefer you put the flush into pmap_map. Its caller shouldn't need to
flush the VA.

If you do that, please do it in the commit that introduces PMAP as well.

I will need more time to understand the overall design in this series to
make further comments.
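
For illustration, here is a minimal sketch of that suggestion, with the
flush done inside pmap_map() so its callers need not know about it. It
assumes the fixmap-based PMAP introduced earlier in this series
(FIX_PMAP_BEGIN and friends); pmap_alloc_slot() is a hypothetical
stand-in for the free-slot bookkeeping:

    void *pmap_map(mfn_t mfn)
    {
        /* pmap_alloc_slot() is hypothetical; slot tracking is elided. */
        enum fixed_addresses slot = FIX_PMAP_BEGIN + pmap_alloc_slot();
        void *va = fix_to_virt(slot);

        __set_fixmap(slot, mfn_x(mfn), PAGE_HYPERVISOR);

        /* Flush the stale translation here, not in every caller. */
        flush_tlb_one_local(va);

        return va;
    }

With the flush inside pmap_map(), both fallback paths in
map_domain_page() above would shrink to taking the PMAP lock, calling
pmap_map() and returning its result.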

> +        return ret;
> +    }
>  
>      dcache = &v->domain->arch.pv.mapcache;
>      vcache = &v->arch.pv.mapcache;
>      if ( !dcache->inuse )
> -        return mfn_to_virt(mfn_x(mfn));
> +    {
> +        void *ret;
> +        pmap_lock();
> +        ret = pmap_map(mfn);
> +        pmap_unlock();
> +        flush_tlb_one_local(ret);
> +        return ret;
> +    }
>  
>      perfc_incr(map_domain_page_count);
>  
> @@ -181,8 +197,13 @@ void unmap_domain_page(const void *ptr)
>      unsigned long va = (unsigned long)ptr, mfn, flags;
>      struct vcpu_maphash_entry *hashent;
>  
> -    if ( va >= DIRECTMAP_VIRT_START )
> +    if ( va >= FIXADDR_START && va < FIXADDR_TOP )
> +    {
> +        pmap_lock();
> +        pmap_unmap((void *)ptr);
> +        pmap_unlock();
>          return;
> +    }
>  
>      ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
>  
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 145c5ab47c..9619182f52 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5949,7 +5949,8 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
>          if ( rc || !nr || !l1_table_offset(va) )
>          {
>              /* Note that this is a no-op for the alloc_xenheap_page() case. */
> -            unmap_domain_page(l1tab);
> +            if ( (unsigned long)l1tab < DIRECTMAP_VIRT_START )
> +                unmap_domain_page(l1tab);

If this is a fix to one of my previous patches, please split the change
out and merge it there.

And then, call out the bug fix in the change log for that patch.

Wei.

>              l1tab = NULL;
>          }
>      }
> -- 
> 2.17.1
>
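
As for the mm.c change above, the open-coded guard could also live in a
small wrapper so that other call sites can reuse it. A hypothetical
sketch, mirroring exactly the check the patch adds:

    /*
     * Hypothetical helper: only unmap if the pointer is not a direct
     * map address, i.e. it really came from map_domain_page().
     */
    static inline void unmap_domain_page_cond(const void *ptr)
    {
        if ( (unsigned long)ptr < DIRECTMAP_VIRT_START )
            unmap_domain_page(ptr);
    }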