diff mbox series

[RFC,65/84] x86: fix some wrong assumptions on direct map. Increase PMAP slots to 8.

Message ID 0aa271bfe3670f7058128c728d392faa69418a49.1569489002.git.hongyax@amazon.com (mailing list archive)
State New, archived
Headers show
Series Remove direct map from Xen | expand

Commit Message

Xia, Hongyan Sept. 26, 2019, 9:46 a.m. UTC
From: Hongyan Xia <hongyax@amazon.com>

Signed-off-by: Hongyan Xia <hongyax@amazon.com>
---
 xen/arch/x86/domain_page.c | 8 --------
 xen/arch/x86/x86_64/mm.c   | 3 ++-
 xen/include/asm-x86/pmap.h | 4 ++--
 3 files changed, 4 insertions(+), 11 deletions(-)

Comments

Wei Liu Sept. 26, 2019, 2:08 p.m. UTC | #1
On Thu, Sep 26, 2019 at 10:46:28AM +0100, hongyax@amazon.com wrote:
> From: Hongyan Xia <hongyax@amazon.com>
> 
> Signed-off-by: Hongyan Xia <hongyax@amazon.com>
> ---
>  xen/arch/x86/domain_page.c | 8 --------
>  xen/arch/x86/x86_64/mm.c   | 3 ++-
>  xen/include/asm-x86/pmap.h | 4 ++--
>  3 files changed, 4 insertions(+), 11 deletions(-)
> 
> diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
> index 4a3995ccef..f4f53a2a33 100644
> --- a/xen/arch/x86/domain_page.c
> +++ b/xen/arch/x86/domain_page.c
> @@ -328,11 +328,6 @@ void *map_domain_page_global(mfn_t mfn)
>               system_state < SYS_STATE_active) ||
>              local_irq_is_enabled()));
>  
> -#ifdef NDEBUG
> -    if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
> -        return mfn_to_virt(mfn_x(mfn));
> -#endif
> -

I wouldn't call this a wrong assumption.

This path is a fast path. The problem is it is no longer applicable in
the new world.

I would change the commit message to something like:

   Drop fast paths in map_domain_page_global API pair, because Xen will
   no longer have a direct map.

>      return vmap(&mfn, 1);
>  }
>  
> @@ -340,9 +335,6 @@ void unmap_domain_page_global(const void *ptr)
>  {
>      unsigned long va = (unsigned long)ptr;
>  
> -    if ( va >= DIRECTMAP_VIRT_START )
> -        return;
> -
>      ASSERT(va >= VMAP_VIRT_START && va < VMAP_VIRT_END);
>  
>      vunmap(ptr);
> diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
> index 37e8d59e5d..40f29f8ddc 100644
> --- a/xen/arch/x86/x86_64/mm.c
> +++ b/xen/arch/x86/x86_64/mm.c
> @@ -712,7 +712,8 @@ void __init paging_init(void)
>      if ( mfn_eq(l2_ro_mpt_mfn, INVALID_MFN) )
>          goto nomem;
>      l2_ro_mpt = map_xen_pagetable(l2_ro_mpt_mfn);
> -    compat_idle_pg_table_l2 = l2_ro_mpt;
> +    /* compat_idle_pg_table_l2 is used globally. */
> +    compat_idle_pg_table_l2 = map_domain_page_global(l2_ro_mpt_mfn);

Again, if this is a bug in a previous patch, it should be fixed there.

>      clear_page(l2_ro_mpt);
>      l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
>                l3e_from_mfn(l2_ro_mpt_mfn, __PAGE_HYPERVISOR_RO));
> diff --git a/xen/include/asm-x86/pmap.h b/xen/include/asm-x86/pmap.h
> index feab1e9170..34d4f2bb38 100644
> --- a/xen/include/asm-x86/pmap.h
> +++ b/xen/include/asm-x86/pmap.h
> @@ -1,8 +1,8 @@
>  #ifndef __X86_PMAP_H__
>  #define __X86_PMAP_H__
>  
> -/* Large enough for mapping 5 levels of page tables */
> -#define NUM_FIX_PMAP 5
> +/* Large enough for mapping 5 levels of page tables with some headroom */
> +#define NUM_FIX_PMAP 8
>  

This looks rather arbitrary to me. Can you specify why extra slots are
needed? The original justification for 5 is for page tables. Now
obviously it is used for more than just mapping page tables.  I'm
worried that even 8 may not be enough. 

Also, this change should either be in a separate patch or folded into
PMAP patch itself.

Wei.

>  void pmap_lock(void);
>  void pmap_unlock(void);
> -- 
> 2.17.1
>
diff mbox series

Patch

diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 4a3995ccef..f4f53a2a33 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -328,11 +328,6 @@  void *map_domain_page_global(mfn_t mfn)
              system_state < SYS_STATE_active) ||
             local_irq_is_enabled()));
 
-#ifdef NDEBUG
-    if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn_x(mfn));
-#endif
-
     return vmap(&mfn, 1);
 }
 
@@ -340,9 +335,6 @@  void unmap_domain_page_global(const void *ptr)
 {
     unsigned long va = (unsigned long)ptr;
 
-    if ( va >= DIRECTMAP_VIRT_START )
-        return;
-
     ASSERT(va >= VMAP_VIRT_START && va < VMAP_VIRT_END);
 
     vunmap(ptr);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 37e8d59e5d..40f29f8ddc 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -712,7 +712,8 @@  void __init paging_init(void)
     if ( mfn_eq(l2_ro_mpt_mfn, INVALID_MFN) )
         goto nomem;
     l2_ro_mpt = map_xen_pagetable(l2_ro_mpt_mfn);
-    compat_idle_pg_table_l2 = l2_ro_mpt;
+    /* compat_idle_pg_table_l2 is used globally. */
+    compat_idle_pg_table_l2 = map_domain_page_global(l2_ro_mpt_mfn);
     clear_page(l2_ro_mpt);
     l3e_write(&l3_ro_mpt[l3_table_offset(HIRO_COMPAT_MPT_VIRT_START)],
               l3e_from_mfn(l2_ro_mpt_mfn, __PAGE_HYPERVISOR_RO));
diff --git a/xen/include/asm-x86/pmap.h b/xen/include/asm-x86/pmap.h
index feab1e9170..34d4f2bb38 100644
--- a/xen/include/asm-x86/pmap.h
+++ b/xen/include/asm-x86/pmap.h
@@ -1,8 +1,8 @@ 
 #ifndef __X86_PMAP_H__
 #define __X86_PMAP_H__
 
-/* Large enough for mapping 5 levels of page tables */
-#define NUM_FIX_PMAP 5
+/* Large enough for mapping 5 levels of page tables with some headroom */
+#define NUM_FIX_PMAP 8
 
 void pmap_lock(void);
 void pmap_unlock(void);