diff mbox series

[RFC,80/84] x86/setup: Install dummy 1:1 mappings for all mem passed to allocators.

Message ID 969374c5a1c62eabb329694092d551b04d77b8be.1569489002.git.hongyax@amazon.com (mailing list archive)
State New, archived
Headers show
Series Remove direct map from Xen | expand

Commit Message

Xia, Hongyan Sept. 26, 2019, 9:46 a.m. UTC
From: Hongyan Xia <hongyax@amazon.com>

This means we no longer have an always-mapped direct map now.

Signed-off-by: Hongyan Xia <hongyax@amazon.com>
---
 xen/arch/x86/setup.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

Comments

Wei Liu Sept. 26, 2019, 3:47 p.m. UTC | #1
On Thu, Sep 26, 2019 at 10:46:43AM +0100, hongyax@amazon.com wrote:
> From: Hongyan Xia <hongyax@amazon.com>
> 
> This means we no longer have an always-mapped direct map now.

But why is a dummy mapping needed here at all? That's the same question
that was asked about a previous patch.

Wei.
diff mbox series

Patch

diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 9015e3b723..e6a3f1e0a1 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -240,7 +240,9 @@  void __init discard_initial_images(void)
         uint64_t start = (uint64_t)initial_images[i].mod_start << PAGE_SHIFT;
 
         init_domheap_pages(start,
-                           start + PAGE_ALIGN(initial_images[i].mod_end));
+                start + PAGE_ALIGN(initial_images[i].mod_end));
+        map_pages_to_xen((unsigned long)__va(start), INVALID_MFN,
+                PFN_DOWN(PAGE_ALIGN(initial_images[i].mod_end)), _PAGE_NONE);
     }
 
     nr_initial_images = 0;
@@ -1346,6 +1348,8 @@  void __init noreturn __start_xen(unsigned long mbi_p)
 
         /* Pass mapped memory to allocator /before/ creating new mappings. */
         init_boot_pages(s, min(map_s, e));
+        map_pages_to_xen((unsigned long)__va(s), INVALID_MFN,
+                         PFN_DOWN(min(map_s, e) - s), _PAGE_NONE);
         s = map_s;
         if ( s < map_e )
         {
@@ -1354,6 +1358,8 @@  void __init noreturn __start_xen(unsigned long mbi_p)
             map_s = (s + mask) & ~mask;
             map_e &= ~mask;
             init_boot_pages(map_s, map_e);
+            map_pages_to_xen((unsigned long)__va(map_s), INVALID_MFN,
+                             PFN_DOWN(map_e - map_s), _PAGE_NONE);
         }
 
         if ( map_s > map_e )
@@ -1367,9 +1373,9 @@  void __init noreturn __start_xen(unsigned long mbi_p)
 
             if ( map_e < end )
             {
+                init_boot_pages(map_e, end);
                 map_pages_to_xen((unsigned long)__va(map_e), INVALID_MFN,
                                  PFN_DOWN(end - map_e), _PAGE_NONE);
-                init_boot_pages(map_e, end);
                 map_e = end;
             }
         }
@@ -1382,9 +1388,9 @@  void __init noreturn __start_xen(unsigned long mbi_p)
         }
         if ( s < map_s )
         {
+            init_boot_pages(s, map_s);
             map_pages_to_xen((unsigned long)__va(s), INVALID_MFN,
                              PFN_DOWN(map_s - s), _PAGE_NONE);
-            init_boot_pages(s, map_s);
         }
     }
 
@@ -1506,6 +1512,8 @@  void __init noreturn __start_xen(unsigned long mbi_p)
             if ( PFN_DOWN(s) <= limit )
                 s = pfn_to_paddr(limit + 1);
             init_domheap_pages(s, e);
+            map_pages_to_xen((unsigned long)__va(s), INVALID_MFN,
+                             PFN_DOWN(e - s), _PAGE_NONE);
         }
     }
     else