[v2,03/55] x86: introduce a new set of APIs to manage Xen page tables

Message ID cb0923895bbcccf00bac8592f692cdca540f4258.1569833766.git.hongyax@amazon.com
State New, archived
Series Switch to domheap for Xen PTEs

Commit Message

Xia, Hongyan Sept. 30, 2019, 10:32 a.m. UTC
From: Wei Liu <wei.liu2@citrix.com>

We are going to switch to using domheap pages for page tables. A new
set of APIs is introduced to allocate, map, unmap and free pages for
page tables.

The allocation and deallocation functions work on mfn_t rather than
struct page_info, because they are required to work even before the
frame table is set up.
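
For illustration, a minimal sketch of an early-boot caller creating a
page table before the frame table exists. The caller, the choice of an
l1 table and the absence of error handling are assumptions for the
example, not code from this patch:

    /*
     * Early in boot, alloc_xen_pagetable_new() falls back to
     * alloc_boot_pages(), so no struct page_info is ever touched.
     * (alloc_boot_pages() does not return on failure, hence no error
     * check in this sketch.)
     */
    mfn_t mfn = alloc_xen_pagetable_new();
    l1_pgentry_t *pl1e = map_xen_pagetable_new(mfn);

    clear_page(pl1e);
    /* ... populate entries, hook mfn into a parent table ... */
    UNMAP_XEN_PAGETABLE_NEW(pl1e); /* unmaps and NULLs pl1e */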

Implement the old functions with the new ones. We will rewrite, site
by site, the other mm functions that manipulate page tables to use the
new APIs; one such conversion is sketched below.
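
A before/after sketch of two alternative versions of one hypothetical
call site (the surrounding logic and the error value are assumptions,
not taken from this series):

    /* Before: xenheap-backed, pointer-centric. */
    l1_pgentry_t *pl1e = alloc_xen_pagetable();

    if ( !pl1e )
        return -ENOMEM;
    /* ... read/write entries through pl1e ... */
    free_xen_pagetable(pl1e);

    /* After: mfn-centric, with an explicit map/unmap window. */
    mfn_t l1mfn = alloc_xen_pagetable_new();
    l1_pgentry_t *pl1e;

    if ( mfn_eq(l1mfn, INVALID_MFN) )
        return -ENOMEM;
    pl1e = map_xen_pagetable_new(l1mfn);
    /* ... read/write entries through pl1e ... */
    UNMAP_XEN_PAGETABLE_NEW(pl1e);
    free_xen_pagetable_new(l1mfn);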

Note that these new APIs are still backed by xenheap pages underneath,
and no actual mapping or unmapping is done yet, so that Xen is not
broken halfway through the conversion. They will be switched to
domheap pages and dynamic mappings once all usage of the old APIs has
been eliminated; a sketch of that eventual shape follows.
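
Assuming the generic alloc_domheap_page()/map_domain_page() interfaces,
the implementations could plausibly end up looking something like the
following (a sketch only, not the final form from this series):

    mfn_t alloc_xen_pagetable_new(void)
    {
        /* Sketch: domheap allocation instead of alloc_xenheap_page(). */
        struct page_info *pg = alloc_domheap_page(NULL, 0);

        return pg ? page_to_mfn(pg) : INVALID_MFN;
    }

    void *map_xen_pagetable_new(mfn_t mfn)
    {
        /* Sketch: a transient mapping instead of the directmap. */
        return map_domain_page(mfn);
    }

    void unmap_xen_pagetable_new(void *v)
    {
        /* v can point into a table or be NULL, as in this patch. */
        if ( v )
            unmap_domain_page(v);
    }

    void free_xen_pagetable_new(mfn_t mfn)
    {
        if ( !mfn_eq(mfn, INVALID_MFN) )
            free_domheap_page(mfn_to_page(mfn));
    }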

No functional change intended in this patch.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/mm.c        | 39 ++++++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/mm.h | 11 +++++++++++
 2 files changed, 45 insertions(+), 5 deletions(-)

Patch

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e90c8a63a6..e2c8c3f3a1 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -119,6 +119,7 @@ 
 #include <xen/efi.h>
 #include <xen/grant_table.h>
 #include <xen/hypercall.h>
+#include <xen/mm.h>
 #include <asm/paging.h>
 #include <asm/shadow.h>
 #include <asm/page.h>
@@ -4847,22 +4848,50 @@ int mmcfg_intercept_write(
 }
 
 void *alloc_xen_pagetable(void)
+{
+    mfn_t mfn;
+
+    mfn = alloc_xen_pagetable_new();
+    ASSERT(!mfn_eq(mfn, INVALID_MFN));
+
+    return map_xen_pagetable_new(mfn);
+}
+
+void free_xen_pagetable(void *v)
+{
+    if ( system_state != SYS_STATE_early_boot )
+        free_xen_pagetable_new(virt_to_mfn(v));
+}
+
+mfn_t alloc_xen_pagetable_new(void)
 {
     if ( system_state != SYS_STATE_early_boot )
     {
         void *ptr = alloc_xenheap_page();
 
         BUG_ON(!hardware_domain && !ptr);
-        return ptr;
+        return virt_to_mfn(ptr);
     }
 
-    return mfn_to_virt(mfn_x(alloc_boot_pages(1, 1)));
+    return alloc_boot_pages(1, 1);
 }
 
-void free_xen_pagetable(void *v)
+void *map_xen_pagetable_new(mfn_t mfn)
 {
-    if ( system_state != SYS_STATE_early_boot )
-        free_xenheap_page(v);
+    return mfn_to_virt(mfn_x(mfn));
+}
+
+/* v can point to an entry within a table or be NULL */
+void unmap_xen_pagetable_new(void *v)
+{
+    /* XXX still using xenheap page, no need to do anything.  */
+}
+
+/* mfn can be INVALID_MFN */
+void free_xen_pagetable_new(mfn_t mfn)
+{
+    if ( system_state != SYS_STATE_early_boot && !mfn_eq(mfn, INVALID_MFN) )
+        free_xenheap_page(mfn_to_virt(mfn_x(mfn)));
 }
 
 static DEFINE_SPINLOCK(map_pgdir_lock);
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 2800106327..80173eb4c3 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -633,6 +633,17 @@ int arch_acquire_resource(struct domain *d, unsigned int type,
 /* Allocator functions for Xen pagetables. */
 void *alloc_xen_pagetable(void);
 void free_xen_pagetable(void *v);
+mfn_t alloc_xen_pagetable_new(void);
+void *map_xen_pagetable_new(mfn_t mfn);
+void unmap_xen_pagetable_new(void *v);
+void free_xen_pagetable_new(mfn_t mfn);
+
+#define UNMAP_XEN_PAGETABLE_NEW(ptr)    \
+    do {                                \
+        unmap_xen_pagetable_new((ptr)); \
+        (ptr) = NULL;                   \
+    } while (0)
+
 l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
 
 #endif /* __ASM_X86_MM_H__ */