
[v5,08/15] xen/page_alloc: Add a path for xenheap when there is no direct map

Message ID: 20250108151822.16030-9-alejandro.vallejo@cloud.com
State: New
Series: Remove the directmap

Commit Message

Alejandro Vallejo Jan. 8, 2025, 3:18 p.m. UTC
From: Hongyan Xia <hongyxia@amazon.com>

When there is no always-mapped direct map, xenheap allocations need to
be mapped and unmapped on demand.

Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
Signed-off-by: Julien Grall <jgrall@amazon.com>
Signed-off-by: Elias El Yandouzi <eliasely@amazon.com>
Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
v4->v5:
  * Remove stray comma in printk() after XENLOG_WARNING.

Elias @ v4:
  I have left the calls to map_pages_to_xen() and destroy_xen_mappings()
  in the split heap for now. I am not entirely convinced they are
  necessary, because in that setup only the xenheap would be always
  mapped, and it doesn't contain any guest memory (aside from the
  grant table). So mapping/unmapping on every allocation seems
  unnecessary.

v3->v4:
  * Call printk() instead of dprintk()

v1->v2:
  * Fix remaining wrong indentation in alloc_xenheap_pages()

Changes since Hongyan's version:
  * Rebase
  * Fix indentation in alloc_xenheap_pages()
  * Fix build for arm32
---
 xen/common/page_alloc.c | 43 +++++++++++++++++++++++++++++++++++++++--
 1 file changed, 41 insertions(+), 2 deletions(-)
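
One detail worth noting when reading the patch: map_pages_to_xen() takes
a page count (1UL << order) while destroy_xen_mappings() takes a byte
range (PAGE_SIZE << order), so the map and unmap calls cover the same
extent. A standalone sketch of that arithmetic, assuming 4KiB pages (the
PAGE_SIZE below is a local stand-in for the Xen macro, not the real one):

#include <stdio.h>

#define PAGE_SIZE 4096UL /* stand-in for Xen's PAGE_SIZE; 4KiB assumed */

int main(void)
{
    unsigned int order = 2;                   /* e.g. an order-2 allocation */
    unsigned long nr_pages = 1UL << order;    /* count given to map_pages_to_xen() */
    unsigned long bytes = PAGE_SIZE << order; /* extent given to destroy_xen_mappings() */

    /* Both expressions describe the same region: nr_pages * PAGE_SIZE bytes. */
    printf("order %u -> %lu pages, %lu bytes (match: %s)\n",
           order, nr_pages, bytes,
           nr_pages * PAGE_SIZE == bytes ? "yes" : "no");

    return 0;
}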

Patch

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 1bf070c8c5df..1c01332b6cb0 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2435,6 +2435,7 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe)
 void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 {
     struct page_info *pg;
+    void *virt_addr;
 
     ASSERT_ALLOC_CONTEXT();
 
@@ -2443,17 +2444,36 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
     if ( unlikely(pg == NULL) )
         return NULL;
 
-    return page_to_virt(pg);
+    virt_addr = page_to_virt(pg);
+
+    if ( !has_directmap() &&
+         map_pages_to_xen((unsigned long)virt_addr, page_to_mfn(pg), 1UL << order,
+                          PAGE_HYPERVISOR) )
+    {
+        /* Failed to map xenheap pages. */
+        free_heap_pages(pg, order, false);
+        return NULL;
+    }
+
+    return virt_addr;
 }
 
 
 void free_xenheap_pages(void *v, unsigned int order)
 {
+    unsigned long va = (unsigned long)v & PAGE_MASK;
+
     ASSERT_ALLOC_CONTEXT();
 
     if ( v == NULL )
         return;
 
+    if ( !has_directmap() &&
+         destroy_xen_mappings(va, va + (PAGE_SIZE << order)) )
+        printk(XENLOG_WARNING
+               "Error while destroying xenheap mappings at %p, order %u\n",
+               v, order);
+
     free_heap_pages(virt_to_page(v), order, false);
 }
 
@@ -2477,6 +2497,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 {
     struct page_info *pg;
     unsigned int i;
+    void *virt_addr;
 
     ASSERT_ALLOC_CONTEXT();
 
@@ -2489,16 +2510,28 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
     if ( unlikely(pg == NULL) )
         return NULL;
 
+    virt_addr = page_to_virt(pg);
+
+    if ( !has_directmap() &&
+         map_pages_to_xen((unsigned long)virt_addr, page_to_mfn(pg), 1UL << order,
+                          PAGE_HYPERVISOR) )
+    {
+        /* Failed to map xenheap pages. */
+        free_domheap_pages(pg, order);
+        return NULL;
+    }
+
     for ( i = 0; i < (1u << order); i++ )
         pg[i].count_info |= PGC_xen_heap;
 
-    return page_to_virt(pg);
+    return virt_addr;
 }
 
 void free_xenheap_pages(void *v, unsigned int order)
 {
     struct page_info *pg;
     unsigned int i;
+    unsigned long va = (unsigned long)v & PAGE_MASK;
 
     ASSERT_ALLOC_CONTEXT();
 
@@ -2510,6 +2543,12 @@ void free_xenheap_pages(void *v, unsigned int order)
     for ( i = 0; i < (1u << order); i++ )
         pg[i].count_info &= ~PGC_xen_heap;
 
+    if ( !has_directmap() &&
+         destroy_xen_mappings(va, va + (PAGE_SIZE << order)) )
+        printk(XENLOG_WARNING
+               "Error while destroying xenheap mappings at %p, order %u\n",
+               v, order);
+
     free_heap_pages(pg, order, true);
 }
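
For completeness, nothing changes for callers; the mapping work is
hidden inside the allocator. A hypothetical in-hypervisor caller
(illustrative sketch, not part of the patch) still follows the usual
pattern:

/* Hypothetical caller; signatures as in xen/common/page_alloc.c. */
void *buf = alloc_xenheap_pages(0, 0);  /* order-0, no special memflags */

if ( buf )
{
    memset(buf, 0, PAGE_SIZE);   /* address is mapped here even without a direct map */
    free_xenheap_pages(buf, 0);  /* unmaps (when !has_directmap()) and frees */
}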