
[V3,(resend),10/19] xen/page_alloc: Add a path for xenheap when there is no direct map

Message ID 20240513134046.82605-11-eliasely@amazon.com (mailing list archive)
State New
Series Remove the directmap

Commit Message

Elias El Yandouzi May 13, 2024, 1:40 p.m. UTC
From: Hongyan Xia <hongyxia@amazon.com>

When there is no always-mapped direct map, xenheap allocations need
to be mapped and unmapped on demand.

Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
Signed-off-by: Julien Grall <jgrall@amazon.com>
Signed-off-by: Elias El Yandouzi <eliasely@amazon.com>

----

    I have left the calls to map_pages_to_xen() and destroy_xen_mappings()
    in the split-heap case for now. I am not entirely convinced they are
    necessary, because in that setup only the xenheap would be always mapped,
    and it doesn't contain any guest memory (aside from the grant table).
    So mapping/unmapping on every allocation seems unnecessary.

    Changes in v2:
        * Fix remaining wrong indentation in alloc_xenheap_pages()

    Changes since Hongyan's version:
        * Rebase
        * Fix indentation in alloc_xenheap_pages()
        * Fix build for arm32

Comments

Roger Pau Monné May 14, 2024, 1:07 p.m. UTC | #1
On Mon, May 13, 2024 at 01:40:37PM +0000, Elias El Yandouzi wrote:
> From: Hongyan Xia <hongyxia@amazon.com>
> 
> When there is no always-mapped direct map, xenheap allocations need
> to be mapped and unmapped on demand.
> 
> Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
> Signed-off-by: Julien Grall <jgrall@amazon.com>
> Signed-off-by: Elias El Yandouzi <eliasely@amazon.com>
> 
> ----
> 
>     I have left the calls to map_pages_to_xen() and destroy_xen_mappings()
>     in the split-heap case for now. I am not entirely convinced they are
>     necessary, because in that setup only the xenheap would be always mapped,
>     and it doesn't contain any guest memory (aside from the grant table).
>     So mapping/unmapping on every allocation seems unnecessary.

I'm also concerned by this. Did you test that
CONFIG_SEPARATE_XENHEAP=y works properly with the added {,un}map
calls?

If CONFIG_SEPARATE_XENHEAP=y, I would expect the memory returned by
alloc_heap_pages(MEMZONE_XEN...) to already have its virtual mappings
created ahead of time?

The comment at the top of page_alloc.c also needs to be updated to
note how the removal of the direct map affects xenheap allocations.
AFAICT a new combination is now possible:

CONFIG_SEPARATE_XENHEAP=n & CONFIG_NO_DIRECTMAP=y
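
Something along these lines perhaps (wording is only a suggestion, and
would need to be folded into the existing layout description there):

    /*
     *  - CONFIG_SEPARATE_XENHEAP=n without a direct map:
     *
     *    Xen heap and Dom heap pages still come from the same pool, but
     *    xenheap pages are no longer implicitly mapped: they are mapped
     *    with map_pages_to_xen() on allocation and torn down with
     *    destroy_xen_mappings() on free.
     */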

>     Changes in v2:
>         * Fix remaining wrong indentation in alloc_xenheap_pages()
> 
>     Changes since Hongyan's version:
>         * Rebase
>         * Fix indentation in alloc_xenheap_pages()
>         * Fix build for arm32
> 
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index 9b7e4721cd..dfb2c05322 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -2242,6 +2242,7 @@ void init_xenheap_pages(paddr_t ps, paddr_t pe)
>  void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
>  {
>      struct page_info *pg;
> +    void *ret;

virt_addr maybe? ret is usually what I would expect to hold the return
value of the function.

>  
>      ASSERT_ALLOC_CONTEXT();
>  
> @@ -2250,17 +2251,36 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
>      if ( unlikely(pg == NULL) )
>          return NULL;
>  
> +    ret = page_to_virt(pg);
> +
> +    if ( !has_directmap() &&
> +         map_pages_to_xen((unsigned long)ret, page_to_mfn(pg), 1UL << order,
> +                          PAGE_HYPERVISOR) )
> +    {
> +        /* Failed to map xenheap pages. */
> +        free_heap_pages(pg, order, false);
> +        return NULL;
> +    }
> +
>      return page_to_virt(pg);
>  }
>  
>  
>  void free_xenheap_pages(void *v, unsigned int order)
>  {
> +    unsigned long va = (unsigned long)v & PAGE_MASK;
> +
>      ASSERT_ALLOC_CONTEXT();
>  
>      if ( v == NULL )
>          return;
>  
> +    if ( !has_directmap() &&
> +         destroy_xen_mappings(va, va + (1UL << (order + PAGE_SHIFT))) )
> +        dprintk(XENLOG_WARNING,
> +                "Error while destroying xenheap mappings at %p, order %u\n",
> +                v, order);
> +
>      free_heap_pages(virt_to_page(v), order, false);
>  }
>  
> @@ -2284,6 +2304,7 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
>  {
>      struct page_info *pg;
>      unsigned int i;
> +    void *ret;
>  
>      ASSERT_ALLOC_CONTEXT();
>  
> @@ -2296,16 +2317,28 @@ void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
>      if ( unlikely(pg == NULL) )
>          return NULL;
>  
> +    ret = page_to_virt(pg);
> +
> +    if ( !has_directmap() &&
> +         map_pages_to_xen((unsigned long)ret, page_to_mfn(pg), 1UL << order,
> +                          PAGE_HYPERVISOR) )
> +    {
> +        /* Failed to map xenheap pages. */
> +        free_domheap_pages(pg, order);
> +        return NULL;
> +    }
> +
>      for ( i = 0; i < (1u << order); i++ )
>          pg[i].count_info |= PGC_xen_heap;
>  
> -    return page_to_virt(pg);
> +    return ret;
>  }
>  
>  void free_xenheap_pages(void *v, unsigned int order)
>  {
>      struct page_info *pg;
>      unsigned int i;
> +    unsigned long va = (unsigned long)v & PAGE_MASK;
>  
>      ASSERT_ALLOC_CONTEXT();
>  
> @@ -2317,6 +2350,12 @@ void free_xenheap_pages(void *v, unsigned int order)
>      for ( i = 0; i < (1u << order); i++ )
>          pg[i].count_info &= ~PGC_xen_heap;
>  
> +    if ( !has_directmap() &&
> +         destroy_xen_mappings(va, va + (1UL << (order + PAGE_SHIFT))) )
> +        dprintk(XENLOG_WARNING,
> +                "Error while destroying xenheap mappings at %p, order %u\n",
> +                v, order);

I don't think this should be a dprintk(); leaving mappings behind could
be a severe issue, given that the whole point of this work is to avoid
the data leaks that come from having everything mapped in the direct map.

This needs to be a printk() IMO. I'm unsure whether freeing the memory
should be avoided if destroying the mappings failed; I can't think of a
way to recover from this gracefully.
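
IOW something along these lines (untested, message and exact log level
of course up to you):

    /* Leaving stale mappings behind defeats the point of the series. */
    if ( !has_directmap() &&
         destroy_xen_mappings(va, va + (1UL << (order + PAGE_SHIFT))) )
        printk(XENLOG_ERR
               "Failed to destroy xenheap mappings at %p, order %u\n",
               v, order);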

Thanks, Roger.
Jan Beulich May 15, 2024, 3:13 p.m. UTC | #2
On 13.05.2024 15:40, Elias El Yandouzi wrote:
> From: Hongyan Xia <hongyxia@amazon.com>
> 
> When there is no always-mapped direct map, xenheap allocations need
> to be mapped and unmapped on demand.
> 
> Signed-off-by: Hongyan Xia <hongyxia@amazon.com>
> Signed-off-by: Julien Grall <jgrall@amazon.com>
> Signed-off-by: Elias El Yandouzi <eliasely@amazon.com>
> 
> ----
> 
>     I have left the calls to map_pages_to_xen() and destroy_xen_mappings()
>     in the split-heap case for now. I am not entirely convinced they are
>     necessary, because in that setup only the xenheap would be always mapped,
>     and it doesn't contain any guest memory (aside from the grant table).
>     So mapping/unmapping on every allocation seems unnecessary.

Kind of depends on whether you expect guest data to potentially be copied
into memory that came from the Xen heap, even if just transiently.

> @@ -2317,6 +2350,12 @@ void free_xenheap_pages(void *v, unsigned int order)
>      for ( i = 0; i < (1u << order); i++ )
>          pg[i].count_info &= ~PGC_xen_heap;
>  
> +    if ( !has_directmap() &&
> +         destroy_xen_mappings(va, va + (1UL << (order + PAGE_SHIFT))) )

PAGE_SIZE << order ?
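
I.e. (equivalent, just easier to read imo):

    destroy_xen_mappings(va, va + (PAGE_SIZE << order))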

Also, on x86 at least, to limit the impact of this, I think there wants to
be a prereq change to modify_xen_mappings(), limiting the final flush_area()
there to just the one page altered when only a single page is being fiddled
with.

Jan

Patch

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 9b7e4721cd..dfb2c05322 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2242,6 +2242,7 @@  void init_xenheap_pages(paddr_t ps, paddr_t pe)
 void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 {
     struct page_info *pg;
+    void *ret;
 
     ASSERT_ALLOC_CONTEXT();
 
@@ -2250,17 +2251,36 @@  void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
     if ( unlikely(pg == NULL) )
         return NULL;
 
+    ret = page_to_virt(pg);
+
+    if ( !has_directmap() &&
+         map_pages_to_xen((unsigned long)ret, page_to_mfn(pg), 1UL << order,
+                          PAGE_HYPERVISOR) )
+    {
+        /* Failed to map xenheap pages. */
+        free_heap_pages(pg, order, false);
+        return NULL;
+    }
+
     return page_to_virt(pg);
 }
 
 
 void free_xenheap_pages(void *v, unsigned int order)
 {
+    unsigned long va = (unsigned long)v & PAGE_MASK;
+
     ASSERT_ALLOC_CONTEXT();
 
     if ( v == NULL )
         return;
 
+    if ( !has_directmap() &&
+         destroy_xen_mappings(va, va + (1UL << (order + PAGE_SHIFT))) )
+        dprintk(XENLOG_WARNING,
+                "Error while destroying xenheap mappings at %p, order %u\n",
+                v, order);
+
     free_heap_pages(virt_to_page(v), order, false);
 }
 
@@ -2284,6 +2304,7 @@  void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
 {
     struct page_info *pg;
     unsigned int i;
+    void *ret;
 
     ASSERT_ALLOC_CONTEXT();
 
@@ -2296,16 +2317,28 @@  void *alloc_xenheap_pages(unsigned int order, unsigned int memflags)
     if ( unlikely(pg == NULL) )
         return NULL;
 
+    ret = page_to_virt(pg);
+
+    if ( !has_directmap() &&
+         map_pages_to_xen((unsigned long)ret, page_to_mfn(pg), 1UL << order,
+                          PAGE_HYPERVISOR) )
+    {
+        /* Failed to map xenheap pages. */
+        free_domheap_pages(pg, order);
+        return NULL;
+    }
+
     for ( i = 0; i < (1u << order); i++ )
         pg[i].count_info |= PGC_xen_heap;
 
-    return page_to_virt(pg);
+    return ret;
 }
 
 void free_xenheap_pages(void *v, unsigned int order)
 {
     struct page_info *pg;
     unsigned int i;
+    unsigned long va = (unsigned long)v & PAGE_MASK;
 
     ASSERT_ALLOC_CONTEXT();
 
@@ -2317,6 +2350,12 @@  void free_xenheap_pages(void *v, unsigned int order)
     for ( i = 0; i < (1u << order); i++ )
         pg[i].count_info &= ~PGC_xen_heap;
 
+    if ( !has_directmap() &&
+         destroy_xen_mappings(va, va + (1UL << (order + PAGE_SHIFT))) )
+        dprintk(XENLOG_WARNING,
+                "Error while destroying xenheap mappings at %p, order %u\n",
+                v, order);
+
     free_heap_pages(pg, order, true);
 }