[v5,6/7] xen/arm: introduce acquire_staticmem_pages and acquire_domstatic_pages

Message ID 20210824095045.2281500-7-penny.zheng@arm.com (mailing list archive)
State Superseded
Series: Domain on Static Allocation

Commit Message

Penny Zheng Aug. 24, 2021, 9:50 a.m. UTC
acquire_staticmem_pages acquires nr_mfns contiguous pages of static
memory, starting at smfn. It is the equivalent of alloc_heap_pages
for static memory.

For each page, it checks that the page is reserved (PGC_reserved)
and free. It also performs the necessary initialisation, mostly the
same as in alloc_heap_pages: following the same cache-coherency policy,
switching the page state to PGC_state_inuse, etc.

acquire_domstatic_pages is the equivalent of alloc_domheap_pages for
static memory: it acquires nr_mfns contiguous pages of static memory
and assigns them to a specific domain.

It uses acquire_staticmem_pages to acquire nr_mfns pages of static memory
and, on success, uses assign_pages to assign those pages to the domain.
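
As an illustration only (the call site below is hypothetical, not part
of this patch), a boot-time user could look like:

    /* Hypothetical boot-time caller, for illustration only. */
    int rc = acquire_domstatic_pages(d, smfn, nr_mfns, 0);

    if ( rc )
        panic("Failed to assign static memory to %pd: %d\n", d, rc);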

To differentiate pages of static memory from those allocated from the
heap, this patch introduces a new page flag, PGC_reserved, and marks
pages of static memory PGC_reserved when initialising them.
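
The resulting count_info life cycle of a static page, as implemented
below, is roughly:

    /* Free static page, as left by free_staticmem_pages(): */
    pg->count_info == (PGC_state_free | PGC_reserved)
    /* After acquire_staticmem_pages(): */
    pg->count_info == (PGC_reserved | PGC_state_inuse)
    /* After assign_pages(): PGC_reserved preserved, refcount set to 1. */
    pg->count_info == (PGC_reserved | PGC_allocated | 1)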

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
v5 changes
- bundle all the functions for static allocation in a single place
- return an error and revert the changes when a page is not free and
reserved.
- check the MFN is valid for every page and also add a comment to warn
that this function needs to be reworked if used outside of boot.
- reduce the use of mfn_to_page()/page_to_mfn()
- use ASSERT_UNREACHABLE() to also check that the two flags are clear
- pass the start MFN first and then the number of pages in both
acquire_staticmem_pages and acquire_domstatic_pages
- make acquire_domstatic_pages() return an errno
- fold in the commit "xen/arm: introduce PGC_reserved"
---
 xen/common/page_alloc.c  | 118 ++++++++++++++++++++++++++++++++++++++-
 xen/include/asm-arm/mm.h |   3 +
 xen/include/xen/mm.h     |   2 +
 3 files changed, 121 insertions(+), 2 deletions(-)

Comments

Jan Beulich Aug. 24, 2021, 11:03 a.m. UTC | #1
On 24.08.2021 11:50, Penny Zheng wrote:
> +/*
> + * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
> + * static memory.
> + * This function needs to be reworked if used outside of boot.
> + */
> +static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
> +                                                         unsigned long nr_mfns,
> +                                                         unsigned int memflags)
> +{
> +    bool need_tlbflush = false;
> +    uint32_t tlbflush_timestamp = 0;
> +    unsigned long i;
> +    struct page_info *pg;
> +
> +    ASSERT(nr_mfns);
> +    for ( unsigned long i = 0; i < nr_mfns; i++ )

This form of variable declaration gets warned about by some compiler
versions and may only be used once we settle on C99 as the baseline
language level. There's one more such instance below, and the one
here is even worse in that it shadows a function-scope variable.
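
The straightforward fix would be to drop the local declaration and
reuse the function-scope "i", e.g.:

    for ( i = 0; i < nr_mfns; i++ )
        if ( !mfn_valid(mfn_add(smfn, i)) )
            return NULL;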

> +        if ( !mfn_valid(mfn_add(smfn, i)) )
> +            return NULL;
> +
> +    pg = mfn_to_page(smfn);
> +
> +    spin_lock(&heap_lock);
> +
> +    for ( i = 0; i < nr_mfns; i++ )
> +    {
> +        /* The page should be reserved and not yet allocated. */
> +        if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
> +        {
> +            printk(XENLOG_ERR
> +                   "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
> +                   i, mfn_x(smfn) + i,
> +                   pg[i].count_info, pg[i].tlbflush_timestamp);
> +            goto out_err;
> +        }
> +
> +        if ( !(memflags & MEMF_no_tlbflush) )
> +            accumulate_tlbflush(&need_tlbflush, &pg[i],
> +                                &tlbflush_timestamp);
> +
> +        /*
> +         * Preserve flag PGC_reserved and change page state
> +         * to PGC_state_inuse.
> +         */
> +        pg[i].count_info = PGC_reserved | PGC_state_inuse;
> +        /* Initialise fields which have other uses for free pages. */
> +        pg[i].u.inuse.type_info = 0;
> +        page_set_owner(&pg[i], NULL);
>      }
> +
> +    spin_unlock(&heap_lock);
> +
> +    if ( need_tlbflush )
> +        filtered_flush_tlb_mask(tlbflush_timestamp);
> +
> +    /*
> +     * Ensure cache and RAM are consistent for platforms where the guest
> +     * can control its own visibility of/through the cache.
> +     */
> +    for ( i = 0; i < nr_mfns; i++ )
> +        flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
> +
> +    return pg;
> +
> +out_err:

Please indent labels by at least one space.

> +    for ( unsigned long j = 0; j < i; j++ )

You don't need the extra variable here at all - simply use
"while ( i-- )".

Jan

Patch

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 05c9834dc2..c0a8898502 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,6 +151,10 @@ 
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
+#ifndef PGC_reserved
+#define PGC_reserved 0
+#endif
+
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
  * e.g. 'badpage=0x3f45,0x8a321'.
@@ -2282,7 +2286,7 @@  int assign_pages(
 
         for ( i = 0; i < nr; i++ )
         {
-            ASSERT(!(pg[i].count_info & ~PGC_extra));
+            ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
             if ( pg[i].count_info & PGC_extra )
                 extra_pages++;
         }
@@ -2321,7 +2325,8 @@  int assign_pages(
         page_set_owner(&pg[i], d);
         smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
         pg[i].count_info =
-            (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
+            (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+
         page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
     }
 
@@ -2625,7 +2630,116 @@  void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
             /* TODO: asynchronous scrubbing for pages of static memory. */
             scrub_one_page(pg);
         }
+
+        /* In case initializing page of static memory, mark it PGC_reserved. */
+        pg[i].count_info |= PGC_reserved;
+    }
+}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+                                                         unsigned long nr_mfns,
+                                                         unsigned int memflags)
+{
+    bool need_tlbflush = false;
+    uint32_t tlbflush_timestamp = 0;
+    unsigned long i;
+    struct page_info *pg;
+
+    ASSERT(nr_mfns);
+    for ( unsigned long i = 0; i < nr_mfns; i++ )
+        if ( !mfn_valid(mfn_add(smfn, i)) )
+            return NULL;
+
+    pg = mfn_to_page(smfn);
+
+    spin_lock(&heap_lock);
+
+    for ( i = 0; i < nr_mfns; i++ )
+    {
+        /* The page should be reserved and not yet allocated. */
+        if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+        {
+            printk(XENLOG_ERR
+                   "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
+                   i, mfn_x(smfn) + i,
+                   pg[i].count_info, pg[i].tlbflush_timestamp);
+            goto out_err;
+        }
+
+        if ( !(memflags & MEMF_no_tlbflush) )
+            accumulate_tlbflush(&need_tlbflush, &pg[i],
+                                &tlbflush_timestamp);
+
+        /*
+         * Preserve flag PGC_reserved and change page state
+         * to PGC_state_inuse.
+         */
+        pg[i].count_info = PGC_reserved | PGC_state_inuse;
+        /* Initialise fields which have other uses for free pages. */
+        pg[i].u.inuse.type_info = 0;
+        page_set_owner(&pg[i], NULL);
     }
+
+    spin_unlock(&heap_lock);
+
+    if ( need_tlbflush )
+        filtered_flush_tlb_mask(tlbflush_timestamp);
+
+    /*
+     * Ensure cache and RAM are consistent for platforms where the guest
+     * can control its own visibility of/through the cache.
+     */
+    for ( i = 0; i < nr_mfns; i++ )
+        flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
+
+    return pg;
+
+out_err:
+    for ( unsigned long j = 0; j < i; j++ )
+        pg[j].count_info = PGC_reserved | PGC_state_free;
+
+    spin_unlock(&heap_lock);
+
+    return NULL;
+}
+
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
+                                   unsigned long nr_mfns, unsigned int memflags)
+{
+    struct page_info *pg;
+
+    ASSERT(!in_irq());
+
+    pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
+    if ( !pg )
+        return -ENOENT;
+
+    if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
+    {
+        /*
+         * Respective handling omitted here because right now
+         * acquired static memory is only for guest RAM.
+         */
+        ASSERT_UNREACHABLE();
+        return -EINVAL;
+    }
+
+    if ( assign_pages(d, pg, nr_mfns, memflags) )
+    {
+        free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
+        return -EINVAL;
+    }
+
+    return 0;
 }
 #endif
 
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index ded74d29da..7b5e7b7f69 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -108,6 +108,9 @@  struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap     PG_shift(2)
 #define PGC_xen_heap      PG_mask(1, 2)
+  /* Page is reserved */
+#define _PGC_reserved     PG_shift(3)
+#define PGC_reserved      PG_mask(1, 3)
 /* ... */
 /* Page is broken? */
 #define _PGC_broken       PG_shift(7)
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index f243ff88d7..6d83b7894b 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -89,6 +89,8 @@  bool scrub_free_pages(void);
 /* These functions are for static memory */
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                           bool need_scrub);
+int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned long nr_mfns,
+                            unsigned int memflags);
 #endif
 
 /* Map machine page range in Xen virtual address space. */