
[V4,08/10] xen/arm: introduce acquire_staticmem_pages and acquire_domstatic_pages

Message ID 20210728102758.3269446-9-penny.zheng@arm.com (mailing list archive)
State Superseded
Series Domain on Static Allocation

Commit Message

Penny Zheng July 28, 2021, 10:27 a.m. UTC
acquire_staticmem_pages aims to acquire nr_mfns contiguous pages of
static memory. It is the equivalent of alloc_heap_pages for static
memory; for now it only covers acquiring pre-configured static memory.

For each page, it shall check that the page is reserved (PGC_reserved)
and free. It shall also perform the necessary initialisation, mostly
the same as in alloc_heap_pages, e.g. following the same cache-coherency
policy and turning the page state into PGC_state_inuse.

acquire_domstatic_pages is the equivalent of alloc_domheap_pages for
static memory: it acquires nr_mfns contiguous pages of static memory
and assigns them to one specific domain.

It uses acquire_staticmem_pages to acquire the nr_mfns pre-configured
pages of static memory, then on success uses assign_pages to assign
those pages to the specified domain.

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
v4 changes:
- move the TLB/cache flush outside of the locked region, considering XSA-364
and reducing the amount of work happening with the heap_lock held
- remove the MEMF_no_refcount case
- make acquire_staticmem_pages/acquire_domstatic_pages __init
---
 xen/common/page_alloc.c | 108 +++++++++++++++++++++++++++++++++++++++-
 xen/include/xen/mm.h    |   3 ++
 2 files changed, 109 insertions(+), 2 deletions(-)
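
For context, here is a minimal sketch of how a boot-time caller might use the
new helper. The caller name and the memory values are illustrative only, not
part of this series:

    /* Hypothetical boot-time caller; the bank location and size are examples. */
    static int __init assign_static_bank(struct domain *d)
    {
        mfn_t smfn = _mfn(0x48000);        /* example bank base 0x48000000 */
        unsigned long nr_mfns = 0x40000;   /* example: 1GB of 4K pages */
        struct page_info *pg;

        /* Acquire the pre-configured pages and assign them to d in one go. */
        pg = acquire_domstatic_pages(d, nr_mfns, smfn, 0);
        if ( !pg )
            return -ENOMEM;

        return 0;
    }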

Comments

Jan Beulich Aug. 5, 2021, 6:52 a.m. UTC | #1
On 28.07.2021 12:27, Penny Zheng wrote:
> @@ -1065,6 +1069,73 @@ static struct page_info *alloc_heap_pages(
>      return pg;
>  }
>  
> +#ifdef CONFIG_STATIC_MEMORY
> +/*
> + * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
> + * static memory.
> + */
> +static struct page_info * __init acquire_staticmem_pages(unsigned long nr_mfns,
> +                                                         mfn_t smfn,
> +                                                         unsigned int memflags)
> +{
> +    bool need_tlbflush = false;
> +    uint32_t tlbflush_timestamp = 0;
> +    unsigned long i;
> +    struct page_info *pg;
> +
> +    /* For now, it only supports pre-configured static memory. */
> +    if ( !mfn_valid(smfn) || !nr_mfns )
> +        return NULL;
> +
> +    spin_lock(&heap_lock);
> +
> +    pg = mfn_to_page(smfn);

Nit: This can easily be done prior to acquiring the lock. And since PDX
compression causes this to be a non-trivial calculation, it is probably
better that way, despite the compiler then being forced to use a
non-call-clobbered register to hold the variable (it might do so anyway,
but as far as I can tell nothing currently requires this).
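
I.e. the shape would become something like this (illustrative sketch only):

    pg = mfn_to_page(smfn);    /* non-trivial under PDX compression; do it unlocked */

    spin_lock(&heap_lock);

    /* ... per-page checks and initialisation as in the patch ... */

    spin_unlock(&heap_lock);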

> +    for ( i = 0; i < nr_mfns; i++ )
> +    {
> +        /*
> +         * Reference count must continuously be zero for free pages
> +         * of static memory(PGC_reserved).
> +         */
> +        if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
> +        {
> +            printk(XENLOG_ERR
> +                   "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
> +                   i, mfn_x(page_to_mfn(pg + i)),

"mfn_x(smfn) + i" would cause less code to be generated, again due to
PDX compression making the transformation a non-trivial operation.
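
I.e. (sketch):

    printk(XENLOG_ERR
           "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
           i, mfn_x(smfn) + i,    /* plain addition instead of page_to_mfn() */
           pg[i].count_info, pg[i].tlbflush_timestamp);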

> +                   pg[i].count_info, pg[i].tlbflush_timestamp);
> +            BUG();
> +        }
> +
> +        if ( !(memflags & MEMF_no_tlbflush) )
> +            accumulate_tlbflush(&need_tlbflush, &pg[i],
> +                                &tlbflush_timestamp);
> +
> +        /*
> +         * Preserve flag PGC_reserved and change page state
> +         * to PGC_state_inuse.
> +         */
> +        pg[i].count_info = (PGC_reserved | PGC_state_inuse);

Nit: Parentheses aren't really needed here.

> @@ -2411,6 +2483,38 @@ struct page_info *alloc_domheap_pages(
>      return pg;
>  }
>  
> +#ifdef CONFIG_STATIC_MEMORY
> +/*
> + * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
> + * then assign them to one specific domain #d.
> + */
> +struct page_info * __init acquire_domstatic_pages(struct domain *d,
> +                                                  unsigned long nr_mfns,
> +                                                  mfn_t smfn, unsigned int memflags)
> +{
> +    struct page_info *pg = NULL;

Unnecessary initializer.

> +    ASSERT(!in_irq());
> +
> +    pg = acquire_staticmem_pages(nr_mfns, smfn, memflags);
> +    if ( !pg )
> +        return NULL;
> +
> +    /*
> +     * MEMF_no_owner/MEMF_no_refcount cases are missing here because
> +     * right now, acquired static memory is only for guest RAM.
> +     */

I think you want to ASSERT() or otherwise check that the two flags are
clear. Whether you then still deem the comment appropriate in this
form is up to you, I guess. Otoh ...

> +    ASSERT(d);

... I'm less convinced that this ASSERT() is very useful. At the very
least, if you use something other than, or more than, ASSERT() for the
checking above, this one should follow suit. Perhaps altogether

    if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
    {
        /*
         * Respective handling omitted here because right now
         * acquired static memory is only for guest RAM.
         */
        ASSERT_UNREACHABLE();
        return NULL;
    }

?

Jan
Julien Grall Aug. 13, 2021, 1 p.m. UTC | #2
Hi Penny,

On 28/07/2021 11:27, Penny Zheng wrote:
> acquire_staticmem_pages aims to acquire nr_mfns contiguous pages of
> static memory. It is the equivalent of alloc_heap_pages for static
> memory; for now it only covers acquiring pre-configured static memory.
> 
> For each page, it shall check that the page is reserved (PGC_reserved)
> and free. It shall also perform the necessary initialisation, mostly
> the same as in alloc_heap_pages, e.g. following the same cache-coherency
> policy and turning the page state into PGC_state_inuse.
> 
> acquire_domstatic_pages is the equivalent of alloc_domheap_pages for
> static memory: it acquires nr_mfns contiguous pages of static memory
> and assigns them to one specific domain.
> 
> It uses acquire_staticmem_pages to acquire the nr_mfns pre-configured
> pages of static memory, then on success uses assign_pages to assign
> those pages to the specified domain.
> 
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> ---
> v4 changes:
> - move the TLB/cache flush outside of the locked region, considering XSA-364
> and reducing the amount of work happening with the heap_lock held
> - remove the MEMF_no_refcount case
> - make acquire_staticmem_pages/acquire_domstatic_pages __init
> ---
>   xen/common/page_alloc.c | 108 +++++++++++++++++++++++++++++++++++++++-
>   xen/include/xen/mm.h    |   3 ++
>   2 files changed, 109 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index e279c6f713..b0edaf12b3 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -151,6 +151,10 @@
>   #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
>   #endif
>   
> +#ifndef CONFIG_STATIC_MEMORY
> +#define PGC_reserved 0
> +#endif
> +
>   /*
>    * Comma-separated list of hexadecimal page numbers containing bad bytes.
>    * e.g. 'badpage=0x3f45,0x8a321'.
> @@ -1065,6 +1069,73 @@ static struct page_info *alloc_heap_pages(
>       return pg;
>   }
>   
> +#ifdef CONFIG_STATIC_MEMORY

Rather than having multiple #ifdefs in the code, could we bundle all the
functions for static allocation in a single place?
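
E.g. the layout could become (a rough sketch; where exactly the section would
sit in page_alloc.c is left open):

    #ifdef CONFIG_STATIC_MEMORY
    /* All static-memory helpers grouped in one guarded section. */

    void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                              bool need_scrub)
    {
        /* existing body */
    }

    static struct page_info * __init acquire_staticmem_pages(unsigned long nr_mfns,
                                                             mfn_t smfn,
                                                             unsigned int memflags)
    {
        /* body as in this patch */
    }

    struct page_info * __init acquire_domstatic_pages(struct domain *d,
                                                      unsigned long nr_mfns,
                                                      mfn_t smfn,
                                                      unsigned int memflags)
    {
        /* body as in this patch */
    }
    #endif /* CONFIG_STATIC_MEMORY */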

> +/*
> + * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
> + * static memory.
> + */
> +static struct page_info * __init acquire_staticmem_pages(unsigned long nr_mfns,
> +                                                         mfn_t smfn,
> +                                                         unsigned int memflags)

NIT: I find it more intuitive if we pass the start MFN first and then the
number of pages, so this can be seen as a range.

If you agree with that, then the caller would also have to be changed.
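
I.e. the prototype would become something like (sketch):

    static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
                                                             unsigned long nr_mfns,
                                                             unsigned int memflags);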

> +{
> +    bool need_tlbflush = false;
> +    uint32_t tlbflush_timestamp = 0;
> +    unsigned long i;
> +    struct page_info *pg;
> +
> +    /* For now, it only supports pre-configured static memory. */

This comment doesn't seem to match the check below.

> +    if ( !mfn_valid(smfn) || !nr_mfns )

This check only guarantees that there will be a page for the first MFN. 
Shouldn't we also check for the other MFNs?

> +        return NULL;
> +
> +    spin_lock(&heap_lock);
> +
> +    pg = mfn_to_page(smfn);
> +
> +    for ( i = 0; i < nr_mfns; i++ )
> +    {
> +        /*
> +         * Reference count must continuously be zero for free pages
> +         * of static memory(PGC_reserved).
> +         */

How about: "The page should be reserved and not yet allocated"?

> +        if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
> +        {
> +            printk(XENLOG_ERR
> +                   "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
> +                   i, mfn_x(page_to_mfn(pg + i)),
> +                   pg[i].count_info, pg[i].tlbflush_timestamp);
> +            BUG();

This BUG() can be easily hit by misconfiguring the Device-Tree. I think 
it would be best if we return an error and revert the changes.

> +        }
> +
> +        if ( !(memflags & MEMF_no_tlbflush) )
> +            accumulate_tlbflush(&need_tlbflush, &pg[i],
> +                                &tlbflush_timestamp);
> +
> +        /*
> +         * Preserve flag PGC_reserved and change page state
> +         * to PGC_state_inuse.
> +         */
> +        pg[i].count_info = (PGC_reserved | PGC_state_inuse);
> +        /* Initialise fields which have other uses for free pages. */
> +        pg[i].u.inuse.type_info = 0;
> +        page_set_owner(&pg[i], NULL);
> +    }
> +
> +    spin_unlock(&heap_lock);
> +
> +    if ( need_tlbflush )
> +        filtered_flush_tlb_mask(tlbflush_timestamp);
> +
> +    /*
> +     * Ensure cache and RAM are consistent for platforms where the guest
> +     * can control its own visibility of/through the cache.
> +     */
> +    for ( i = 0; i < nr_mfns; i++ )
> +        flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
> +
> +    return pg;
> +}
> +#endif
> +
>   /* Remove any offlined page in the buddy pointed to by head. */
>   static int reserve_offlined_page(struct page_info *head)
>   {
> @@ -2306,7 +2377,7 @@ int assign_pages(
>   
>           for ( i = 0; i < nr; i++ )
>           {
> -            ASSERT(!(pg[i].count_info & ~PGC_extra));
> +            ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
>               if ( pg[i].count_info & PGC_extra )
>                   extra_pages++;
>           }
> @@ -2345,7 +2416,8 @@ int assign_pages(
>           page_set_owner(&pg[i], d);
>           smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
>           pg[i].count_info =
> -            (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
> +            (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
> +
>           page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
>       }
>   
> @@ -2411,6 +2483,38 @@ struct page_info *alloc_domheap_pages(
>       return pg;
>   }
>   
> +#ifdef CONFIG_STATIC_MEMORY
> +/*
> + * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
> + * then assign them to one specific domain #d.
> + */
> +struct page_info * __init acquire_domstatic_pages(struct domain *d,
> +                                                  unsigned long nr_mfns,
> +                                                  mfn_t smfn, unsigned int memflags)
> +{
> +    struct page_info *pg = NULL;
> +
> +    ASSERT(!in_irq());
> +
> +    pg = acquire_staticmem_pages(nr_mfns, smfn, memflags);
> +    if ( !pg )
> +        return NULL;
> +
> +    /*
> +     * MEMF_no_owner/MEMF_no_refcount cases are missing here because
> +     * right now, acquired static memory is only for guest RAM.
> +     */
> +    ASSERT(d);
> +    if ( assign_pages(pg, nr_mfns, d, memflags) )
> +    {
> +        free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
> +        return NULL;
> +    }
> +
> +    return pg;
> +}
> +#endif
> +
>   void free_domheap_pages(struct page_info *pg, unsigned int order)
>   {
>       struct domain *d = page_get_owner(pg);
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index 2e75cdcbb7..62e8e2ad61 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -89,6 +89,9 @@ bool scrub_free_pages(void);
>   /* These functions are for static memory */
>   void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
>                             bool need_scrub);
> +struct page_info *acquire_domstatic_pages(struct domain *d,
> +                                          unsigned long nr_mfns, mfn_t smfn,
> +                                          unsigned int memflags);
>   #endif
>   
>   /* Map machine page range in Xen virtual address space. */
> 

Cheers,
Penny Zheng Aug. 16, 2021, 6:43 a.m. UTC | #3
Hi Julien,

> -----Original Message-----
> From: Julien Grall <julien@xen.org>
> Sent: Friday, August 13, 2021 9:00 PM
> To: Penny Zheng <Penny.Zheng@arm.com>; xen-devel@lists.xenproject.org;
> sstabellini@kernel.org
> Cc: Bertrand Marquis <Bertrand.Marquis@arm.com>; Wei Chen
> <Wei.Chen@arm.com>; nd <nd@arm.com>
> Subject: Re: [PATCH V4 08/10] xen/arm: introduce acquire_staticmem_pages
> and acquire_domstatic_pages
> 
> Hi Penny,
> 
> On 28/07/2021 11:27, Penny Zheng wrote:
> > acquire_staticmem_pages aims to acquire nr_mfns contiguous pages of
> > static memory. It is the equivalent of alloc_heap_pages for static
> > memory; for now it only covers acquiring pre-configured static memory.
> >
> > For each page, it shall check that the page is reserved (PGC_reserved)
> > and free. It shall also perform the necessary initialisation, mostly
> > the same as in alloc_heap_pages, e.g. following the same cache-coherency
> > policy and turning the page state into PGC_state_inuse.
> >
> > acquire_domstatic_pages is the equivalent of alloc_domheap_pages for
> > static memory: it acquires nr_mfns contiguous pages of static memory
> > and assigns them to one specific domain.
> >
> > It uses acquire_staticmem_pages to acquire the nr_mfns pre-configured
> > pages of static memory, then on success uses assign_pages to assign
> > those pages to the specified domain.
> >
> > Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> > ---
> > v4 changes:
> > - move the TLB/cache flush outside of the locked region, considering
> > XSA-364 and reducing the amount of work happening with the heap_lock
> > held
> > - remove the MEMF_no_refcount case
> > - make acquire_staticmem_pages/acquire_domstatic_pages __init
> > ---
> >   xen/common/page_alloc.c | 108
> +++++++++++++++++++++++++++++++++++++++-
> >   xen/include/xen/mm.h    |   3 ++
> >   2 files changed, 109 insertions(+), 2 deletions(-)
> >
> > diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c index
> > e279c6f713..b0edaf12b3 100644
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -151,6 +151,10 @@
> >   #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
> >   #endif
> >
> > +#ifndef CONFIG_STATIC_MEMORY
> > +#define PGC_reserved 0
> > +#endif
> > +
> >   /*
> >    * Comma-separated list of hexadecimal page numbers containing bad
> bytes.
> >    * e.g. 'badpage=0x3f45,0x8a321'.
> > @@ -1065,6 +1069,73 @@ static struct page_info *alloc_heap_pages(
> >       return pg;
> >   }
> >
> > +#ifdef CONFIG_STATIC_MEMORY
> 
> Rather than having multiple #ifdefs in the code, could we bundle all the
> functions for static allocation in a single place?
> 

Sure. I'll reorganize them. 

> > +/*
> > + * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
> > + * static memory.
> > + */
> > +static struct page_info * __init acquire_staticmem_pages(unsigned long
> nr_mfns,
> > +                                                         mfn_t smfn,
> > +                                                         unsigned int
> > +memflags)
> 
> NIT: I find it more intuitive if we pass the start MFN first and then the
> number of pages, so this can be seen as a range.
> 
> If you agree with that, then the caller would also have to be changed.
> 

Sure, it's clearer that way.

> > +{
> > +    bool need_tlbflush = false;
> > +    uint32_t tlbflush_timestamp = 0;
> > +    unsigned long i;
> > +    struct page_info *pg;
> > +
> > +    /* For now, it only supports pre-configured static memory. */
> 
> This comment doesn't seem to match the check below.
> 
> > +    if ( !mfn_valid(smfn) || !nr_mfns )
> 
> This check only guarantees that there will be a page for the first MFN.
> Shouldn't we also check for the other MFNs?
> 

Hmm, do you think the whole range [smfn, smfn + nr_mfns) should be checked?
Since the MFNs grow linearly, maybe adding another check of "!mfn_valid(smfn + nr_mfns - 1)"
is enough?

> > +        return NULL;
> > +
> > +    spin_lock(&heap_lock);
> > +
> > +    pg = mfn_to_page(smfn);
> > +
> > +    for ( i = 0; i < nr_mfns; i++ )
> > +    {
> > +        /*
> > +         * Reference count must continuously be zero for free pages
> > +         * of static memory(PGC_reserved).
> > +         */
> 
> How about: "The page should be reserved and not yet allocated"?
>

Sure.

> > +        if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
> > +        {
> > +            printk(XENLOG_ERR
> > +                   "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
> > +                   i, mfn_x(page_to_mfn(pg + i)),
> > +                   pg[i].count_info, pg[i].tlbflush_timestamp);
> > +            BUG();
> 
> This BUG() can be easily hit by misconfiguring the Device-Tree. I think it would
> be best if we return an error and revert the changes.
> 

Ok. I'll return NULL.

And about the reverting part, do you mean changing the state of the pages back
to "PGC_state_free | PGC_reserved", like the following:
"
out_error:
    for ( unsigned long j = 0; j < i; j++ )
        pg[j].count_info = PGC_state_free | PGC_reserved;
"
 
> > +        }
> > +
> > +        if ( !(memflags & MEMF_no_tlbflush) )
> > +            accumulate_tlbflush(&need_tlbflush, &pg[i],
> > +                                &tlbflush_timestamp);
> > +
> > +        /*
> > +         * Preserve flag PGC_reserved and change page state
> > +         * to PGC_state_inuse.
> > +         */
> > +        pg[i].count_info = (PGC_reserved | PGC_state_inuse);
> > +        /* Initialise fields which have other uses for free pages. */
> > +        pg[i].u.inuse.type_info = 0;
> > +        page_set_owner(&pg[i], NULL);
> > +    }
> > +
> > +    spin_unlock(&heap_lock);
> > +
> > +    if ( need_tlbflush )
> > +        filtered_flush_tlb_mask(tlbflush_timestamp);
> > +
> > +    /*
> > +     * Ensure cache and RAM are consistent for platforms where the guest
> > +     * can control its own visibility of/through the cache.
> > +     */
> > +    for ( i = 0; i < nr_mfns; i++ )
> > +        flush_page_to_ram(mfn_x(smfn) + i, !(memflags &
> > + MEMF_no_icache_flush));
> > +
> > +    return pg;
> > +}
> > +#endif
> > +
> >   /* Remove any offlined page in the buddy pointed to by head. */
> >   static int reserve_offlined_page(struct page_info *head)
> >   {
> > @@ -2306,7 +2377,7 @@ int assign_pages(
> >
> >           for ( i = 0; i < nr; i++ )
> >           {
> > -            ASSERT(!(pg[i].count_info & ~PGC_extra));
> > +            ASSERT(!(pg[i].count_info & ~(PGC_extra |
> > + PGC_reserved)));
> >               if ( pg[i].count_info & PGC_extra )
> >                   extra_pages++;
> >           }
> > @@ -2345,7 +2416,8 @@ int assign_pages(
> >           page_set_owner(&pg[i], d);
> >           smp_wmb(); /* Domain pointer must be visible before updating refcnt.
> */
> >           pg[i].count_info =
> > -            (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
> > +            (pg[i].count_info & (PGC_extra | PGC_reserved)) |
> > + PGC_allocated | 1;
> > +
> >           page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
> >       }
> >
> > @@ -2411,6 +2483,38 @@ struct page_info *alloc_domheap_pages(
> >       return pg;
> >   }
> >
> > +#ifdef CONFIG_STATIC_MEMORY
> > +/*
> > + * Acquire nr_mfns contiguous pages, starting at #smfn, of static
> > +memory,
> > + * then assign them to one specific domain #d.
> > + */
> > +struct page_info * __init acquire_domstatic_pages(struct domain *d,
> > +                                                  unsigned long nr_mfns,
> > +                                                  mfn_t smfn,
> > +unsigned int memflags) {
> > +    struct page_info *pg = NULL;
> > +
> > +    ASSERT(!in_irq());
> > +
> > +    pg = acquire_staticmem_pages(nr_mfns, smfn, memflags);
> > +    if ( !pg )
> > +        return NULL;
> > +
> > +    /*
> > +     * MEMF_no_owner/MEMF_no_refcount cases are missing here because
> > +     * right now, acquired static memory is only for guest RAM.
> > +     */
> > +    ASSERT(d);
> > +    if ( assign_pages(pg, nr_mfns, d, memflags) )
> > +    {
> > +        free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
> > +        return NULL;
> > +    }
> > +
> > +    return pg;
> > +}
> > +#endif
> > +
> >   void free_domheap_pages(struct page_info *pg, unsigned int order)
> >   {
> >       struct domain *d = page_get_owner(pg); diff --git
> > a/xen/include/xen/mm.h b/xen/include/xen/mm.h index
> > 2e75cdcbb7..62e8e2ad61 100644
> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -89,6 +89,9 @@ bool scrub_free_pages(void);
> >   /* These functions are for static memory */
> >   void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> >                             bool need_scrub);
> > +struct page_info *acquire_domstatic_pages(struct domain *d,
> > +                                          unsigned long nr_mfns, mfn_t smfn,
> > +                                          unsigned int memflags);
> >   #endif
> >
> >   /* Map machine page range in Xen virtual address space. */
> >
> 
> Cheers,
> 
> --

Cheers

--
Penny Zheng
> Julien Grall
Julien Grall Aug. 16, 2021, 5:43 p.m. UTC | #4
On 16/08/2021 07:43, Penny Zheng wrote:
> Hi Julien,

Hi,

>>> +{
>>> +    bool need_tlbflush = false;
>>> +    uint32_t tlbflush_timestamp = 0;
>>> +    unsigned long i;
>>> +    struct page_info *pg;
>>> +
>>> +    /* For now, it only supports pre-configured static memory. */
>>
>> This comment doesn't seem to match the check below.
>>
>>> +    if ( !mfn_valid(smfn) || !nr_mfns )
>>
>> This check only guarantees that there will be a page for the first MFN.
>> Shouldn't we also check for the other MFNs?
>>
> 
> Hmm, do you think the whole range [smfn, smfn + nr_mfns) should be checked?
> Since the MFNs grow linearly, maybe adding another check of "!mfn_valid(smfn + nr_mfns - 1)"
> is enough?

The only requirement for Xen is to provide a valid struct page for each 
RAM page. So checking the first and last pages may not be sufficient if 
there is a hole in the middle.

My point here is either:
   - we trust the callers, so we remove the mfn_valid() check
   - we don't trust the callers, so we check that the MFN is valid for every page

My preference is the latter, all the more if we plan to use the helper after 
boot in the future. A possible compromise is to add a comment that this 
function needs to be reworked if used outside of boot.
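
A sketch of the per-page variant, checked before taking the heap_lock
(mfn_add() used for illustration):

    for ( i = 0; i < nr_mfns; i++ )
        if ( !mfn_valid(mfn_add(smfn, i)) )
            return NULL;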

Cheers,
Penny Zheng Aug. 17, 2021, 2:33 a.m. UTC | #5
> -----Original Message-----
> From: Julien Grall <julien@xen.org>
> Sent: Tuesday, August 17, 2021 1:44 AM
> To: Penny Zheng <Penny.Zheng@arm.com>; xen-devel@lists.xenproject.org;
> sstabellini@kernel.org
> Cc: Bertrand Marquis <Bertrand.Marquis@arm.com>; Wei Chen
> <Wei.Chen@arm.com>; nd <nd@arm.com>
> Subject: Re: [PATCH V4 08/10] xen/arm: introduce acquire_staticmem_pages
> and acquire_domstatic_pages
> 
> 
> 
> On 16/08/2021 07:43, Penny Zheng wrote:
> > Hi Julien,
> 
> Hi,
> 
> >>> +{
> >>> +    bool need_tlbflush = false;
> >>> +    uint32_t tlbflush_timestamp = 0;
> >>> +    unsigned long i;
> >>> +    struct page_info *pg;
> >>> +
> >>> +    /* For now, it only supports pre-configured static memory. */
> >>
> >> This comment doesn't seem to match the check below.
> >>
> >>> +    if ( !mfn_valid(smfn) || !nr_mfns )
> >>
> >> This check only guarantees that there will be a page for the first MFN.
> >> Shouldn't we also check for the other MFNs?
> >>
> >
> > Hmm, do you think the whole range [smfn, smfn + nr_mfns) should be
> > checked? Since the MFNs grow linearly, maybe adding another check of
> > "!mfn_valid(smfn + nr_mfns - 1)" is enough?
> 
> The only requirement for Xen is to provide a valid struct page for each RAM
> page. So checking the first and last pages may not be sufficient if there is a hole
> in the middle.
> 
> My point here is either:
>    - we trust the callers, so we remove the mfn_valid() check
>    - we don't trust the callers, so we check that the MFN is valid for every page
> 
> My preference is the latter, all the more if we plan to use the helper after boot in
> the future. A possible compromise is to add a comment that this function
> needs to be reworked if used outside of boot.
> 

Ok. I'll do the whole range check and add the comments.

> Cheers,
> 
> 
> --
> Julien Grall

Patch

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index e279c6f713..b0edaf12b3 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -151,6 +151,10 @@ 
 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
 #endif
 
+#ifndef CONFIG_STATIC_MEMORY
+#define PGC_reserved 0
+#endif
+
 /*
  * Comma-separated list of hexadecimal page numbers containing bad bytes.
  * e.g. 'badpage=0x3f45,0x8a321'.
@@ -1065,6 +1069,73 @@  static struct page_info *alloc_heap_pages(
     return pg;
 }
 
+#ifdef CONFIG_STATIC_MEMORY
+/*
+ * Acquire nr_mfns contiguous reserved pages, starting at #smfn, of
+ * static memory.
+ */
+static struct page_info * __init acquire_staticmem_pages(unsigned long nr_mfns,
+                                                         mfn_t smfn,
+                                                         unsigned int memflags)
+{
+    bool need_tlbflush = false;
+    uint32_t tlbflush_timestamp = 0;
+    unsigned long i;
+    struct page_info *pg;
+
+    /* For now, it only supports pre-configured static memory. */
+    if ( !mfn_valid(smfn) || !nr_mfns )
+        return NULL;
+
+    spin_lock(&heap_lock);
+
+    pg = mfn_to_page(smfn);
+
+    for ( i = 0; i < nr_mfns; i++ )
+    {
+        /*
+         * Reference count must continuously be zero for free pages
+         * of static memory(PGC_reserved).
+         */
+        if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+        {
+            printk(XENLOG_ERR
+                   "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
+                   i, mfn_x(page_to_mfn(pg + i)),
+                   pg[i].count_info, pg[i].tlbflush_timestamp);
+            BUG();
+        }
+
+        if ( !(memflags & MEMF_no_tlbflush) )
+            accumulate_tlbflush(&need_tlbflush, &pg[i],
+                                &tlbflush_timestamp);
+
+        /*
+         * Preserve flag PGC_reserved and change page state
+         * to PGC_state_inuse.
+         */
+        pg[i].count_info = (PGC_reserved | PGC_state_inuse);
+        /* Initialise fields which have other uses for free pages. */
+        pg[i].u.inuse.type_info = 0;
+        page_set_owner(&pg[i], NULL);
+    }
+
+    spin_unlock(&heap_lock);
+
+    if ( need_tlbflush )
+        filtered_flush_tlb_mask(tlbflush_timestamp);
+
+    /*
+     * Ensure cache and RAM are consistent for platforms where the guest
+     * can control its own visibility of/through the cache.
+     */
+    for ( i = 0; i < nr_mfns; i++ )
+        flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
+
+    return pg;
+}
+#endif
+
 /* Remove any offlined page in the buddy pointed to by head. */
 static int reserve_offlined_page(struct page_info *head)
 {
@@ -2306,7 +2377,7 @@  int assign_pages(
 
         for ( i = 0; i < nr; i++ )
         {
-            ASSERT(!(pg[i].count_info & ~PGC_extra));
+            ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
             if ( pg[i].count_info & PGC_extra )
                 extra_pages++;
         }
@@ -2345,7 +2416,8 @@  int assign_pages(
         page_set_owner(&pg[i], d);
         smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
         pg[i].count_info =
-            (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
+            (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+
         page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
     }
 
@@ -2411,6 +2483,38 @@  struct page_info *alloc_domheap_pages(
     return pg;
 }
 
+#ifdef CONFIG_STATIC_MEMORY
+/*
+ * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
+ * then assign them to one specific domain #d.
+ */
+struct page_info * __init acquire_domstatic_pages(struct domain *d,
+                                                  unsigned long nr_mfns,
+                                                  mfn_t smfn, unsigned int memflags)
+{
+    struct page_info *pg = NULL;
+
+    ASSERT(!in_irq());
+
+    pg = acquire_staticmem_pages(nr_mfns, smfn, memflags);
+    if ( !pg )
+        return NULL;
+
+    /*
+     * MEMF_no_owner/MEMF_no_refcount cases are missing here because
+     * right now, acquired static memory is only for guest RAM.
+     */
+    ASSERT(d);
+    if ( assign_pages(pg, nr_mfns, d, memflags) )
+    {
+        free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
+        return NULL;
+    }
+
+    return pg;
+}
+#endif
+
 void free_domheap_pages(struct page_info *pg, unsigned int order)
 {
     struct domain *d = page_get_owner(pg);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 2e75cdcbb7..62e8e2ad61 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -89,6 +89,9 @@  bool scrub_free_pages(void);
 /* These functions are for static memory */
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                           bool need_scrub);
+struct page_info *acquire_domstatic_pages(struct domain *d,
+                                          unsigned long nr_mfns, mfn_t smfn,
+                                          unsigned int memflags);
 #endif
 
 /* Map machine page range in Xen virtual address space. */