[v4,6/6] xen: retrieve reserved pages on populate_physmap

Message ID 20220510022733.2422581-7-Penny.Zheng@arm.com (mailing list archive)
State New, archived
Series: populate/unpopulate memory when domain on static

Commit Message

Penny Zheng May 10, 2022, 2:27 a.m. UTC
When static domain populates memory through populate_physmap on runtime,
other than allocating from heap, it shall retrieve reserved pages from
resv_page_list to make sure that guest RAM is still restricted in statically
configured memory regions. And this commit introduces a new helper
acquire_reserved_page to make it work.

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
---
v4 changes:
- drop __init from acquire_domstatic_pages (previously missed)
- add the page back to the reserved list in case of error
- remove redundant printk
- refine log message and make it warn level
---
v3 changes:
- move is_domain_using_staticmem to the common header file
- remove #ifdef CONFIG_STATIC_MEMORY guards
- remove meaningless page_to_mfn(page) in error log
---
v2 changes:
- introduce acquire_reserved_page to retrieve reserved pages from
resv_page_list
- forbid non-zero-order requests in populate_physmap
- let is_domain_static return ((void)(d), false) on x86
---
 xen/common/memory.c      | 23 +++++++++++++++++++++++
 xen/common/page_alloc.c  | 35 +++++++++++++++++++++++++++++++++--
 xen/include/xen/domain.h |  4 ++++
 xen/include/xen/mm.h     |  1 +
 4 files changed, 61 insertions(+), 2 deletions(-)

Comments

Julien Grall May 16, 2022, 6:29 p.m. UTC | #1
Hi Penny,

On 10/05/2022 03:27, Penny Zheng wrote:
> When static domain populates memory through populate_physmap on runtime,

Typo: s/when static/when a static/ or "when static domains populate"

s/on runtime/at runtime/

> other than allocating from heap, it shall retrieve reserved pages from

I am not sure I understand the part before the comma. But it doesn't 
sound necessary, so maybe drop it?

> resv_page_list to make sure that guest RAM is still restricted in statically
> configured memory regions. And this commit introduces a new helper
> acquire_reserved_page to make it work.
> 
> Signed-off-by: Penny Zheng <penny.zheng@arm.com>

[...]

> diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> index 290526adaf..06e7037a28 100644
> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -2740,8 +2740,8 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
>    * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
>    * then assign them to one specific domain #d.
>    */
> -int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
> -                                   unsigned int nr_mfns, unsigned int memflags)
> +int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
> +                            unsigned int memflags)
>   {
>       struct page_info *pg;
>   
> @@ -2769,12 +2769,43 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
>   
>       return 0;
>   }
> +
> +/*
> + * Acquire a page from the reserved page list (resv_page_list) when
> + * populating memory for a static domain at runtime.
> + */
> +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> +{
> +    struct page_info *page;
> +    mfn_t smfn;
> +
> +    /* Acquire a page from the reserved page list (resv_page_list). */
> +    page = page_list_remove_head(&d->resv_page_list);
Alloc/free of memory can happen concurrently, so access to resv_page_list 
needs to be protected with a spinlock (most likely d->page_alloc_lock).
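
Something like this, as a minimal sketch (assuming d->page_alloc_lock is 
indeed the right lock to reuse here):

    spin_lock(&d->page_alloc_lock);
    page = page_list_remove_head(&d->resv_page_list);
    spin_unlock(&d->page_alloc_lock);

The error path below, which re-adds the page to resv_page_list, would need 
the same protection.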

> +    if ( unlikely(!page) )
> +        return INVALID_MFN;
> +
> +    smfn = page_to_mfn(page);
> +
> +    if ( acquire_domstatic_pages(d, smfn, 1, memflags) )

I am OK if we call acquire_domstatic_pages() for now. But long term, I 
think we should consider optimizing it, because we know the page is valid 
and belongs to the guest. So there is a lot of pointless work (checking 
mfn_valid(), scrubbing in the free path, cleaning the cache...).

> +    {
> +        page_list_add_tail(page, &d->resv_page_list);
> +        return INVALID_MFN;
> +    }
> +
> +    return smfn;
> +}
>   #else
>   void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
>                             bool need_scrub)
>   {
>       ASSERT_UNREACHABLE();
>   }
> +
> +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> +{
> +    ASSERT_UNREACHABLE();
> +    return INVALID_MFN;
> +}
>   #endif
>   
>   /*
> diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
> index 35dc7143a4..c613afa57e 100644
> --- a/xen/include/xen/domain.h
> +++ b/xen/include/xen/domain.h
> @@ -38,6 +38,10 @@ void arch_get_domain_info(const struct domain *d,
>   #define CDF_staticmem            (1U << 2)
>   #endif
>   
> +#ifndef is_domain_using_staticmem
> +#define is_domain_using_staticmem(d) ((void)(d), false)
> +#endif
> +
>   /*
>    * Arch-specifics.
>    */
> diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> index 9fd95deaec..74810e1f54 100644
> --- a/xen/include/xen/mm.h
> +++ b/xen/include/xen/mm.h
> @@ -92,6 +92,7 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
>   int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
>                               unsigned int memflags);
>   #endif
> +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
>   
>   /* Map machine page range in Xen virtual address space. */
>   int map_pages_to_xen(

Cheers,
Penny Zheng May 17, 2022, 6:24 a.m. UTC | #2
Hi Julien

> -----Original Message-----
> From: Julien Grall <julien@xen.org>
> Sent: Tuesday, May 17, 2022 2:29 AM
> To: Penny Zheng <Penny.Zheng@arm.com>; xen-devel@lists.xenproject.org
> Cc: Wei Chen <Wei.Chen@arm.com>; Andrew Cooper
> <andrew.cooper3@citrix.com>; George Dunlap <george.dunlap@citrix.com>;
> Jan Beulich <jbeulich@suse.com>; Stefano Stabellini <sstabellini@kernel.org>;
> Wei Liu <wl@xen.org>
> Subject: Re: [PATCH v4 6/6] xen: retrieve reserved pages on populate_physmap
> 
> Hi Penny,
> 
> On 10/05/2022 03:27, Penny Zheng wrote:
> > When static domain populates memory through populate_physmap on
> > runtime,
> 
> Typo: s/when static/when a static/ or "when static domains populate"
> 
> s/on runtime/at runtime/
> 

Sure, 

> > other than allocating from heap, it shall retrieve reserved pages from
> 
> I am not sure I understand the part before the comma. But it doesn't sound
> necessary, so maybe drop it?
>  

Sure,

> > resv_page_list to make sure that guest RAM is still restricted in
> > statically configured memory regions. And this commit introduces a new
> > helper acquire_reserved_page to make it work.
> >
> > Signed-off-by: Penny Zheng <penny.zheng@arm.com>
> 
> [...]
> 
> > diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
> > index 290526adaf..06e7037a28 100644
> > --- a/xen/common/page_alloc.c
> > +++ b/xen/common/page_alloc.c
> > @@ -2740,8 +2740,8 @@ static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
> >    * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
> >    * then assign them to one specific domain #d.
> >    */
> > -int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
> > -                                   unsigned int nr_mfns, unsigned int memflags)
> > +int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
> > +                            unsigned int memflags)
> >   {
> >       struct page_info *pg;
> >
> > @@ -2769,12 +2769,43 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
> >
> >       return 0;
> >   }
> > +
> > +/*
> > + * Acquire a page from the reserved page list (resv_page_list) when
> > + * populating memory for a static domain at runtime.
> > + */
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> > +{
> > +    struct page_info *page;
> > +    mfn_t smfn;
> > +
> > +    /* Acquire a page from the reserved page list (resv_page_list). */
> > +    page = page_list_remove_head(&d->resv_page_list);
> Alloc/free of memory can happen concurrently, so access to resv_page_list
> needs to be protected with a spinlock (most likely d->page_alloc_lock).
>

Oh, understood, will fix.
 
> > +    if ( unlikely(!page) )
> > +        return INVALID_MFN;
> > +
> > +    smfn = page_to_mfn(page);
> > +
> > +    if ( acquire_domstatic_pages(d, smfn, 1, memflags) )
> 
> I am OK if we call acquire_domstatic_pages() for now. But long term, I think we
> should consider optimizing it, because we know the page is valid and belongs
> to the guest. So there is a lot of pointless work (checking mfn_valid(),
> scrubbing in the free path, cleaning the cache...).
> 

I'm willing to fix it here, since this fix is not blocking any other patch series~~
I'm considering that maybe we could add a new memflag MEMF_xxx (oh,
naming things is really "killing" me); then we could skip all this pointless
work (checking mfn_valid, flushing TLB and cache) by checking
memflags & MEMF_xxx.
Wdyt?
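
Roughly what I have in mind, as a sketch only (MEMF_staticmem_refill is a 
made-up name), applied inside acquire_staticmem_pages():

    /* Skip revalidation when refilling from the reserved page list. */
    if ( !(memflags & MEMF_staticmem_refill) )
        for ( i = 0; i < nr_mfns; i++ )
            if ( !mfn_valid(mfn_add(smfn, i)) )
                return NULL;
    ...
    /* Likewise for the cache maintenance at the end. */
    if ( !(memflags & MEMF_staticmem_refill) )
        for ( i = 0; i < nr_mfns; i++ )
            flush_page_to_ram(mfn_x(smfn) + i,
                              !(memflags & MEMF_no_icache_flush));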

> > +    {
> > +        page_list_add_tail(page, &d->resv_page_list);
> > +        return INVALID_MFN;
> > +    }
> > +
> > +    return smfn;
> > +}
> >   #else
> >   void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> >                             bool need_scrub)
> >   {
> >       ASSERT_UNREACHABLE();
> >   }
> > +
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> > +{
> > +    ASSERT_UNREACHABLE();
> > +    return INVALID_MFN;
> > +}
> >   #endif
> >
> >   /*
> > diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
> > index 35dc7143a4..c613afa57e 100644
> > --- a/xen/include/xen/domain.h
> > +++ b/xen/include/xen/domain.h
> > @@ -38,6 +38,10 @@ void arch_get_domain_info(const struct domain *d,
> >   #define CDF_staticmem            (1U << 2)
> >   #endif
> >
> > +#ifndef is_domain_using_staticmem
> > +#define is_domain_using_staticmem(d) ((void)(d), false)
> > +#endif
> > +
> >   /*
> >    * Arch-specifics.
> >    */
> > diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
> > index 9fd95deaec..74810e1f54 100644
> > --- a/xen/include/xen/mm.h
> > +++ b/xen/include/xen/mm.h
> > @@ -92,6 +92,7 @@ void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
> >   int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
> >                               unsigned int memflags);
> >   #endif
> > +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
> >
> >   /* Map machine page range in Xen virtual address space. */
> >   int map_pages_to_xen(
> 
> Cheers,
> 
> --
> Julien Grall
Julien Grall May 17, 2022, 8:48 a.m. UTC | #3
On 17/05/2022 07:24, Penny Zheng wrote:
> Hi Julien

Hi Penny,

>>> +    if ( unlikely(!page) )
>>> +        return INVALID_MFN;
>>> +
>>> +    smfn = page_to_mfn(page);
>>> +
>>> +    if ( acquire_domstatic_pages(d, smfn, 1, memflags) )
>>
>> I am OK if we call acquire_domstatic_pages() for now. But long term, I think we
>> should consider optimizing it, because we know the page is valid and belongs
>> to the guest. So there is a lot of pointless work (checking mfn_valid(),
>> scrubbing in the free path, cleaning the cache...).
>>
> 
> I'm willing to fix it here, since this fix is not blocking any other patch series~~
> I'm considering that maybe we could add a new memflag MEMF_xxx (oh,
> naming things is really "killing" me); then we could skip all this pointless
> work (checking mfn_valid, flushing TLB and cache) by checking
> memflags & MEMF_xxx.
> Wdyt?

I don't think we need a new flag because the decision is internal to the 
page allocator. Instead, acquire_staticmem_pages() could be split into two 
parts. Something like this (function names are placeholders):


static bool foo(struct page_info *pg,
	        unsigned long nr,
	        unsigned int memflags)
{
	spin_lock(&heap_lock);

	for ( i = 0; i < nr; i++ )
		...

	spin_unlock(&heap_lock);

	if ( need_tlbflush )
	  filtered...

  	return true;

out_err:
	for ( ... )
	  ...
	return false;
}

static struct page_info *bar(mfn_t smfn,
			     unsigned int nr_mfns,
			     unsigned int memflags)
{
	ASSERT(nr_mfns);
	for ( i = 0; i < nr_mfns; i++ )
	    if ( !mfn_valid(mfn_add(smfn, i)) )
		return NULL;

	pg = mfn_to_page(smfn);
	if ( !foo(...) )
	  return NULL;

	for ( i = 0; i < nr_mfns; i++ )
		flush_page_to_ram(...);

	return pg;
}


acquire_reserved_page() would then only call foo() and assign_pages().
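
As a rough, untested sketch of that (still using the placeholder names, and 
assuming assign_pages() keeps its current (pg, nr, d, memflags) signature):

    mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
    {
        struct page_info *page;

        /* Remove a page from resv_page_list, under the appropriate lock. */
        page = page_list_remove_head(&d->resv_page_list);
        if ( unlikely(!page) )
            return INVALID_MFN;

        if ( !foo(page, 1, memflags) || assign_pages(page, 1, d, memflags) )
        {
            /* Error handling elided: undo foo()'s work, give the page back. */
            page_list_add_tail(page, &d->resv_page_list);
            return INVALID_MFN;
        }

        return page_to_mfn(page);
    }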

Cheers,
Jan Beulich May 17, 2022, 4:16 p.m. UTC | #4
On 10.05.2022 04:27, Penny Zheng wrote:
> @@ -2769,12 +2769,43 @@ int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
>  
>      return 0;
>  }
> +
> +/*
> + * Acquire a page from the reserved page list (resv_page_list) when
> + * populating memory for a static domain at runtime.
> + */
> +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> +{
> +    struct page_info *page;
> +    mfn_t smfn;
> +
> +    /* Acquire a page from the reserved page list (resv_page_list). */
> +    page = page_list_remove_head(&d->resv_page_list);
> +    if ( unlikely(!page) )
> +        return INVALID_MFN;
> +
> +    smfn = page_to_mfn(page);
> +
> +    if ( acquire_domstatic_pages(d, smfn, 1, memflags) )
> +    {
> +        page_list_add_tail(page, &d->resv_page_list);
> +        return INVALID_MFN;
> +    }
> +
> +    return smfn;
> +}
>  #else
>  void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
>                            bool need_scrub)
>  {
>      ASSERT_UNREACHABLE();
>  }
> +
> +mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
> +{
> +    ASSERT_UNREACHABLE();
> +    return INVALID_MFN;
> +}
>  #endif

Much like for the other stub function added in the earlier patch: if
is_domain_using_staticmem() were a compile-time constant "false" when
!CONFIG_STATIC_MEM, there would be no need for this one, since the
compiler would DCE the only call site.
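
For illustration only (not part of the posted patch): with the fallback

    #define is_domain_using_staticmem(d) ((void)(d), false)

in effect, the new branch in populate_physmap()

    else if ( is_domain_using_staticmem(d) )
    {
        ...
        mfn = acquire_reserved_page(d, a->memflags);
        ...
    }

folds to "else if ( false )", so the only reference to
acquire_reserved_page() is compiled out and a mere declaration would
suffice in that configuration.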

Patch

diff --git a/xen/common/memory.c b/xen/common/memory.c
index f2d009843a..cb330ce877 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -245,6 +245,29 @@  static void populate_physmap(struct memop_args *a)
 
                 mfn = _mfn(gpfn);
             }
+            else if ( is_domain_using_staticmem(d) )
+            {
+                /*
+                 * No easy way to guarantee the retrieved pages are contiguous,
+                 * so forbid non-zero-order requests here.
+                 */
+                if ( a->extent_order != 0 )
+                {
+                    gdprintk(XENLOG_WARNING,
+                             "Cannot allocate static order-%u pages for static %pd\n",
+                             a->extent_order, d);
+                    goto out;
+                }
+
+                mfn = acquire_reserved_page(d, a->memflags);
+                if ( mfn_eq(mfn, INVALID_MFN) )
+                {
+                    gdprintk(XENLOG_WARNING,
+                             "%pd: failed to retrieve a reserved page\n",
+                             d);
+                    goto out;
+                }
+            }
             else
             {
                 page = alloc_domheap_pages(d, a->extent_order, a->memflags);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 290526adaf..06e7037a28 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2740,8 +2740,8 @@  static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
  * Acquire nr_mfns contiguous pages, starting at #smfn, of static memory,
  * then assign them to one specific domain #d.
  */
-int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
-                                   unsigned int nr_mfns, unsigned int memflags)
+int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
+                            unsigned int memflags)
 {
     struct page_info *pg;
 
@@ -2769,12 +2769,43 @@  int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
 
     return 0;
 }
+
+/*
+ * Acquire a page from the reserved page list (resv_page_list) when
+ * populating memory for a static domain at runtime.
+ */
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
+{
+    struct page_info *page;
+    mfn_t smfn;
+
+    /* Acquire a page from the reserved page list (resv_page_list). */
+    page = page_list_remove_head(&d->resv_page_list);
+    if ( unlikely(!page) )
+        return INVALID_MFN;
+
+    smfn = page_to_mfn(page);
+
+    if ( acquire_domstatic_pages(d, smfn, 1, memflags) )
+    {
+        page_list_add_tail(page, &d->resv_page_list);
+        return INVALID_MFN;
+    }
+
+    return smfn;
+}
 #else
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                           bool need_scrub)
 {
     ASSERT_UNREACHABLE();
 }
+
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags)
+{
+    ASSERT_UNREACHABLE();
+    return INVALID_MFN;
+}
 #endif
 
 /*
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index 35dc7143a4..c613afa57e 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -38,6 +38,10 @@  void arch_get_domain_info(const struct domain *d,
 #define CDF_staticmem            (1U << 2)
 #endif
 
+#ifndef is_domain_using_staticmem
+#define is_domain_using_staticmem(d) ((void)(d), false)
+#endif
+
 /*
  * Arch-specifics.
  */
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 9fd95deaec..74810e1f54 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -92,6 +92,7 @@  void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
                             unsigned int memflags);
 #endif
+mfn_t acquire_reserved_page(struct domain *d, unsigned int memflags);
 
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(