[v7,1/3] mm: keep PGC_extra pages on a separate list

Message ID 20200318173243.29183-2-paul@xen.org (mailing list archive)
State New, archived
Series make sure PGC_extra pages are dealt with properly

Commit Message

Paul Durrant March 18, 2020, 5:32 p.m. UTC
This patch adds a new page_list_head into struct domain to hold PGC_extra
pages. This avoids them being conflated with 'normal' domheap pages in code
that walks the domain's page_list.

A new dump loop is also added to dump_pageframe_info() to unconditionally
dump the 'extra page list'.
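
As an illustration (not part of the patch), a hypothetical walker shows what
the separation buys: code iterating d->page_list no longer needs to filter
out PGC_extra pages by hand, and the extra pages can be visited explicitly:

    static void audit_domain_pages(struct domain *d)
    {
        struct page_info *page;

        spin_lock(&d->page_alloc_lock);

        /* d->page_list now holds only 'normal' domheap pages... */
        page_list_for_each ( page, &d->page_list )
            ASSERT(!(page->count_info & PGC_extra));

        /* ...while PGC_extra pages live on their own list. */
        page_list_for_each ( page, &d->extra_page_list )
            ASSERT(page->count_info & PGC_extra);

        spin_unlock(&d->page_alloc_lock);
    }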

Signed-off-by: Paul Durrant <paul@xen.org>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <george.dunlap@citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Julien Grall <julien@xen.org>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Wei Liu <wl@xen.org>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>

v7:
 - Cosmetic changes

v6:
 - New in v6
---
 xen/arch/x86/domain.c    |  9 +++++++++
 xen/common/domain.c      |  1 +
 xen/common/page_alloc.c  |  2 +-
 xen/include/asm-x86/mm.h |  6 ++----
 xen/include/xen/mm.h     |  5 ++---
 xen/include/xen/sched.h  | 13 +++++++++++++
 6 files changed, 28 insertions(+), 8 deletions(-)

Comments

Julien Grall March 24, 2020, 2:34 p.m. UTC | #1
On 18/03/2020 17:32, Paul Durrant wrote:
> This patch adds a new page_list_head into struct domain to hold PGC_extra
> pages. This avoids them being conflated with 'normal' domheap pages in code
> that walks the domain's page_list.
> 
> A new dump loop is also added to dump_pageframe_info() to unconditionally
> dump the 'extra page list'.
> 
> Signed-off-by: Paul Durrant <paul@xen.org>
> Reviewed-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Julien Grall <julien@xen.org>

> [rest of patch quoted in full; snipped, see the Patch section below]

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index caf2ecad7e..683bc619aa 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -251,12 +251,21 @@  void dump_pageframe_info(struct domain *d)
         p2m_pod_dump_data(d);
 
     spin_lock(&d->page_alloc_lock);
+
     page_list_for_each ( page, &d->xenpage_list )
     {
         printk("    XenPage %p: caf=%08lx, taf=%" PRtype_info "\n",
                _p(mfn_x(page_to_mfn(page))),
                page->count_info, page->u.inuse.type_info);
     }
+
+    page_list_for_each ( page, &d->extra_page_list )
+    {
+        printk("    ExtraPage %p: caf=%08lx, taf=%" PRtype_info "\n",
+               _p(mfn_x(page_to_mfn(page))),
+               page->count_info, page->u.inuse.type_info);
+    }
+
     spin_unlock(&d->page_alloc_lock);
 }
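
The two loops differ only in the list walked and the label printed; purely as
a sketch (not something this patch does), they could share a helper:

    static void dump_page_list(const char *label, struct page_list_head *list)
    {
        struct page_info *page;

        /* Caller must hold the domain's page_alloc_lock. */
        page_list_for_each ( page, list )
            printk("    %s %p: caf=%08lx, taf=%" PRtype_info "\n",
                   label, _p(mfn_x(page_to_mfn(page))),
                   page->count_info, page->u.inuse.type_info);
    }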
 
diff --git a/xen/common/domain.c b/xen/common/domain.c
index b4eb476a9c..3dcd73f67c 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -403,6 +403,7 @@  struct domain *domain_create(domid_t domid,
     spin_lock_init_prof(d, page_alloc_lock);
     spin_lock_init(&d->hypercall_deadlock_mutex);
     INIT_PAGE_LIST_HEAD(&d->page_list);
+    INIT_PAGE_LIST_HEAD(&d->extra_page_list);
     INIT_PAGE_LIST_HEAD(&d->xenpage_list);
 
     spin_lock_init(&d->node_affinity_lock);
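
For reference, a simplified sketch of what INIT_PAGE_LIST_HEAD() amounts to;
the real page_list_head in xen/include/xen/mm.h can take a compressed form
depending on configuration, so treat this as an approximation:

    struct page_list_head
    {
        struct page_info *next, *tail;
    };

    /* An empty list has both pointers NULL. */
    #define INIT_PAGE_LIST_HEAD(head) ((head)->tail = (head)->next = NULL)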
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 76d37226df..10b7aeca48 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2314,7 +2314,7 @@  int assign_pages(
         smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
         pg[i].count_info =
             (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
-        page_list_add_tail(&pg[i], &d->page_list);
+        page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
     }
 
  out:
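
The count_info assignment just above the changed line is what makes the new
routing work: everything except PGC_extra is discarded, then PGC_allocated
and a reference count of 1 are set. A worked example with an illustrative
(hypothetical) starting value:

    unsigned long old_info = PGC_extra | PGC_state_inuse; /* hypothetical */
    unsigned long new_info = (old_info & PGC_extra) | PGC_allocated | 1;
    /*
     * new_info keeps PGC_extra, gains PGC_allocated, and carries a
     * refcount of 1, so page_to_list() will pick extra_page_list.
     */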
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index a06b2fb81f..1fa334b306 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -629,10 +629,8 @@  typedef struct mm_rwlock {
     const char        *locker_function; /* func that took it */
 } mm_rwlock_t;
 
-#define arch_free_heap_page(d, pg)                                      \
-    page_list_del2(pg, is_xen_heap_page(pg) ?                           \
-                       &(d)->xenpage_list : &(d)->page_list,            \
-                   &(d)->arch.relmem_list)
+#define arch_free_heap_page(d, pg) \
+    page_list_del2(pg, page_to_list(d, pg), &(d)->arch.relmem_list)
 
 extern const char zero_page[];
 
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index d0d095d9c7..a163c201e2 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -583,9 +583,8 @@  static inline unsigned int get_order_from_pages(unsigned long nr_pages)
 void scrub_one_page(struct page_info *);
 
 #ifndef arch_free_heap_page
-#define arch_free_heap_page(d, pg)                      \
-    page_list_del(pg, is_xen_heap_page(pg) ?            \
-                      &(d)->xenpage_list : &(d)->page_list)
+#define arch_free_heap_page(d, pg) \
+    page_list_del(pg, page_to_list(d, pg))
 #endif
 
 int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
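
This is the usual weak-default pattern: an architecture header included
earlier may define arch_free_heap_page() itself, and the #ifndef supplies the
fallback for everyone else. For contrast, the shape of the x86 override seen
above, which also has to consider the relinquish-memory list:

    #define arch_free_heap_page(d, pg) \
        page_list_del2(pg, page_to_list(d, pg), &(d)->arch.relmem_list)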
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index e6813288ab..4b78291d51 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -329,6 +329,7 @@  struct domain
 
     spinlock_t       page_alloc_lock; /* protects all the following fields  */
     struct page_list_head page_list;  /* linked list */
+    struct page_list_head extra_page_list; /* linked list (size extra_pages) */
     struct page_list_head xenpage_list; /* linked list (size xenheap_pages) */
 
     /*
@@ -512,6 +513,18 @@  struct domain
 #endif
 };
 
+static inline struct page_list_head *page_to_list(
+    struct domain *d, const struct page_info *pg)
+{
+    if ( is_xen_heap_page(pg) )
+        return &d->xenpage_list;
+
+    if ( pg->count_info & PGC_extra )
+        return &d->extra_page_list;
+
+    return &d->page_list;
+}
+
 /* Return number of pages currently possessed by the domain */
 static inline unsigned int domain_tot_pages(const struct domain *d)
 {
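
A minimal usage sketch (hypothetical caller, mirroring the assign_pages()
change earlier in this patch); page_to_list() is only stable while
d->page_alloc_lock is held, since list membership can otherwise change:

    spin_lock(&d->page_alloc_lock);
    page_list_add_tail(pg, page_to_list(d, pg)); /* queue on the right list */
    spin_unlock(&d->page_alloc_lock);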