[21/36] xen/common: add colored allocator initialization

Message ID 20220304174701.1453977-22-marco.solieri@minervasys.tech
State New, archived
Series Arm cache coloring

Commit Message

Marco Solieri March 4, 2022, 5:46 p.m. UTC
From: Luca Miccio <lucmiccio@gmail.com>

Initialize the colored heap and the allocator's data structures. It is
assumed that pages given to the init function are in ascending order. To
ensure that, pages are retrieved from bootmem_regions starting from the
first one. Moreover, this allows quick insertion of freed pages into the
colored allocator's internal data structures -- sorted lists.
If coloring is disabled, changing the free page order should affect
neither the performance nor the functionality of the buddy allocator.
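
For illustration only, a minimal sketch of what such an init path can
look like, assuming per-color list heads col_heap[] and a
page_to_color() helper (names not taken from the series). Because pages
arrive in ascending MFN order, tail insertion keeps every color list
sorted without any scanning:

    static void init_col_heap_pages(struct page_info *pg,
                                    unsigned long nr_pages)
    {
        unsigned long i;

        /* Ascending input => tail insertion preserves sorted order. */
        for ( i = 0; i < nr_pages; i++ )
            page_list_add_tail(&pg[i], &col_heap[page_to_color(&pg[i])]);
    }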

Do not allocate Dom0 memory with direct mapping if coloring is enabled.

Signed-off-by: Luca Miccio <206497@studenti.unimore.it>
Signed-off-by: Marco Solieri <marco.solieri@minervasys.tech>
Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
---
 xen/arch/arm/domain_build.c |  7 +++++-
 xen/common/page_alloc.c     | 43 +++++++++++++++++++++++++++++++------
 2 files changed, 42 insertions(+), 8 deletions(-)

Comments

Jan Beulich March 9, 2022, 2:58 p.m. UTC | #1
On 04.03.2022 18:46, Marco Solieri wrote:
> From: Luca Miccio <lucmiccio@gmail.com>
> 
> Initialize the colored heap and the allocator's data structures. It is
> assumed that pages given to the init function are in ascending order.

I don't think this is a good assumption to make.

> To
> ensure that, pages are retrieved from bootmem_regions starting from the
> first one. Moreover, this allows quick insertion of freed pages into the
> colored allocator's internal data structures -- sorted lists.

I wouldn't call insertion by linear scan "quick", to be honest.
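
For reference, inserting a freed page into an MFN-sorted list by linear
scan amounts to the following (a self-contained sketch; col_page is a
stand-in, not the series' types). Each free is O(n) in the list length,
so freeing n pages costs O(n^2) overall:

    /* Sketch of linear-scan insertion into an MFN-sorted free list. */
    struct col_page {
        unsigned long mfn;
        struct col_page *next;
    };

    static void sorted_insert(struct col_page **head, struct col_page *pg)
    {
        struct col_page **pp = head;

        /* Walk until the first entry with a larger MFN: O(n) per free. */
        while ( *pp && (*pp)->mfn < pg->mfn )
            pp = &(*pp)->next;

        pg->next = *pp;
        *pp = pg;
    }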

> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -2154,11 +2154,26 @@ void __init end_boot_allocator(void)
>              break;
>          }
>      }
> -    for ( i = nr_bootmem_regions; i-- > 0; )
> +
> +    for ( i = 0; i < nr_bootmem_regions; i++ )
>      {
>          struct bootmem_region *r = &bootmem_region_list[i];
> -        if ( r->s < r->e )
> -            init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
> +
> +        /*
> +         * Find the first region that can fill the buddy allocator memory
> +         * specified by buddy_required_size.
> +         */

Why would all of this memory need to come from a single region? And
why would any region - regardless of address - be okay?

> +        if ( buddy_required_size && (r->e - r->s) >
> +            PFN_DOWN(buddy_required_size) )

I think >= will do here?

Also - nit: Indentation.
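
I.e., presumably:

        if ( buddy_required_size &&
             (r->e - r->s) >= PFN_DOWN(buddy_required_size) )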

> +        {
> +            init_heap_pages(mfn_to_page(_mfn(r->s)),
> +                PFN_DOWN(buddy_required_size));

And again - indentation.

> +            r->s += PFN_DOWN(buddy_required_size);
> +            buddy_required_size = 0;
> +        }
> +
> +        init_col_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);

Judging from this, buddy_required_size can actually be __initdata in
the previous patch. Being able to spot such is another reason to not
split patches like this.

> @@ -2619,9 +2634,12 @@ int assign_pages(
>          page_set_owner(&pg[i], d);
>          smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
>          pg[i].count_info =
> -            (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
> +             (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;

Why the change?

> @@ -2642,6 +2660,15 @@ struct page_info *alloc_domheap_pages(
>      unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
>      unsigned int dma_zone;
>  
> +    /* Only Dom0 and DomUs are supported for coloring */
> +    if ( d && d->max_colors > 0 )
> +    {
> +        /* Colored allocation must be done on 0 order */
> +        if (order)

Nit: Missing blanks.
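
Per Xen style:

        if ( order )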

> @@ -2761,8 +2788,10 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
>              scrub = 1;
>          }
>  
> -        free_heap_pages(pg, order, scrub);
> -    }
> +        if ( is_page_colored(pg) )
> +            free_col_heap_page(pg);
> +        else
> +            free_heap_pages(pg, order, scrub);}

Very interesting brace placement.
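
Presumably this was meant to read:

        if ( is_page_colored(pg) )
            free_col_heap_page(pg);
        else
            free_heap_pages(pg, order, scrub);
    }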

Jan

Patch

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 9630d00066..03a2573d67 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -3292,7 +3292,12 @@  static int __init construct_dom0(struct domain *d)
     /* type must be set before allocate_memory */
     d->arch.type = kinfo.type;
 #endif
-    allocate_memory_11(d, &kinfo);
+#ifdef CONFIG_COLORING
+    if ( d->max_colors )
+        allocate_memory(d, &kinfo);
+    else
+#endif
+        allocate_memory_11(d, &kinfo);
     find_gnttab_region(d, &kinfo);
 
     /* Map extra GIC MMIO, irqs and other hw stuffs to dom0. */
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index fffa438029..dea14bc39f 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -2154,11 +2154,26 @@  void __init end_boot_allocator(void)
             break;
         }
     }
-    for ( i = nr_bootmem_regions; i-- > 0; )
+
+    for ( i = 0; i < nr_bootmem_regions; i++ )
     {
         struct bootmem_region *r = &bootmem_region_list[i];
-        if ( r->s < r->e )
-            init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
+
+        /*
+         * Find the first region that can fill the buddy allocator memory
+         * specified by buddy_required_size.
+         */
+        if ( buddy_required_size && (r->e - r->s) >
+            PFN_DOWN(buddy_required_size) )
+        {
+            init_heap_pages(mfn_to_page(_mfn(r->s)),
+                PFN_DOWN(buddy_required_size));
+
+            r->s += PFN_DOWN(buddy_required_size);
+            buddy_required_size = 0;
+        }
+
+        init_col_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);
     }
     nr_bootmem_regions = 0;
 
@@ -2619,9 +2634,12 @@  int assign_pages(
         page_set_owner(&pg[i], d);
         smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
         pg[i].count_info =
-            (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+             (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
 
-        page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
+        if ( is_page_colored(pg) )
+            page_list_add(&pg[i], page_to_list(d, &pg[i]));
+        else
+            page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
     }
 
  out:
@@ -2642,6 +2660,15 @@  struct page_info *alloc_domheap_pages(
     unsigned int bits = memflags >> _MEMF_bits, zone_hi = NR_ZONES - 1;
     unsigned int dma_zone;
 
+    /* Only Dom0 and DomUs are supported for coloring */
+    if ( d && d->max_colors > 0 )
+    {
+        /* Colored allocation must be done on 0 order */
+        if (order)
+            return NULL;
+
+        return alloc_col_domheap_page(d, memflags);
+    }
     ASSERT(!in_irq());
 
     bits = domain_clamp_alloc_bitsize(memflags & MEMF_no_owner ? NULL : d,
@@ -2761,8 +2788,10 @@  void free_domheap_pages(struct page_info *pg, unsigned int order)
             scrub = 1;
         }
 
-        free_heap_pages(pg, order, scrub);
-    }
+        if ( is_page_colored(pg) )
+            free_col_heap_page(pg);
+        else
+            free_heap_pages(pg, order, scrub);}
 
     if ( drop_dom_ref )
         put_domain(d);