
[RFC,08/26] mm: page_alloc: claim blocks during compaction capturing

Message ID 20230418191313.268131-9-hannes@cmpxchg.org (mailing list archive)
State New
Series mm: reliable huge page allocator

Commit Message

Johannes Weiner April 18, 2023, 7:12 p.m. UTC
When capturing a whole block, update the migratetype accordingly. For
example, a THP allocation might capture an unmovable block. If the THP
gets split and partially freed later, the remainder should group up
with movable allocations.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
 mm/internal.h   |  1 +
 mm/page_alloc.c | 42 ++++++++++++++++++++++++------------------
 2 files changed, 25 insertions(+), 18 deletions(-)

Comments

Mel Gorman April 21, 2023, 1:12 p.m. UTC | #1
On Tue, Apr 18, 2023 at 03:12:55PM -0400, Johannes Weiner wrote:
> When capturing a whole block, update the migratetype accordingly. For
> example, a THP allocation might capture an unmovable block. If the THP
> gets split and partially freed later, the remainder should group up
> with movable allocations.
> 
> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
> ---
>  mm/internal.h   |  1 +
>  mm/page_alloc.c | 42 ++++++++++++++++++++++++------------------
>  2 files changed, 25 insertions(+), 18 deletions(-)
> 
> diff --git a/mm/internal.h b/mm/internal.h
> index 024affd4e4b5..39f65a463631 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -432,6 +432,7 @@ struct compact_control {
>   */
>  struct capture_control {
>  	struct compact_control *cc;
> +	int migratetype;
>  	struct page *page;
>  };
>  
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 4d20513c83be..8e5996f8b4b4 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -615,6 +615,17 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
>  				page_to_pfn(page), MIGRATETYPE_MASK);
>  }
>  
> +static void change_pageblock_range(struct page *pageblock_page,
> +					int start_order, int migratetype)
> +{
> +	int nr_pageblocks = 1 << (start_order - pageblock_order);
> +
> +	while (nr_pageblocks--) {
> +		set_pageblock_migratetype(pageblock_page, migratetype);
> +		pageblock_page += pageblock_nr_pages;
> +	}
> +}
> +
>  #ifdef CONFIG_DEBUG_VM
>  static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
>  {
> @@ -962,14 +973,19 @@ compaction_capture(struct capture_control *capc, struct page *page,
>  	    is_migrate_isolate(migratetype))
>  		return false;
>  
> -	/*
> -	 * Do not let lower order allocations pollute a movable pageblock.
> -	 * This might let an unmovable request use a reclaimable pageblock
> -	 * and vice-versa but no more than normal fallback logic which can
> -	 * have trouble finding a high-order free page.
> -	 */
> -	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
> +	if (order >= pageblock_order) {
> +		migratetype = capc->migratetype;
> +		change_pageblock_range(page, order, migratetype);
> +	} else if (migratetype == MIGRATE_MOVABLE) {
> +		/*
> +		 * Do not let lower order allocations pollute a
> +		 * movable pageblock.  This might let an unmovable
> +		 * request use a reclaimable pageblock and vice-versa
> +		 * but no more than normal fallback logic which can
> +		 * have trouble finding a high-order free page.
> +		 */
>  		return false;
> +	}
>  

For capturing pageblock order or larger, why not unconditionally make
the block MOVABLE? Even if it's a zero page allocation, it would be nice
to keep the pageblock for movable pages after the split as long as
possible.
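i.e. something like this instead of taking the type from capc (untested sketch):

	if (order >= pageblock_order)
		change_pageblock_range(page, order, MIGRATE_MOVABLE);
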
Johannes Weiner April 25, 2023, 2:39 p.m. UTC | #2
On Fri, Apr 21, 2023 at 02:12:27PM +0100, Mel Gorman wrote:
> On Tue, Apr 18, 2023 at 03:12:55PM -0400, Johannes Weiner wrote:
> > When capturing a whole block, update the migratetype accordingly. For
> > example, a THP allocation might capture an unmovable block. If the THP
> > gets split and partially freed later, the remainder should group up
> > with movable allocations.
> > 
> > [...]
> > 
> > @@ -962,14 +973,19 @@ compaction_capture(struct capture_control *capc, struct page *page,
> >  	    is_migrate_isolate(migratetype))
> >  		return false;
> >  
> > -	/*
> > -	 * Do not let lower order allocations pollute a movable pageblock.
> > -	 * This might let an unmovable request use a reclaimable pageblock
> > -	 * and vice-versa but no more than normal fallback logic which can
> > -	 * have trouble finding a high-order free page.
> > -	 */
> > -	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
> > +	if (order >= pageblock_order) {
> > +		migratetype = capc->migratetype;
> > +		change_pageblock_range(page, order, migratetype);
> > +	} else if (migratetype == MIGRATE_MOVABLE) {
> > +		/*
> > +		 * Do not let lower order allocations pollute a
> > +		 * movable pageblock.  This might let an unmovable
> > +		 * request use a reclaimable pageblock and vice-versa
> > +		 * but no more than normal fallback logic which can
> > +		 * have trouble finding a high-order free page.
> > +		 */
> >  		return false;
> > +	}
> >  
> 
> For capturing pageblock order or larger, why not unconditionally make
> the block MOVABLE? Even if it's a zero page allocation, it would be nice
> to keep the pageblock for movable pages after the split as long as
> possible.

The zero page isn't split, but if some other unmovable allocation is
split and partially freed later on, I want to avoid backfilling that
block with movables. The block is already lost to compaction, and
keeping it unmovable makes future unmovable allocations more likely to
group into it rather than claim an additional block.

I had to do a double take on block merges beyond pageblock order,
wondering whether we could claim multiple blocks for requests
(capc->order) smaller than a block. But that can't happen: once we
reach pageblock_order during merging, we claim, capture and exit. That
means order > pageblock_order can only occur if capc->order is
actually larger than a pageblock as well. I'll add a comment.
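Something along these lines (sketch only, exact wording may change):

	if (order >= pageblock_order) {
		/*
		 * Merging stops once the capture order is reached, so
		 * order > pageblock_order here implies capc->order is
		 * larger than a pageblock as well. Claiming every
		 * block in the range for the allocation's type is safe.
		 */
		migratetype = capc->migratetype;
		change_pageblock_range(page, order, migratetype);
	}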

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 024affd4e4b5..39f65a463631 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -432,6 +432,7 @@ struct compact_control {
  */
 struct capture_control {
 	struct compact_control *cc;
+	int migratetype;
 	struct page *page;
 };
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4d20513c83be..8e5996f8b4b4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -615,6 +615,17 @@ void set_pageblock_migratetype(struct page *page, int migratetype)
 				page_to_pfn(page), MIGRATETYPE_MASK);
 }
 
+static void change_pageblock_range(struct page *pageblock_page,
+					int start_order, int migratetype)
+{
+	int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+	while (nr_pageblocks--) {
+		set_pageblock_migratetype(pageblock_page, migratetype);
+		pageblock_page += pageblock_nr_pages;
+	}
+}
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -962,14 +973,19 @@ compaction_capture(struct capture_control *capc, struct page *page,
 	    is_migrate_isolate(migratetype))
 		return false;
 
-	/*
-	 * Do not let lower order allocations pollute a movable pageblock.
-	 * This might let an unmovable request use a reclaimable pageblock
-	 * and vice-versa but no more than normal fallback logic which can
-	 * have trouble finding a high-order free page.
-	 */
-	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
+	if (order >= pageblock_order) {
+		migratetype = capc->migratetype;
+		change_pageblock_range(page, order, migratetype);
+	} else if (migratetype == MIGRATE_MOVABLE) {
+		/*
+		 * Do not let lower order allocations pollute a
+		 * movable pageblock.  This might let an unmovable
+		 * request use a reclaimable pageblock and vice-versa
+		 * but no more than normal fallback logic which can
+		 * have trouble finding a high-order free page.
+		 */
 		return false;
+	}
 
 	capc->page = page;
 	return true;
@@ -2674,17 +2690,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
 			      old_mt, new_mt, num_movable);
 }
 
-static void change_pageblock_range(struct page *pageblock_page,
-					int start_order, int migratetype)
-{
-	int nr_pageblocks = 1 << (start_order - pageblock_order);
-
-	while (nr_pageblocks--) {
-		set_pageblock_migratetype(pageblock_page, migratetype);
-		pageblock_page += pageblock_nr_pages;
-	}
-}
-
 /*
  * When we are falling back to another migratetype during allocation, try to
  * steal extra free pages from the same pageblocks to satisfy further
@@ -4481,6 +4486,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	unsigned long pflags;
 	unsigned int noreclaim_flag;
 	struct capture_control capc = {
+		.migratetype = ac->migratetype,
 		.page = NULL,
 	};