Message ID | 20230825135918.4164671-10-willy@infradead.org |
---|---|
State | New |
Series | Rearrange batched folio freeing |
On 25/08/2023 14:59, Matthew Wilcox (Oracle) wrote:
> Call folio_undo_large_rmappable() if needed. free_unref_page_prepare()
> destroys the ability to call folio_order(), so stash the order in
> folio->private for the benefit of the second loop.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/page_alloc.c | 21 +++++++++++++++------
>  1 file changed, 15 insertions(+), 6 deletions(-)
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index bca5c70b5576..e586d17fb7f2 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -2465,7 +2465,7 @@ void free_unref_page(struct page *page, unsigned int order)
>  }
>
>  /*
> - * Free a batch of 0-order pages
> + * Free a batch of folios
>   */
>  void free_unref_folios(struct folio_batch *folios)
>  {
> @@ -2478,7 +2478,11 @@ void free_unref_folios(struct folio_batch *folios)
>  	for (i = 0, j = 0; i < folios->nr; i++) {
>  		struct folio *folio = folios->folios[i];
>  		unsigned long pfn = folio_pfn(folio);
> -		if (!free_unref_page_prepare(&folio->page, pfn, 0))
> +		unsigned int order = folio_order(folio);

Do you need to do anything special for hugetlb folios? I see that
destroy_large_folio() has:

	if (folio_test_hugetlb(folio)) {
		free_huge_folio(folio);
		return;
	}

> +
> +		if (order > 0 && folio_test_large_rmappable(folio))
> +			folio_undo_large_rmappable(folio);
> +		if (!free_unref_page_prepare(&folio->page, pfn, order))
>  			continue;
>
>  		/*
> @@ -2486,11 +2490,13 @@ void free_unref_folios(struct folio_batch *folios)
>  		 * comment in free_unref_page.
>  		 */
>  		migratetype = get_pcppage_migratetype(&folio->page);
> -		if (unlikely(is_migrate_isolate(migratetype))) {
> +		if (order > PAGE_ALLOC_COSTLY_ORDER ||

Should this be `if (!pcp_allowed_order(order) ||` ? That helper includes
the THP pageblock_order too.

> +		    is_migrate_isolate(migratetype)) {
>  			free_one_page(folio_zone(folio), &folio->page, pfn,
> -					0, migratetype, FPI_NONE);
> +					order, migratetype, FPI_NONE);
>  			continue;
>  		}
> +		folio->private = (void *)(unsigned long)order;
>  		if (j != i)
>  			folios->folios[j] = folio;
>  		j++;
> @@ -2500,7 +2506,9 @@ void free_unref_folios(struct folio_batch *folios)
>  	for (i = 0; i < folios->nr; i++) {
>  		struct folio *folio = folios->folios[i];
>  		struct zone *zone = folio_zone(folio);
> +		unsigned int order = (unsigned long)folio->private;
>
> +		folio->private = NULL;
>  		migratetype = get_pcppage_migratetype(&folio->page);
>
>  		/* Different zone requires a different pcp lock */
> @@ -2519,7 +2527,7 @@ void free_unref_folios(struct folio_batch *folios)
>  		if (unlikely(!pcp)) {
>  			pcp_trylock_finish(UP_flags);
>  			free_one_page(zone, &folio->page,
> -					folio_pfn(folio), 0,
> +					folio_pfn(folio), order,
>  					migratetype, FPI_NONE);
>  			locked_zone = NULL;
>  			continue;
> @@ -2535,7 +2543,8 @@ void free_unref_folios(struct folio_batch *folios)
>  			migratetype = MIGRATE_MOVABLE;
>
>  		trace_mm_page_free_batched(&folio->page);
> -		free_unref_page_commit(zone, pcp, &folio->page, migratetype, 0);
> +		free_unref_page_commit(zone, pcp, &folio->page, migratetype,
> +				order);
>  	}
>
>  	if (pcp) {
On Thu, Aug 31, 2023 at 04:21:53PM +0100, Ryan Roberts wrote:
> On 25/08/2023 14:59, Matthew Wilcox (Oracle) wrote:
> > @@ -2478,7 +2478,11 @@ void free_unref_folios(struct folio_batch *folios)
> >  	for (i = 0, j = 0; i < folios->nr; i++) {
> >  		struct folio *folio = folios->folios[i];
> >  		unsigned long pfn = folio_pfn(folio);
> > -		if (!free_unref_page_prepare(&folio->page, pfn, 0))
> > +		unsigned int order = folio_order(folio);
>
> Do you need to do anything special for hugetlb folios? I see that
> destroy_large_folio() has:
>
> 	if (folio_test_hugetlb(folio)) {
> 		free_huge_folio(folio);
> 		return;
> 	}

Right; hugetlb folios get freed specially and never come this way.
I could put in an assertion, I suppose?

> > @@ -2486,11 +2490,13 @@ void free_unref_folios(struct folio_batch *folios)
> >  		 * comment in free_unref_page.
> >  		 */
> >  		migratetype = get_pcppage_migratetype(&folio->page);
> > -		if (unlikely(is_migrate_isolate(migratetype))) {
> > +		if (order > PAGE_ALLOC_COSTLY_ORDER ||
>
> Should this be `if (!pcp_allowed_order(order) ||` ? That helper includes
> the THP pageblock_order too.

Oh, yes, that's obviously far better than what I had here.  I got the
BUG in order_to_pindex() and didn't think to look around for the correct
predicate.  Thanks!
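For reference, the predicate Ryan points to is an existing helper in mm/page_alloc.c; at the time of this series it admits the costly-order range plus, with THP enabled, pageblock_order:

```c
/* As in mm/page_alloc.c at the time of this series (shown for reference). */
static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}
```

The assertion Willy floats for the hugetlb case is not part of the posted patch; one plausible form, placed at the top of the first loop, would be:

```c
	/* Sketch only: hugetlb folios are freed via free_huge_folio()
	 * and should never reach free_unref_folios(). */
	VM_BUG_ON_FOLIO(folio_test_hugetlb(folio), folio);
```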
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bca5c70b5576..e586d17fb7f2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2465,7 +2465,7 @@ void free_unref_page(struct page *page, unsigned int order)
 }
 
 /*
- * Free a batch of 0-order pages
+ * Free a batch of folios
  */
 void free_unref_folios(struct folio_batch *folios)
 {
@@ -2478,7 +2478,11 @@ void free_unref_folios(struct folio_batch *folios)
 	for (i = 0, j = 0; i < folios->nr; i++) {
 		struct folio *folio = folios->folios[i];
 		unsigned long pfn = folio_pfn(folio);
-		if (!free_unref_page_prepare(&folio->page, pfn, 0))
+		unsigned int order = folio_order(folio);
+
+		if (order > 0 && folio_test_large_rmappable(folio))
+			folio_undo_large_rmappable(folio);
+		if (!free_unref_page_prepare(&folio->page, pfn, order))
 			continue;
 
 		/*
@@ -2486,11 +2490,13 @@ void free_unref_folios(struct folio_batch *folios)
 		 * comment in free_unref_page.
 		 */
 		migratetype = get_pcppage_migratetype(&folio->page);
-		if (unlikely(is_migrate_isolate(migratetype))) {
+		if (order > PAGE_ALLOC_COSTLY_ORDER ||
+		    is_migrate_isolate(migratetype)) {
 			free_one_page(folio_zone(folio), &folio->page, pfn,
-					0, migratetype, FPI_NONE);
+					order, migratetype, FPI_NONE);
 			continue;
 		}
+		folio->private = (void *)(unsigned long)order;
 		if (j != i)
 			folios->folios[j] = folio;
 		j++;
@@ -2500,7 +2506,9 @@ void free_unref_folios(struct folio_batch *folios)
 	for (i = 0; i < folios->nr; i++) {
 		struct folio *folio = folios->folios[i];
 		struct zone *zone = folio_zone(folio);
+		unsigned int order = (unsigned long)folio->private;
 
+		folio->private = NULL;
 		migratetype = get_pcppage_migratetype(&folio->page);
 
 		/* Different zone requires a different pcp lock */
@@ -2519,7 +2527,7 @@ void free_unref_folios(struct folio_batch *folios)
 		if (unlikely(!pcp)) {
 			pcp_trylock_finish(UP_flags);
 			free_one_page(zone, &folio->page,
-					folio_pfn(folio), 0,
+					folio_pfn(folio), order,
 					migratetype, FPI_NONE);
 			locked_zone = NULL;
 			continue;
@@ -2535,7 +2543,8 @@ void free_unref_folios(struct folio_batch *folios)
 			migratetype = MIGRATE_MOVABLE;
 
 		trace_mm_page_free_batched(&folio->page);
-		free_unref_page_commit(zone, pcp, &folio->page, migratetype, 0);
+		free_unref_page_commit(zone, pcp, &folio->page, migratetype,
+				order);
 	}
 
 	if (pcp) {
```
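Folding in the fix agreed in the review above, the first-loop check would presumably end up looking like this (a sketch of the agreed direction, not an actual respin):

```c
		/* Sketch: pcp_allowed_order() also accepts pageblock_order
		 * (THP-sized) folios, which the bare
		 * order > PAGE_ALLOC_COSTLY_ORDER test would wrongly divert
		 * away from the pcp path. */
		if (!pcp_allowed_order(order) ||
		    is_migrate_isolate(migratetype)) {
			free_one_page(folio_zone(folio), &folio->page, pfn,
					order, migratetype, FPI_NONE);
			continue;
		}
```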
Call folio_undo_large_rmappable() if needed. free_unref_page_prepare()
destroys the ability to call folio_order(), so stash the order in
folio->private for the benefit of the second loop.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/page_alloc.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)
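For context, a minimal sketch of how a caller would use the batched API once this patch is applied; the function name free_two_folios() and both parameters are hypothetical, only folio_batch_init(), folio_batch_add() and free_unref_folios() come from the kernel and this series:

```c
#include <linux/pagevec.h>	/* struct folio_batch */

/* Hypothetical caller: gather folios of mixed order and free them in
 * one call. The folios' reference counts must already have dropped to
 * zero, and none of them may be hugetlb folios. */
static void free_two_folios(struct folio *small, struct folio *large)
{
	struct folio_batch fbatch;

	folio_batch_init(&fbatch);
	folio_batch_add(&fbatch, small);	/* order-0, as before */
	folio_batch_add(&fbatch, large);	/* large folio, new with this patch */
	free_unref_folios(&fbatch);
}
```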