diff mbox series

[2/6] Revert "mm: remove free_unref_page_list()"

Message ID 20240730125346.1580150-3-usamaarif642@gmail.com (mailing list archive)
State New
Headers show
Series mm: split underutilized THPs | expand

Commit Message

Usama Arif July 30, 2024, 12:45 p.m. UTC
free_unref_page_list() will be needed in a later patch for an
optimization to free zapped tail pages when splitting an isolated THP.

Signed-off-by: Usama Arif <usamaarif642@gmail.com>
---
 mm/internal.h   |  1 +
 mm/page_alloc.c | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+)
diff mbox series

Patch

diff --git a/mm/internal.h b/mm/internal.h
index 7a3bcc6d95e7..259afe44dc88 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -680,6 +680,7 @@  extern int user_min_free_kbytes;
 
 void free_unref_page(struct page *page, unsigned int order);
 void free_unref_folios(struct folio_batch *fbatch);
+void free_unref_page_list(struct list_head *list);
 
 extern void zone_pcp_reset(struct zone *zone);
 extern void zone_pcp_disable(struct zone *zone);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aae00ba3b3bd..38832e6b1e6c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2774,6 +2774,24 @@  void free_unref_folios(struct folio_batch *folios)
 	folio_batch_reinit(folios);
 }
 
+void free_unref_page_list(struct list_head *list)
+{
+	struct folio_batch fbatch;
+
+	folio_batch_init(&fbatch);
+	while (!list_empty(list)) {
+		struct folio *folio = list_first_entry(list, struct folio, lru);
+
+		list_del(&folio->lru);
+		if (folio_batch_add(&fbatch, folio) > 0)
+			continue;
+		free_unref_folios(&fbatch);
+	}
+
+	if (fbatch.nr)
+		free_unref_folios(&fbatch);
+}
+
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]