[RFC,1/7] mm: page_owner: split page by order

Message ID: 20200814173131.2803002-2-minchan@kernel.org
State: New, archived
Series: Support high-order page bulk allocation

Commit Message

Minchan Kim Aug. 14, 2020, 5:31 p.m. UTC
split_page_owner() has assumed that a high-order page allocation is
always split into order-0 allocations. This patch adds a new_order
argument so that a high-order allocation can be split into sub-pages
of any smaller order.
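
A minimal sketch of the new call pattern (illustrative only; the orders
are made up for the example): splitting an order-4 page into order-2
chunks yields 1 << (4 - 2) = 4 sub-pages, and each sub-page's
page_owner record then reports order 2 rather than 0:

	/*
	 * Illustrative use of the new signature: a caller that has
	 * split an order-4 page into four order-2 pages updates the
	 * page_owner records to match. Before this patch, only
	 * splitting down to order 0 was expressible.
	 */
	split_page_owner(page, 4, 2);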

Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 include/linux/page_owner.h | 10 ++++++----
 mm/huge_memory.c           |  2 +-
 mm/page_alloc.c            |  2 +-
 mm/page_owner.c            |  7 +++++--
 4 files changed, 13 insertions(+), 8 deletions(-)

Patch

diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 8679ccd722e8..60231997edb7 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -11,7 +11,8 @@ extern struct page_ext_operations page_owner_ops;
 extern void __reset_page_owner(struct page *page, unsigned int order);
 extern void __set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask);
-extern void __split_page_owner(struct page *page, unsigned int order);
+extern void __split_page_owner(struct page *page, unsigned int order,
+			unsigned int new_order);
 extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(struct page *page);
@@ -31,10 +32,11 @@ static inline void set_page_owner(struct page *page,
 		__set_page_owner(page, order, gfp_mask);
 }
 
-static inline void split_page_owner(struct page *page, unsigned int order)
+static inline void split_page_owner(struct page *page, unsigned int order,
+			unsigned int new_order)
 {
 	if (static_branch_unlikely(&page_owner_inited))
-		__split_page_owner(page, order);
+		__split_page_owner(page, order, new_order);
 }
 static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
 {
@@ -60,7 +62,7 @@ static inline void set_page_owner(struct page *page,
 {
 }
 static inline void split_page_owner(struct page *page,
-			unsigned int order)
+			unsigned int order, unsigned int new_order)
 {
 }
 static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 07007a8b68fe..2858a342ce87 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2420,7 +2420,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
 	ClearPageCompound(head);
 
-	split_page_owner(head, HPAGE_PMD_ORDER);
+	split_page_owner(head, HPAGE_PMD_ORDER, 0);
 
 	/* See comment in __split_huge_page_tail() */
 	if (PageAnon(head)) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cf0b25161fea..8ce30cc50577 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3205,7 +3205,7 @@ void split_page(struct page *page, unsigned int order)
 
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
-	split_page_owner(page, order);
+	split_page_owner(page, order, 0);
 }
 EXPORT_SYMBOL_GPL(split_page);
 
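split_page() itself keeps its existing behavior by passing new_order = 0.
A caller that wants larger chunks could follow the same pattern; below is
a minimal sketch under that assumption (split_page_to_order() is a
hypothetical name used here for illustration, not something this patch
adds):

	/*
	 * Hypothetical helper modeled on split_page(): split an
	 * order-'order' non-compound page into 1 << (order - new_order)
	 * separately refcounted pages of order 'new_order'.
	 */
	static void split_page_to_order(struct page *page, unsigned int order,
					unsigned int new_order)
	{
		int i;

		VM_BUG_ON_PAGE(PageCompound(page), page);
		VM_BUG_ON_PAGE(!page_count(page), page);

		/* Each new head page gets its own reference. */
		for (i = 1; i < (1 << (order - new_order)); i++)
			set_page_refcounted(page + (i << new_order));
		split_page_owner(page, order, new_order);
	}
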
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 360461509423..c7a07b53eb92 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -204,7 +204,8 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
 	page_owner->last_migrate_reason = reason;
 }
 
-void __split_page_owner(struct page *page, unsigned int order)
+void __split_page_owner(struct page *page, unsigned int order,
+			unsigned int new_order)
 {
 	int i;
 	struct page_ext *page_ext = lookup_page_ext(page);
@@ -213,9 +214,11 @@ void __split_page_owner(struct page *page, unsigned int order)
 	if (unlikely(!page_ext))
 		return;
 
+	VM_BUG_ON_PAGE(order < new_order, page);
+
 	for (i = 0; i < (1 << order); i++) {
 		page_owner = get_page_owner(page_ext);
-		page_owner->order = 0;
+		page_owner->order = new_order;
 		page_ext = page_ext_next(page_ext);
 	}
 }
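
Two details worth noting. The VM_BUG_ON_PAGE() guard permits
new_order == order (a no-op split) but rejects splitting to a larger
order. And the loop stamps new_order into all 1 << order page_ext
records, not just the first record of each new chunk; that matches how
the page_owner read side already filters its report, skipping records
whose pfn is not aligned to the recorded order, so after the order-4
into order-2 example above only the four aligned entries are printed.
The existing filter in the read path of mm/page_owner.c is roughly:

	/* Don't print "tail" pages of a higher-order record. */
	if (!IS_ALIGNED(pfn, 1 << page_owner->order))
		continue;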