diff mbox series

[-mm,1/2] mm: page_owner: split page_owner's flags from the common flags

Message ID 20221217105833.24851-2-laoar.shao@gmail.com (mailing list archive)
State New
Headers show
Series mm: page_ext: split page_ext flags | expand

Commit Message

Yafang Shao Dec. 17, 2022, 10:58 a.m. UTC
We split page_owner's flags from the common page_ext flags. Temporarily
this patch will increase the memory overhead of page_owner, but
considering that page_owner is for debugging purposes only, that is
acceptable. After we split all page extensions' flags from the common
flags, we can reduce the memory overhead for page extensions that need
to run in a production environment.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 include/linux/page_ext.h |  6 ++----
 mm/page_owner.c          | 36 ++++++++++++++++++++++--------------
 2 files changed, 24 insertions(+), 18 deletions(-)
diff mbox series

Patch

diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 22be4582faae..c8ca4954145c 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -16,14 +16,12 @@  struct page_ext_operations {
 
 #ifdef CONFIG_PAGE_EXTENSION
 
-enum page_ext_flags {
-	PAGE_EXT_OWNER,
-	PAGE_EXT_OWNER_ALLOCATED,
 #if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
+enum page_ext_flags {
 	PAGE_EXT_YOUNG,
 	PAGE_EXT_IDLE,
-#endif
 };
+#endif
 
 /*
  * Page Extension can be considered as an extended mem_map.
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 2d27f532df4c..f6e8ee8fa9c5 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -21,6 +21,11 @@ 
  */
 #define PAGE_OWNER_STACK_DEPTH (16)
 
+enum page_owner_flag {
+	PAGE_EXT_OWNER,
+	PAGE_EXT_OWNER_ALLOCATED,
+};
+
 struct page_owner {
 	unsigned short order;
 	short last_migrate_reason;
@@ -32,6 +37,7 @@  struct page_owner {
 	char comm[TASK_COMM_LEN];
 	pid_t pid;
 	pid_t tgid;
+	unsigned long flags;
 };
 
 static bool page_owner_enabled __initdata;
@@ -147,8 +153,8 @@  void __reset_page_owner(struct page *page, unsigned short order)
 
 	handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 	for (i = 0; i < (1 << order); i++) {
-		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
 		page_owner = get_page_owner(page_ext);
+		__clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_owner->flags);
 		page_owner->free_handle = handle;
 		page_owner->free_ts_nsec = free_ts_nsec;
 		page_ext = page_ext_next(page_ext);
@@ -174,8 +180,8 @@  static inline void __set_page_owner_handle(struct page_ext *page_ext,
 		page_owner->ts_nsec = local_clock();
 		strscpy(page_owner->comm, current->comm,
 			sizeof(page_owner->comm));
-		__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
-		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+		__set_bit(PAGE_EXT_OWNER, &page_owner->flags);
+		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_owner->flags);
 
 		page_ext = page_ext_next(page_ext);
 	}
@@ -264,8 +270,8 @@  void __folio_copy_owner(struct folio *newfolio, struct folio *old)
 	 * in that case we also don't need to explicitly clear the info from
 	 * the new page, which will be freed.
 	 */
-	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
-	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
+	__set_bit(PAGE_EXT_OWNER, &new_page_owner->flags);
+	__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_page_owner->flags);
 	page_ext_put(new_ext);
 	page_ext_put(old_ext);
 }
@@ -325,10 +331,10 @@  void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			if (unlikely(!page_ext))
 				continue;
 
-			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
+			page_owner = get_page_owner(page_ext);
+			if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_owner->flags))
 				goto ext_put_continue;
 
-			page_owner = get_page_owner(page_ext);
 			page_mt = gfp_migratetype(page_owner->gfp_mask);
 			if (pageblock_mt != page_mt) {
 				if (is_migrate_cma(pageblock_mt))
@@ -467,13 +473,13 @@  void __dump_page_owner(const struct page *page)
 	gfp_mask = page_owner->gfp_mask;
 	mt = gfp_migratetype(gfp_mask);
 
-	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
+	if (!test_bit(PAGE_EXT_OWNER, &page_owner->flags)) {
 		pr_alert("page_owner info is not present (never set?)\n");
 		page_ext_put(page_ext);
 		return;
 	}
 
-	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
+	if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_owner->flags))
 		pr_alert("page_owner tracks the page as allocated\n");
 	else
 		pr_alert("page_owner tracks the page as freed\n");
@@ -556,22 +562,22 @@  read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		if (unlikely(!page_ext))
 			continue;
 
+		page_owner = get_page_owner(page_ext);
+
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
 		 * because we don't hold the zone lock.
 		 */
-		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+		if (!test_bit(PAGE_EXT_OWNER, &page_owner->flags))
 			goto ext_put_continue;
 
 		/*
 		 * Although we do have the info about past allocation of free
 		 * pages, it's not relevant for current memory usage.
 		 */
-		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
+		if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_owner->flags))
 			goto ext_put_continue;
 
-		page_owner = get_page_owner(page_ext);
-
 		/*
 		 * Don't print "tail" pages of high-order allocations as that
 		 * would inflate the stats.
@@ -640,6 +646,7 @@  static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 
 		for (; pfn < block_end_pfn; pfn++) {
 			struct page *page = pfn_to_page(pfn);
+			struct page_owner *page_owner;
 			struct page_ext *page_ext;
 
 			if (page_zone(page) != zone)
@@ -667,8 +674,9 @@  static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			if (unlikely(!page_ext))
 				continue;
 
+			page_owner = get_page_owner(page_ext);
 			/* Maybe overlapping zone */
-			if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+			if (test_bit(PAGE_EXT_OWNER, &page_owner->flags))
 				goto ext_put_continue;
 
 			/* Found early allocated page */