[3/5] mm: migrate: skip shared exec THP for NUMA balancing

Message ID 20201103130334.13468-4-shy828301@gmail.com
State New, archived
Series mm: misc migrate cleanup and improvement

Commit Message

Yang Shi Nov. 3, 2020, 1:03 p.m. UTC
NUMA balancing already skips shared exec base pages.  Since
CONFIG_READ_ONLY_THP_FOR_FS was introduced, shared exec THPs may exist
as well, so skip such THPs for NUMA balancing too.

Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 mm/migrate.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)
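
For reference, a minimal userspace sketch of the predicate this patch factors out (the struct types and values below are hypothetical stand-ins; the real helper in mm/migrate.c uses page_mapcount(), page_is_file_lru() and vma->vm_flags & VM_EXEC):

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel state the real helper inspects. */
struct fake_page { int mapcount; bool file_backed; };
struct fake_vma  { bool exec; };

/*
 * Models the logic of is_shared_exec_page(): a file-backed page mapped by
 * more than one process through an executable VMA is probably a shared
 * library (or, with CONFIG_READ_ONLY_THP_FOR_FS, a shared exec THP), so
 * NUMA balancing should not migrate it.
 */
static bool is_shared_exec_page_model(const struct fake_vma *vma,
				      const struct fake_page *page)
{
	return page->mapcount != 1 && page->file_backed && vma->exec;
}

int main(void)
{
	struct fake_page thp = { .mapcount = 3, .file_backed = true };
	struct fake_vma vma = { .exec = true };

	printf("skip migration: %s\n",
	       is_shared_exec_page_model(&vma, &thp) ? "yes" : "no");
	return 0;
}
```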

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index c33c92495ead..9a32bb128f31 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2061,6 +2061,16 @@  bool pmd_trans_migrating(pmd_t pmd)
 	return PageLocked(page);
 }
 
+static inline bool is_shared_exec_page(struct vm_area_struct *vma,
+				       struct page *page)
+{
+	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
+	    (vma->vm_flags & VM_EXEC))
+		return true;
+
+	return false;
+}
+
 /*
  * Attempt to migrate a misplaced page to the specified destination
  * node. Caller is expected to have an elevated reference count on
@@ -2078,8 +2088,7 @@  int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	 * Don't migrate file pages that are mapped in multiple processes
 	 * with execute permissions as they are probably shared libraries.
 	 */
-	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
-	    (vma->vm_flags & VM_EXEC))
+	if (is_shared_exec_page(vma, page))
 		goto out;
 
 	/*
@@ -2134,6 +2143,10 @@  int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	int page_lru = page_is_file_lru(page);
 	unsigned long start = address & HPAGE_PMD_MASK;
 
+	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+	    is_shared_exec_page(vma, page))
+		goto out;
+
 	new_page = alloc_pages_node(node,
 		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
 		HPAGE_PMD_ORDER);
@@ -2245,6 +2258,7 @@  int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 out_unlock:
 	unlock_page(page);
+out:
 	put_page(page);
 	return 0;
 }