[v5,05/13] mm/khugepaged: pipe enum scan_result codes back to callers

Message ID 20220504214437.2850685-6-zokeefe@google.com (mailing list archive)
State New
Series mm: userspace hugepage collapse

Commit Message

Zach O'Keefe May 4, 2022, 9:44 p.m. UTC
Pipe enum scan_result codes back through return values of functions
downstream of khugepaged_scan_file() and khugepaged_scan_pmd() to
inform callers if the operation was successful, and if not, why.
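
As a standalone sketch of that convention (toy userspace code, not part
of the patch; the enum values are a small out-of-order subset of the
kernel's enum scan_result, and scan_one() is a made-up helper name):

#include <stdio.h>

/* Toy subset of the kernel's enum scan_result -- illustration only. */
enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_EXCEED_NONE_PTE,
};

/* Previously such helpers returned 0/1 and the reason was dropped. */
static enum scan_result scan_one(int too_many_none_ptes)
{
	if (too_many_none_ptes)
		return SCAN_EXCEED_NONE_PTE;	/* caller sees why it failed */
	return SCAN_SUCCEED;
}

int main(void)
{
	enum scan_result result = scan_one(1);

	if (result != SCAN_SUCCEED)
		printf("scan failed with code %d\n", result);
	return 0;
}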

Since khugepaged_scan_pmd()'s return value already has a specific
meaning (whether mmap_lock was unlocked or not), add a bool* argument
to khugepaged_scan_pmd() to retrieve this information.
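
In sketch form (again toy userspace code, not the kernel API;
scan_pmd_sketch() is a made-up stand-in for khugepaged_scan_pmd()):

#include <stdbool.h>
#include <stdio.h>

/* Result code travels via the return value; lock state via *mmap_locked. */
static int scan_pmd_sketch(bool *mmap_locked)
{
	/*
	 * A successful scan hands off to the collapse path, which runs
	 * with mmap_lock dropped, so record that fact for the caller.
	 */
	*mmap_locked = false;
	return 1;	/* stand-in for SCAN_SUCCEED */
}

int main(void)
{
	bool mmap_locked = true;
	int result = scan_pmd_sketch(&mmap_locked);

	printf("result=%d, lock %s\n", result,
	       mmap_locked ? "still held" : "released");
	return 0;
}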

Change khugepaged to take action based on the return values of
khugepaged_scan_file() and khugepaged_scan_pmd() instead of acting
deep within the collapsing functions themselves.
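
The resulting shape of the main scan loop (simplified from the
khugepaged_scan_mm_slot() hunk below, not verbatim):

bool mmap_locked = true;
int result;

result = khugepaged_scan_pmd(mm, vma, khugepaged_scan.address,
			     &mmap_locked, cc);
if (result == SCAN_SUCCEED)
	++khugepaged_pages_collapsed;	/* counter moved out of collapse paths */
if (!mmap_locked)
	goto breakouterloop_mmap_lock;	/* we released mmap_lock */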

Signed-off-by: Zach O'Keefe <zokeefe@google.com>
---
 mm/khugepaged.c | 72 ++++++++++++++++++++++++++-----------------------
 1 file changed, 39 insertions(+), 33 deletions(-)

Comments

David Rientjes May 12, 2022, 8:02 p.m. UTC | #1
On Wed, 4 May 2022, Zach O'Keefe wrote:

> Pipe enum scan_result codes back through return values of functions
> downstream of khugepaged_scan_file() and khugepaged_scan_pmd() to
> inform callers if the operation was successful, and if not, why.
> 
> Since khugepaged_scan_pmd()'s return value already has a specific
> meaning (whether mmap_lock was unlocked or not), add a bool* argument
> to khugepaged_scan_pmd() to retrieve this information.
> 
> Change khugepaged to take action based on the return values of
> khugepaged_scan_file() and khugepaged_scan_pmd() instead of acting
> deep within the collapsing functions themselves.
> 
> Signed-off-by: Zach O'Keefe <zokeefe@google.com>

Acked-by: David Rientjes <rientjes@google.com>

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6095fcb3f07c..1314caed65b0 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -732,13 +732,13 @@  static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		result = SCAN_SUCCEED;
 		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 						    referenced, writable, result);
-		return 1;
+		return SCAN_SUCCEED;
 	}
 out:
 	release_pte_pages(pte, _pte, compound_pagelist);
 	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
 					    referenced, writable, result);
-	return 0;
+	return result;
 }
 
 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
@@ -1097,9 +1097,9 @@  static int alloc_charge_hpage(struct mm_struct *mm, struct collapse_control *cc)
 	return SCAN_SUCCEED;
 }
 
-static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
-			       int referenced, int unmapped,
-			       struct collapse_control *cc)
+static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
+			      int referenced, int unmapped,
+			      struct collapse_control *cc)
 {
 	LIST_HEAD(compound_pagelist);
 	pmd_t *pmd, _pmd;
@@ -1107,7 +1107,7 @@  static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	pgtable_t pgtable;
 	struct page *new_page;
 	spinlock_t *pmd_ptl, *pte_ptl;
-	int isolated = 0, result = 0;
+	int result = SCAN_FAIL;
 	struct vm_area_struct *vma;
 	struct mmu_notifier_range range;
 
@@ -1187,11 +1187,11 @@  static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	mmu_notifier_invalidate_range_end(&range);
 
 	spin_lock(pte_ptl);
-	isolated = __collapse_huge_page_isolate(vma, address, pte,
-			&compound_pagelist);
+	result = __collapse_huge_page_isolate(vma, address, pte,
+					      &compound_pagelist);
 	spin_unlock(pte_ptl);
 
-	if (unlikely(!isolated)) {
+	if (unlikely(result != SCAN_SUCCEED)) {
 		pte_unmap(pte);
 		spin_lock(pmd_ptl);
 		BUG_ON(!pmd_none(*pmd));
@@ -1239,24 +1239,23 @@  static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
 
 	cc->hpage = NULL;
 
-	khugepaged_pages_collapsed++;
 	result = SCAN_SUCCEED;
 out_up_write:
 	mmap_write_unlock(mm);
 out_nolock:
 	if (!IS_ERR_OR_NULL(cc->hpage))
 		mem_cgroup_uncharge(page_folio(cc->hpage));
-	trace_mm_collapse_huge_page(mm, isolated, result);
-	return;
+	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
+	return result;
 }
 
 static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
-			       unsigned long address,
+			       unsigned long address, bool *mmap_locked,
 			       struct collapse_control *cc)
 {
 	pmd_t *pmd;
 	pte_t *pte, *_pte;
-	int ret = 0, result = 0, referenced = 0;
+	int result = SCAN_FAIL, referenced = 0;
 	int none_or_zero = 0, shared = 0;
 	struct page *page = NULL;
 	unsigned long _address;
@@ -1391,18 +1390,19 @@  static int khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
 		result = SCAN_LACK_REFERENCED_PAGE;
 	} else {
 		result = SCAN_SUCCEED;
-		ret = 1;
 	}
 out_unmap:
 	pte_unmap_unlock(pte, ptl);
-	if (ret) {
+	if (result == SCAN_SUCCEED) {
 		/* collapse_huge_page will return with the mmap_lock released */
-		collapse_huge_page(mm, address, referenced, unmapped, cc);
+		*mmap_locked = false;
+		result = collapse_huge_page(mm, address, referenced,
+					    unmapped, cc);
 	}
 out:
 	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
 				     none_or_zero, result, unmapped);
-	return ret;
+	return result;
 }
 
 static void collect_mm_slot(struct mm_slot *mm_slot)
@@ -1679,8 +1679,8 @@  static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *    + restore gaps in the page cache;
  *    + unlock and free huge page;
  */
-static void collapse_file(struct mm_struct *mm, struct file *file,
-			  pgoff_t start, struct collapse_control *cc)
+static int collapse_file(struct mm_struct *mm, struct file *file,
+			 pgoff_t start, struct collapse_control *cc)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *new_page;
@@ -1982,8 +1982,6 @@  static void collapse_file(struct mm_struct *mm, struct file *file,
 		 */
 		retract_page_tables(mapping, start);
 		cc->hpage = NULL;
-
-		khugepaged_pages_collapsed++;
 	} else {
 		struct page *page;
 
@@ -2031,10 +2029,11 @@  static void collapse_file(struct mm_struct *mm, struct file *file,
 	if (!IS_ERR_OR_NULL(cc->hpage))
 		mem_cgroup_uncharge(page_folio(cc->hpage));
 	/* TODO: tracepoints */
+	return result;
 }
 
-static void khugepaged_scan_file(struct mm_struct *mm, struct file *file,
-				 pgoff_t start, struct collapse_control *cc)
+static int khugepaged_scan_file(struct mm_struct *mm, struct file *file,
+				pgoff_t start, struct collapse_control *cc)
 {
 	struct page *page = NULL;
 	struct address_space *mapping = file->f_mapping;
@@ -2107,15 +2106,16 @@  static void khugepaged_scan_file(struct mm_struct *mm, struct file *file,
 			result = SCAN_EXCEED_NONE_PTE;
 			count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
 		} else {
-			collapse_file(mm, file, start, cc);
+			result = collapse_file(mm, file, start, cc);
 		}
 	}
 
 	/* TODO: tracepoints */
+	return result;
 }
 #else
-static void khugepaged_scan_file(struct mm_struct *mm, struct file *file,
-				 pgoff_t start, struct collapse_control *cc)
+static int khugepaged_scan_file(struct mm_struct *mm, struct file *file, pgoff_t start,
+				struct collapse_control *cc)
 {
 	BUILD_BUG();
 }
@@ -2187,7 +2187,9 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			goto skip;
 
 		while (khugepaged_scan.address < hend) {
-			int ret;
+			int result;
+			bool mmap_locked = true;
+
 			cond_resched();
 			if (unlikely(khugepaged_test_exit(mm)))
 				goto breakouterloop;
@@ -2201,17 +2203,21 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 						khugepaged_scan.address);
 
 				mmap_read_unlock(mm);
-				ret = 1;
-				khugepaged_scan_file(mm, file, pgoff, cc);
+				mmap_locked = false;
+				result = khugepaged_scan_file(mm, file, pgoff,
+							      cc);
 				fput(file);
 			} else {
-				ret = khugepaged_scan_pmd(mm, vma,
-						khugepaged_scan.address, cc);
+				result = khugepaged_scan_pmd(mm, vma,
+							     khugepaged_scan.address,
+							     &mmap_locked, cc);
 			}
+			if (result == SCAN_SUCCEED)
+				++khugepaged_pages_collapsed;
 			/* move to next address */
 			khugepaged_scan.address += HPAGE_PMD_SIZE;
 			progress += HPAGE_PMD_NR;
-			if (ret)
+			if (!mmap_locked)
 				/* we released mmap_lock so break loop */
 				goto breakouterloop_mmap_lock;
 			if (progress >= pages)