
[v2,2/7] mm: thp: introduce transhuge_vma_size_ok() helper

Message ID 20220602182159.248387-3-shy828301@gmail.com (mailing list archive)
State New
Series Cleanup transhuge_xxx helpers

Commit Message

Yang Shi June 2, 2022, 6:21 p.m. UTC
There are a couple of places that check whether the vma size is ok for
THP or not.  They are open coded and duplicated, so introduce the
transhuge_vma_size_ok() helper to do the job.

Signed-off-by: Yang Shi <shy828301@gmail.com>
---
 include/linux/huge_mm.h | 17 +++++++++++++++++
 mm/huge_memory.c        |  5 +----
 mm/khugepaged.c         | 12 ++++++------
 3 files changed, 24 insertions(+), 10 deletions(-)
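
For reference, a minimal userspace sketch (not kernel code) of the check
transhuge_vma_size_ok() performs: a VMA qualifies only if it spans at
least one fully aligned HPAGE_PMD_SIZE region.  The 2MB huge page size
and the simplified round_up() below are assumptions for illustration.

/*
 * Illustrative userspace sketch of the transhuge_vma_size_ok() check.
 * HPAGE_PMD_SIZE is assumed to be 2MB here.
 */
#include <stdbool.h>
#include <stdio.h>

#define HPAGE_PMD_SIZE	(2UL << 20)		/* assumed 2MB PMD-sized huge page */
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

/* simplified round_up() for a power-of-2 alignment */
#define round_up(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
};

static bool transhuge_vma_size_ok(struct vm_area_struct *vma)
{
	/* true if [vm_start, vm_end) covers a whole aligned PMD-sized range */
	return round_up(vma->vm_start, HPAGE_PMD_SIZE) <
	       (vma->vm_end & HPAGE_PMD_MASK);
}

int main(void)
{
	/* 1MB..5MB: contains the aligned 2MB..4MB range, so THP fits */
	struct vm_area_struct big   = { .vm_start = 1UL << 20, .vm_end = 5UL << 20 };
	/* 1MB..3MB: 2MB long but misaligned, no full PMD-sized range fits */
	struct vm_area_struct small = { .vm_start = 1UL << 20, .vm_end = 3UL << 20 };

	printf("big:   %s\n", transhuge_vma_size_ok(&big)   ? "ok" : "too small");
	printf("small: %s\n", transhuge_vma_size_ok(&small) ? "ok" : "too small");
	return 0;
}

With these assumptions the 1MB-5MB VMA passes (it contains the aligned
2MB-4MB range), while the 1MB-3MB VMA does not, even though it is 2MB
long.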

Patch

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 648cb3ce7099..a8f61db47f2a 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -116,6 +116,18 @@  extern struct kobj_attribute shmem_enabled_attr;
 
 extern unsigned long transparent_hugepage_flags;
 
+/*
+ * The vma size has to be large enough to hold an aligned HPAGE_PMD_SIZE area.
+ */
+static inline bool transhuge_vma_size_ok(struct vm_area_struct *vma)
+{
+	if (round_up(vma->vm_start, HPAGE_PMD_SIZE) <
+	    (vma->vm_end & HPAGE_PMD_MASK))
+		return true;
+
+	return false;
+}
+
 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 		unsigned long addr)
 {
@@ -345,6 +357,11 @@  static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
 	return false;
 }
 
+static inline bool transhuge_vma_size_ok(struct vm_area_struct *vma)
+{
+	return false;
+}
+
 static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
 		unsigned long addr)
 {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 48182c8fe151..36ada544e494 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -71,10 +71,7 @@  unsigned long huge_zero_pfn __read_mostly = ~0UL;
 
 bool transparent_hugepage_active(struct vm_area_struct *vma)
 {
-	/* The addr is used to check if the vma size fits */
-	unsigned long addr = (vma->vm_end & HPAGE_PMD_MASK) - HPAGE_PMD_SIZE;
-
-	if (!transhuge_vma_suitable(vma, addr))
+	if (!transhuge_vma_size_ok(vma))
 		return false;
 	if (vma_is_anonymous(vma))
 		return __transparent_hugepage_enabled(vma);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 84b9cf4b9be9..d0f8020164fc 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -454,6 +454,9 @@  bool hugepage_vma_check(struct vm_area_struct *vma,
 				vma->vm_pgoff, HPAGE_PMD_NR))
 		return false;
 
+	if (!transhuge_vma_size_ok(vma))
+		return false;
+
 	/* Enabled via shmem mount options or sysfs settings. */
 	if (shmem_file(vma->vm_file))
 		return shmem_huge_enabled(vma);
@@ -512,9 +515,7 @@  void khugepaged_enter_vma(struct vm_area_struct *vma,
 			  unsigned long vm_flags)
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
-	    khugepaged_enabled() &&
-	    (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
-	     (vma->vm_end & HPAGE_PMD_MASK))) {
+	    khugepaged_enabled()) {
 		if (hugepage_vma_check(vma, vm_flags))
 			__khugepaged_enter(vma->vm_mm);
 	}
@@ -2142,10 +2143,9 @@  static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
 			progress++;
 			continue;
 		}
-		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+
+		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
 		hend = vma->vm_end & HPAGE_PMD_MASK;
-		if (hstart >= hend)
-			goto skip;
 		if (khugepaged_scan.address > hend)
 			goto skip;
 		if (khugepaged_scan.address < hstart)