[RFC,v2,2/2] mm: shmem: use mTHP interface to control huge orders for tmpfs

Message ID: bcfa80f6293affdebb7e7bf70200133b65e73a6b.1727338549.git.baolin.wang@linux.alibaba.com
Series: Support large folios for tmpfs

Commit Message

Baolin Wang Sept. 26, 2024, 8:27 a.m. UTC
For writable mmap() faults on tmpfs, the mTHP interface controls which
huge orders are allowed, while 'huge_shmem_orders_inherit' maintains
backward compatibility with the top-level interface.

For the write() and fallocate() paths on tmpfs, derive the highest
order hint from the size of the write or fallocate request, then try
each allowable huge order up to that hint, filtered by the mTHP
interfaces if they are set.
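
To make the order-hint math concrete, here is a minimal userspace
sketch (illustration only): shmem_size_order_hint() is a hypothetical,
simplified stand-in for the kernel's shmem_mapping_size_order(); the
real helper may additionally account for index alignment and mapping
limits.

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define BIT(n) (1UL << (n))

/* Highest folio order that still fits entirely in a write of 'len' bytes. */
static unsigned int shmem_size_order_hint(size_t len)
{
	unsigned int order = 0;

	while ((1UL << (order + 1 + PAGE_SHIFT)) <= len)
		order++;
	return order;
}

int main(void)
{
	size_t len = 64 * 1024;		/* a 64KB write starting at index 0 */
	unsigned int order = shmem_size_order_hint(len);
	unsigned long mask = order > 0 ? BIT(order + 1) - 1 : 0;

	/* Prints: order hint 4, allowable order mask 0x1f (orders 0..4) */
	printf("order hint %u, allowable order mask 0x%lx (orders 0..%u)\n",
	       order, mask, order);
	return 0;
}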

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 mm/memory.c |  4 ++--
 mm/shmem.c  | 51 ++++++++++++++++++++++++++-------------------------
 2 files changed, 28 insertions(+), 27 deletions(-)

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 2366578015ad..99dd75b84605 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5098,10 +5098,10 @@  vm_fault_t finish_fault(struct vm_fault *vmf)
 
 	/*
 	 * Using per-page fault to maintain the uffd semantics, and same
-	 * approach also applies to non-anonymous-shmem faults to avoid
+	 * approach also applies to non-shmem/tmpfs faults to avoid
 	 * inflating the RSS of the process.
 	 */
-	if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
+	if (!vma_is_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
 		nr_pages = 1;
 	} else if (nr_pages > 1) {
 		pgoff_t idx = folio_page_idx(folio, page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 6dece90ff421..569d3ab37161 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1721,31 +1721,6 @@  unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
 		return 0;
 
-	global_huge = shmem_huge_global_enabled(inode, index, write_end,
-					shmem_huge_force, vma, vm_flags);
-	if (!vma || !vma_is_anon_shmem(vma)) {
-		size_t len;
-
-		/*
-		 * For tmpfs, if top level huge page is enabled, we just allow
-		 * PMD sized THP to keep interface backward compatibility.
-		 */
-		if (global_huge)
-			return BIT(HPAGE_PMD_ORDER);
-
-		if (!write_end)
-			return 0;
-
-		/*
-		 * Otherwise, get a highest order hint based on the size of
-		 * write and fallocate paths, then will try each allowable
-		 * huge orders.
-		 */
-		len = write_end - (index << PAGE_SHIFT);
-		order = shmem_mapping_size_order(inode->i_mapping, index, len);
-		return order > 0 ? BIT(order + 1) - 1 : 0;
-	}
-
 	/*
 	 * Following the 'deny' semantics of the top level, force the huge
 	 * option off from all mounts.
@@ -1776,9 +1751,35 @@  unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	if (vm_flags & VM_HUGEPAGE)
 		mask |= READ_ONCE(huge_shmem_orders_madvise);
 
+	global_huge = shmem_huge_global_enabled(inode, index, write_end,
+						shmem_huge_force, vma, vm_flags);
 	if (global_huge)
 		mask |= READ_ONCE(huge_shmem_orders_inherit);
 
+	/*
+	 * For writable mmap() faults on tmpfs, the mTHP interface controls
+	 * which huge orders are allowed, while 'huge_shmem_orders_inherit'
+	 * maintains backward compatibility with the top-level interface.
+	 *
+	 * For the write() and fallocate() paths on tmpfs, derive the
+	 * highest order hint from the size of the write or fallocate
+	 * request, then try each allowable huge order up to that hint,
+	 * filtered by the mTHP interfaces if they are set (otherwise the
+	 * hint alone bounds the allowable orders).
+	 */
+	if (!vma && !global_huge) {
+		size_t len;
+
+		if (!write_end)
+			return 0;
+
+		len = write_end - (index << PAGE_SHIFT);
+		order = shmem_mapping_size_order(inode->i_mapping, index, len);
+		if (!mask)
+			return order > 0 ? BIT(order + 1) - 1 : 0;
+
+		mask &= BIT(order + 1) - 1;
+	}
 	return THP_ORDERS_ALL_FILE_DEFAULT & mask;
 }
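
To summarize the control flow this hunk produces, here is a hedged,
self-contained sketch; every identifier (is_mmap_fault, mthp_mask,
order_hint, INHERIT_MASK, FILE_DEFAULT_MASK) is an illustrative
stand-in, not the kernel's own (which works on the vma, the
huge_shmem_orders_* masks and THP_ORDERS_ALL_FILE_DEFAULT).

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define INHERIT_MASK      BIT(9)	/* e.g. PMD order 9 on x86-64 with 4K pages */
#define FILE_DEFAULT_MASK (BIT(10) - 1)	/* stand-in for THP_ORDERS_ALL_FILE_DEFAULT */

static unsigned long allowable_orders(int is_mmap_fault, int global_huge,
				      unsigned long mthp_mask,
				      unsigned int order_hint, int has_write_end)
{
	unsigned long mask = mthp_mask;	/* always/within_size/madvise bits */

	if (global_huge)
		mask |= INHERIT_MASK;	/* huge_shmem_orders_inherit */

	/* tmpfs write()/fallocate() path, no top-level 'huge' override */
	if (!is_mmap_fault && !global_huge) {
		if (!has_write_end)
			return 0;
		if (!mask)		/* no mTHP filter: the hint alone decides */
			return order_hint > 0 ? BIT(order_hint + 1) - 1 : 0;
		mask &= BIT(order_hint + 1) - 1;	/* mTHP filter capped by the hint */
	}
	return FILE_DEFAULT_MASK & mask;
}

int main(void)
{
	/* write() path, no top-level huge, 64KB hint, mTHP allows orders 0 and 4 */
	printf("0x%lx\n", allowable_orders(0, 0, BIT(4) | BIT(0), 4, 1));	/* 0x11 */
	return 0;
}

Note how, with the top-level 'huge' setting enabled, the write path
skips the size hint and relies on the inherit (plus any per-size)
orders, which preserves the old PMD-only behaviour whenever 'inherit'
follows the top-level interface.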