[v2,10/14] clear_huge_page: use uncached path

Message ID 20211020170305.376118-11-ankur.a.arora@oracle.com (mailing list archive)
State New
Series Use uncached stores while clearing huge pages

Commit Message

Ankur Arora Oct. 20, 2021, 5:03 p.m. UTC
Uncached stores are suitable when the region being written is not
expected to be read again soon, or is large enough that there is no
expectation of finding the data in the cache afterwards.
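
For illustration only (this is not part of the patch): on x86 such
uncached writes would typically be non-temporal stores, e.g. movnti,
followed by an sfence to order them against later accesses. A minimal
sketch of the idea, with the helper name clear_region_nt chosen here
purely for illustration:

  /*
   * Sketch: clear an 8-byte-aligned region with non-temporal stores
   * that bypass the cache, then fence so the stores are ordered
   * before subsequent accesses.
   */
  static void clear_region_nt(void *dst, unsigned long len)
  {
  	unsigned long *p = dst;
  	unsigned long i;

  	for (i = 0; i < len / sizeof(*p); i++)
  		asm volatile("movnti %1, %0" : "=m" (p[i]) : "r" (0UL));

  	asm volatile("sfence" ::: "memory");
  }

The primitives actually used below, clear_user_page_uncached() and
clear_page_uncached_make_coherent(), are the arch interfaces
introduced by earlier patches in this series.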

Add a new helper clear_subpage_uncached(), which handles the uncached
clearing path for huge and gigantic pages.

This path is always taken for gigantic pages; for huge pages it is
taken only if pages_per_huge_page is larger than the architectural
threshold, or if the user gives an explicit hint (say, for a bulk
transfer).
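
clear_page_prefer_uncached() itself is introduced by an earlier patch
in the series; only its use is new here. The size check can be
pictured as comparing the extent against a threshold derived from the
cache size, as in the sketch below; the threshold value and how it is
derived are assumptions made for illustration, not the actual policy
of the series:

  /*
   * Sketch only: prefer the uncached path once the extent is large
   * relative to the last-level cache. The threshold is an assumed
   * placeholder, not the real architectural value.
   */
  static unsigned long uncached_threshold = 8UL << 20; /* assumed ~LLC size */

  bool clear_page_prefer_uncached(unsigned long extent)
  {
  	return extent >= uncached_threshold;
  }

clear_huge_page() then takes the uncached path when either the
caller's explicit hint or this size check says so.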

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 include/linux/mm.h |  3 ++-
 mm/huge_memory.c   |  3 ++-
 mm/hugetlb.c       |  3 ++-
 mm/memory.c        | 46 ++++++++++++++++++++++++++++++++++++++++++----
 4 files changed, 48 insertions(+), 7 deletions(-)

Patch

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 49a97f817eb2..3e8ddec2aba2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3164,7 +3164,8 @@  enum mf_action_page_type {
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr_hint,
-			    unsigned int pages_per_huge_page);
+			    unsigned int pages_per_huge_page,
+			    bool hint_uncached);
 extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned long addr_hint,
 				struct vm_area_struct *vma,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5e9ef0fc261e..ffd4b07285ba 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -600,6 +600,7 @@  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 	pgtable_t pgtable;
 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
 	vm_fault_t ret = 0;
+	bool uncached = false;
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
@@ -617,7 +618,7 @@  static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 		goto release;
 	}
 
-	clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
+	clear_huge_page(page, vmf->address, HPAGE_PMD_NR, uncached);
 	/*
 	 * The memory barrier inside __SetPageUptodate makes sure that
 	 * clear_huge_page writes become visible before the set_pmd_at()
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 95dc7b83381f..a920b1133cdb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4874,6 +4874,7 @@  static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 	spinlock_t *ptl;
 	unsigned long haddr = address & huge_page_mask(h);
 	bool new_page, new_pagecache_page = false;
+	bool uncached = false;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -4928,7 +4929,7 @@  static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			spin_unlock(ptl);
 			goto out;
 		}
-		clear_huge_page(page, address, pages_per_huge_page(h));
+		clear_huge_page(page, address, pages_per_huge_page(h), uncached);
 		__SetPageUptodate(page);
 		new_page = true;
 
diff --git a/mm/memory.c b/mm/memory.c
index 9f6059520985..ef365948f595 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5347,6 +5347,27 @@  static inline void process_huge_page(
 	}
 }
 
+/*
+ * clear_subpage_uncached: clear the page in an uncached fashion.
+ */
+static void clear_subpage_uncached(unsigned long addr, int idx, void *arg)
+{
+	__incoherent void *kaddr;
+	struct page *page = arg;
+
+	page = page + idx;
+
+	/*
+	 * Do the kmap explicitly here since clear_user_page_uncached()
+	 * only handles __incoherent addresses.
+	 *
+	 * Caller is responsible for making the region coherent again.
+	 */
+	kaddr = (__incoherent void *)kmap_atomic(page);
+	clear_user_page_uncached(kaddr, addr, page);
+	kunmap_atomic((__force void *)kaddr);
+}
+
 static void clear_gigantic_page(struct page *page,
 				unsigned long addr,
 				unsigned int pages_per_huge_page)
@@ -5358,7 +5379,8 @@  static void clear_gigantic_page(struct page *page,
 	for (i = 0; i < pages_per_huge_page;
 	     i++, p = mem_map_next(p, page, i)) {
 		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
+
+		clear_subpage_uncached(addr + i * PAGE_SIZE, 0, p);
 	}
 }
 
@@ -5369,18 +5391,34 @@  static void clear_subpage(unsigned long addr, int idx, void *arg)
 	clear_user_highpage(page + idx, addr);
 }
 
-void clear_huge_page(struct page *page,
-		     unsigned long addr_hint, unsigned int pages_per_huge_page)
+void clear_huge_page(struct page *page, unsigned long addr_hint,
+			unsigned int pages_per_huge_page, bool uncached)
 {
 	unsigned long addr = addr_hint &
 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
 		clear_gigantic_page(page, addr, pages_per_huge_page);
+
+		/* Gigantic page clearing always uses __incoherent. */
+		clear_page_uncached_make_coherent();
 		return;
 	}
 
-	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+	/*
+	 * The uncached path is typically slower for small extents so take it
+	 * only if the user provides an explicit hint or if the extent is large
+	 * enough that there are no cache expectations.
+	 */
+	if (uncached ||
+		clear_page_prefer_uncached(pages_per_huge_page * PAGE_SIZE)) {
+		process_huge_page(addr_hint, pages_per_huge_page,
+					clear_subpage_uncached, page);
+
+		clear_page_uncached_make_coherent();
+	} else {
+		process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
+	}
 }
 
 static void copy_user_gigantic_page(struct page *dst, struct page *src,