diff mbox series

[v3,15/21] mm/clear_page: add clear_page_non_caching_threshold()

Message ID 20220606203725.1313715-11-ankur.a.arora@oracle.com (mailing list archive)
State New
Headers show
Series huge page clearing optimizations | expand

Commit Message

Ankur Arora June 6, 2022, 8:37 p.m. UTC
Introduce clear_page_non_caching_threshold_pages which specifies the
threshold above which clear_page_incoherent() is used.

The ideal threshold value depends on the CPU uarch and where the
performance curves for cached and non-cached stores intersect.

Typically this would depend on microarchitectural details and
the LLC-size. Here, we arbitrarily choose a default value of
8MB (CLEAR_PAGE_NON_CACHING_THRESHOLD), a reasonably large LLC.

Also define clear_page_prefer_non_caching() which provides the
interface for querying this.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 include/asm-generic/clear_page.h |  4 ++++
 include/linux/mm.h               |  6 ++++++
 mm/memory.c                      | 25 +++++++++++++++++++++++++
 3 files changed, 35 insertions(+)
diff mbox series

Patch

diff --git a/include/asm-generic/clear_page.h b/include/asm-generic/clear_page.h
index 0ebff70a60a9..b790000661ce 100644
--- a/include/asm-generic/clear_page.h
+++ b/include/asm-generic/clear_page.h
@@ -62,4 +62,8 @@  static inline void clear_page_make_coherent(void) { }
 #endif /* __ASSEMBLY__ */
 #endif /* __HAVE_ARCH_CLEAR_USER_PAGES_INCOHERENT */
 
+#ifndef __ASSEMBLY__
+extern unsigned long __init arch_clear_page_non_caching_threshold(void);
+#endif
+
 #endif /* __ASM_GENERIC_CLEAR_PAGE_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bc8f326be0ce..5084571b2fb6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3328,6 +3328,12 @@  static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
 				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
 }
 
+extern bool clear_page_prefer_non_caching(unsigned long extent);
+#else /* !(CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS) */
+static inline bool clear_page_prefer_non_caching(unsigned long extent)
+{
+	return false;
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
diff --git a/mm/memory.c b/mm/memory.c
index 04c6bb5d75f6..b78b32a3e915 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5563,10 +5563,28 @@  EXPORT_SYMBOL(__might_fault);
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 
+/*
+ * Default size beyond which huge page clearing uses the non-caching
+ * path. Size it for a reasonable sized LLC.
+ */
+#define CLEAR_PAGE_NON_CACHING_THRESHOLD	(8 << 20)
 static unsigned int __ro_after_init clear_page_unit = 1;
+
+static unsigned long __read_mostly clear_page_non_caching_threshold_pages =
+				CLEAR_PAGE_NON_CACHING_THRESHOLD / PAGE_SIZE;
+
+/* Arch code can override for a machine specific value. */
+unsigned long __weak __init arch_clear_page_non_caching_threshold(void)
+{
+	return CLEAR_PAGE_NON_CACHING_THRESHOLD;
+}
+
 static int __init setup_clear_page_params(void)
 {
 	clear_page_unit = 1 << min(MAX_ORDER - 1, ARCH_MAX_CLEAR_PAGES_ORDER);
+
+	clear_page_non_caching_threshold_pages =
+		arch_clear_page_non_caching_threshold() / PAGE_SIZE;
 	return 0;
 }
 
@@ -5576,6 +5594,13 @@  static int __init setup_clear_page_params(void)
  */
 late_initcall(setup_clear_page_params);
 
+bool clear_page_prefer_non_caching(unsigned long extent)
+{
+	unsigned long pages = extent / PAGE_SIZE;
+
+	return pages >= clear_page_non_caching_threshold_pages;
+}
+
 /*
  * Clear a page extent.
  *