--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -45,6 +45,7 @@ enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_UNSUPPORTED,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+ TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
@@ -125,6 +126,7 @@ static inline bool hugepage_global_enabled(void)
{
return transparent_hugepage_flags &
((1<<TRANSPARENT_HUGEPAGE_FLAG) |
+ (1<<TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG) |
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

@@ -134,6 +136,12 @@ static inline bool hugepage_global_always(void)
(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

+static inline bool hugepage_global_defer(void)
+{
+ return transparent_hugepage_flags &
+ (1<<TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG);
+}
+
static inline int highest_order(unsigned long orders)
{
return fls_long(orders) - 1;
@@ -243,13 +251,16 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
unsigned long tva_flags,
unsigned long orders)
{
+ if ((tva_flags & TVA_IN_PF) && hugepage_global_defer() &&
+ !(vm_flags & VM_HUGEPAGE))
+ return 0;
+
/* Optimization to check if required orders are enabled early. */
if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
unsigned long mask = READ_ONCE(huge_anon_orders_always);
-
if (vm_flags & VM_HUGEPAGE)
mask |= READ_ONCE(huge_anon_orders_madvise);
- if (hugepage_global_always() ||
+ if (hugepage_global_always() || hugepage_global_defer() ||
((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
mask |= READ_ONCE(huge_anon_orders_inherit);

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -294,12 +294,15 @@ static ssize_t enabled_show(struct kobject *kobj,
const char *output;

if (test_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags))
- output = "[always] madvise never";
+ output = "[always] madvise defer never";
else if (test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags))
- output = "always [madvise] never";
+ output = "always [madvise] defer never";
+ else if (test_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+ &transparent_hugepage_flags))
+ output = "always madvise [defer] never";
else
- output = "always madvise [never]";
+ output = "always madvise defer [never]";

return sysfs_emit(buf, "%s\n", output);
}
@@ -312,13 +315,20 @@ static ssize_t enabled_store(struct kobject *kobj,
if (sysfs_streq(buf, "always")) {
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG, &transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ } else if (sysfs_streq(buf, "defer")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG, &transparent_hugepage_flags);
} else if (sysfs_streq(buf, "madvise")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG, &transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
} else if (sysfs_streq(buf, "never")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG, &transparent_hugepage_flags);
} else
ret = -EINVAL;

@@ -817,18 +827,31 @@ static int __init setup_transparent_hugepage(char *str)
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+ &transparent_hugepage_flags);
ret = 1;
+ } else if (!strcmp(str, "defer")) {
+ clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
+ &transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+ &transparent_hugepage_flags);
+ set_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+ &transparent_hugepage_flags);
} else if (!strcmp(str, "madvise")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+ &transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
- &transparent_hugepage_flags);
+ &transparent_hugepage_flags);
ret = 1;
} else if (!strcmp(str, "never")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
+ clear_bit(TRANSPARENT_HUGEPAGE_DEFER_PF_INST_FLAG,
+ &transparent_hugepage_flags);
ret = 1;
}
out:
Setting /sys/kernel/mm/transparent_hugepage/enabled to "always" allows
applications to benefit from THPs without having to madvise. However,
the page fault handler gives very little consideration to whether or
not a THP should actually be used, which can lead to a lot of wasted
memory. khugepaged only operates on memory that was either allocated
with enabled=always or marked with MADV_HUGEPAGE.

Introduce the ability to set enabled=defer, which prevents THPs from
being allocated by the page fault handler unless madvise is set,
leaving it up to khugepaged to decide which allocations will collapse
to a THP. This should allow applications to benefit from THPs while
curbing some of the memory waste.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Barry Song <baohua@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Rafael Aquini <aquini@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Nico Pache <npache@redhat.com>
---
 include/linux/huge_mm.h | 15 +++++++++++++--
 mm/huge_memory.c        | 31 +++++++++++++++++++++++++++----
 2 files changed, 40 insertions(+), 6 deletions(-)
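
For reference, a brief usage sketch of the new mode; this is
illustrative only and not part of the patch. The path and output
strings follow the sysfs handlers above:

  echo defer > /sys/kernel/mm/transparent_hugepage/enabled

  cat /sys/kernel/mm/transparent_hugepage/enabled
  always madvise [defer] never

The same policy can be selected at boot with transparent_hugepage=defer
on the kernel command line (see setup_transparent_hugepage() above).
With defer active, the page fault handler only installs THPs in VMAs
marked with madvise(MADV_HUGEPAGE); all other eligible memory is left
to khugepaged to collapse.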