@@ -143,8 +143,13 @@ static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
/*
* to be used on vmas which are known to support THP.
* Use transparent_hugepage_active otherwise
+ *
+ * madv_thp_vm_flags is used in place of vma->vm_flags for the VM_NOHUGEPAGE
+ * and VM_HUGEPAGE checks. The principal use is ignoring VM_NOHUGEPAGE when
+ * in madvise collapse context.
*/
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma,
+ unsigned long madv_thp_vm_flags)
{
/*
@@ -153,7 +158,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
return false;
- if (!transhuge_vma_enabled(vma, vma->vm_flags))
+ if (!transhuge_vma_enabled(vma, madv_thp_vm_flags))
return false;
if (vma_is_temporary_stack(vma))
@@ -167,7 +172,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
if (transparent_hugepage_flags &
(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
- return !!(vma->vm_flags & VM_HUGEPAGE);
+ return !!(madv_thp_vm_flags & VM_HUGEPAGE);
return false;
}
@@ -316,7 +321,8 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return false;
}
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma,
+ unsigned long madv_thp_vm_flags)
{
return false;
}
@@ -83,7 +83,7 @@ bool transparent_hugepage_active(struct vm_area_struct *vma)
if (!transhuge_vma_suitable(vma, addr))
return false;
if (vma_is_anonymous(vma))
- return __transparent_hugepage_enabled(vma);
+ return __transparent_hugepage_enabled(vma, vma->vm_flags);
if (vma_is_shmem(vma))
return shmem_huge_enabled(vma);
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
@@ -4695,7 +4695,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
if (!vmf.pud)
return VM_FAULT_OOM;
retry_pud:
- if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
+ if (pud_none(*vmf.pud) &&
+ __transparent_hugepage_enabled(vma, vma->vm_flags)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -4726,7 +4727,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
if (pud_trans_unstable(vmf.pud))
goto retry_pud;
- if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
+ if (pmd_none(*vmf.pmd) &&
+ __transparent_hugepage_enabled(vma, vma->vm_flags)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
Later in the series, in madvise collapse context, we will want to optionally ignore MADV_NOHUGEPAGE. However, we'd also like to standardize on __transparent_hugepage_enabled() for determining anon THP eligibility. Add a new argument to __transparent_hugepage_enabled() which represents the vma flags to be used instead of those in vma->vm_flags for the VM_[NO]HUGEPAGE checks. I.e. checks inside __transparent_hugepage_enabled() which previously didn't care about madvise settings, such as the DAX check or the stack check, are unaffected. Signed-off-by: Zach O'Keefe <zokeefe@google.com> --- include/linux/huge_mm.h | 14 ++++++++++---- mm/huge_memory.c | 2 +- mm/memory.c | 6 ++++-- 3 files changed, 15 insertions(+), 7 deletions(-)