@@ -626,6 +626,11 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 		VM_BUG_ON_FOLIO(!folio_test_anon(folio), folio);
+		if (_pte == pte && (order != HPAGE_PMD_ORDER) && (folio_order(folio) == order) &&
+		    test_bit(PG_head, &folio->page.flags) && !folio_test_partially_mapped(folio)) {
+			result = SCAN_PTE_MAPPED_THP;
+			goto out;
+		}
 		/* See hpage_collapse_scan_pmd(). */
 		if (folio_likely_mapped_shared(folio)) {
 			++shared;
@@ -1532,6 +1537,16 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
 			goto out_unmap;
 		}
+		/* Exit early: the fault handler has likely already installed this folio */
+		if (_pte == pte && (order != HPAGE_PMD_ORDER) && (folio_order(folio) == order) &&
+		    test_bit(PG_head, &folio->page.flags) && !folio_test_partially_mapped(folio)) {
+			pte_unmap_unlock(pte, ptl);
+			_address = address + (PAGE_SIZE << order);
+			_pte = pte + (1UL << order);
+			result = SCAN_PTE_MAPPED_THP;
+			goto decide_order;
+		}
+
 		/*
 		 * We treat a single page as shared if any part of the THP
 		 * is shared. "False negatives" from
Since mTHP orders under consideration by khugepaged are also candidates
for the fault handler, the case we hit frequently is that khugepaged
scans a region for order-x while an order-x folio was already installed
there by the fault handler. Therefore, exit early; this prevents a
timeout in the khugepaged selftest. Earlier this was not a problem
because a PMD-sized hugepage would be caught by
find_pmd_or_thp_or_none(), and the previous patch does not solve this
problem because it still performs the entire PTE scan before exiting.

Signed-off-by: Dev Jain <dev.jain@arm.com>
---
 mm/khugepaged.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)
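
For reviewers, the intent of the condition repeated in both hunks can be
read as a single predicate. The sketch below is illustrative only and is
not part of this patch; the helper name is invented, and it assumes the
locals (_pte, pte, folio, order) that are in scope at both call sites:

/*
 * Illustrative only, not part of this patch: has the fault handler
 * already installed a fully-mapped folio of exactly the order we are
 * scanning for, starting at the first PTE of this window? The checks
 * mirror the two hunks above.
 */
static inline bool mthp_collapse_already_done(pte_t *_pte, pte_t *pte,
					      struct folio *folio,
					      unsigned int order)
{
	return _pte == pte &&			/* first PTE of the scan window */
	       order != HPAGE_PMD_ORDER &&	/* PMD case is already caught by
						 * find_pmd_or_thp_or_none() */
	       folio_order(folio) == order &&	/* folio already has the target order */
	       test_bit(PG_head, &folio->page.flags) &&	/* compound (large) folio */
	       !folio_test_partially_mapped(folio);	/* and fully mapped */
}

At the scan-only call site the early exit additionally skips the whole
window (1UL << order PTEs, PAGE_SIZE << order bytes of address space)
and jumps to decide_order so the next candidate order can be considered.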