@@ -13,6 +13,7 @@
EM( SCAN_PMD_NULL, "pmd_null") \
EM( SCAN_PMD_NONE, "pmd_none") \
EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \
+ EM( SCAN_COW_PTE, "cowed_pte") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \
@@ -31,6 +31,7 @@ enum scan_result {
SCAN_PMD_NULL,
SCAN_PMD_NONE,
SCAN_PMD_MAPPED,
+ SCAN_COW_PTE,
SCAN_EXCEED_NONE_PTE,
SCAN_EXCEED_SWAP_PTE,
SCAN_EXCEED_SHARED_PTE,
@@ -886,7 +887,7 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm,
return SCAN_PMD_MAPPED;
if (pmd_devmap(pmde))
return SCAN_PMD_NULL;
- if (pmd_bad(pmde))
+ if (pmd_write(pmde) && pmd_bad(pmde))
return SCAN_PMD_NULL;
return SCAN_SUCCEED;
}
@@ -937,6 +938,8 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
pte_unmap(vmf.pte);
continue;
}
+ if (break_cow_pte(vma, pmd, address))
+ return SCAN_COW_PTE;
ret = do_swap_page(&vmf);
/*
@@ -1049,6 +1052,9 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
if (result != SCAN_SUCCEED)
goto out_up_write;
+ /* We should have already handled the COW-ed PTE. */
+ VM_WARN_ON(test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd));
+
anon_vma_lock_write(vma->anon_vma);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
@@ -1159,6 +1165,13 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
+
+ /* Break COW PTE before we collapse the pages. */
+ if (break_cow_pte(vma, pmd, address)) {
+ result = SCAN_COW_PTE;
+ goto out;
+ }
+
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
@@ -1217,6 +1230,10 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
goto out_unmap;
}
+ /*
+ * Even if we have just broken the COW PTE, the page is usually
+ * still COW-mapped and may therefore still be shared.
+ */
if (page_mapcount(page) > 1) {
++shared;
if (cc->is_khugepaged &&
@@ -1512,6 +1529,11 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
goto drop_hpage;
}
+ /* We should not collapse a COW-ed PTE; break it first. */
+ if (break_cow_pte(vma, pmd, haddr))
+ goto drop_hpage;
+ VM_WARN_ON(test_bit(MMF_COW_PTE, &mm->flags) && !pmd_write(*pmd));
+
/*
* We need to lock the mapping so that from here on, only GUP-fast and
* hardware page walks can access the parts of the page tables that
@@ -1717,6 +1739,11 @@ static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
result = SCAN_PTE_UFFD_WP;
goto unlock_next;
}
+ if (test_bit(MMF_COW_PTE, &mm->flags) &&
+ !pmd_write(*pmd)) {
+ result = SCAN_COW_PTE;
+ goto unlock_next;
+ }
collapse_and_free_pmd(mm, vma, addr, pmd);
if (!cc->is_khugepaged && is_target)
result = set_huge_pmd(vma, addr, pmd, hpage);
@@ -2154,6 +2181,11 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
swap = 0;
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
+ if (break_cow_pte(find_vma(mm, addr), NULL, addr)) {
+ result = SCAN_COW_PTE;
+ goto out;
+ }
+
rcu_read_lock();
xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
if (xas_retry(&xas, page))
@@ -2224,6 +2256,7 @@ static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
}
rcu_read_unlock();
+out:
if (result == SCAN_SUCCEED) {
if (cc->is_khugepaged &&
present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
We should not allow THP to collapse a COW-ed PTE, so break the COW PTE
before collapse_pte_mapped_thp() collapses it to a THP, and break the
COW PTE before hpage_collapse_scan_pmd() scans the PTEs. Since a
COW-shared PTE table is write-protected at the PMD level, teach
find_pmd_or_thp_or_none() to apply the pmd_bad() check only to
writable PMD entries.

Signed-off-by: Chih-En Lin <shiyn.lin@gmail.com>
---
 include/trace/events/huge_memory.h |  1 +
 mm/khugepaged.c                    | 35 +++++++++++++++++++++++++++++-
 2 files changed, 35 insertions(+), 1 deletion(-)
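
For context, the pattern this patch repeats at each collapse entry point
is sketched below. It assumes, as introduced earlier in this series, that
break_cow_pte() un-shares a COW-ed PTE table and returns 0 on success;
the helper name break_before_scan_example() is hypothetical and only
illustrates the call pattern, it is not part of the patch.

/*
 * Illustrative sketch only (not part of this patch): un-share the
 * COW-ed PTE table before khugepaged touches it. While the table is
 * shared, its PMD entry is read-only and the PTEs may be visible to
 * another mm as well, so collapsing through it would be unsafe.
 */
static int break_before_scan_example(struct vm_area_struct *vma,
				     pmd_t *pmd, unsigned long addr)
{
	if (break_cow_pte(vma, pmd, addr))
		return SCAN_COW_PTE;	/* bail out; khugepaged may retry later */

	/* The PTE table is now exclusive to this mm; scanning can proceed. */
	return SCAN_SUCCEED;
}

Returning SCAN_COW_PTE instead of a hard error keeps this in line with
the other soft-failure SCAN_* results: the scan simply moves on, and the
range can be revisited on a later pass.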