@@ -1721,6 +1721,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		spinlock_t *pml;
 		spinlock_t *ptl;
 		bool skipped_uffd = false;
+		pte_t *pte;
 
 		/*
 		 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
@@ -1756,11 +1757,25 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
 					addr, addr + HPAGE_PMD_SIZE);
 		mmu_notifier_invalidate_range_start(&range);
 
+		pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pgt_pmd, &ptl);
+		if (!pte) {
+			mmu_notifier_invalidate_range_end(&range);
+			continue;
+		}
+
 		pml = pmd_lock(mm, pmd);
-		ptl = pte_lockptr(mm, pmd);
 		if (ptl != pml)
 			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+		if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd)))) {
+			pte_unmap_unlock(pte, ptl);
+			if (ptl != pml)
+				spin_unlock(pml);
+			mmu_notifier_invalidate_range_end(&range);
+			continue;
+		}
+		pte_unmap(pte);
+
 		/*
 		 * Huge page lock is still held, so normally the page table
 		 * must remain empty; and we have already skipped anon_vma
In retract_page_tables(), we may modify the pmd entry after acquiring
the pml and ptl, so we should also check whether the pmd entry is
stable. Use pte_offset_map_rw_nolock() + pmd_same() to do this, which
also lets us remove the call to pte_lockptr().

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 mm/khugepaged.c | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)
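For readers unfamiliar with the pattern, below is a minimal sketch of the
stable-pmd check the patch introduces: record the pmd value while mapping
the PTE table without its lock, take the pmd and PTE locks, then recheck
the pmd before trusting the mapping. The recheck is needed because another
thread may change or clear the pmd between the unlocked map and the moment
both locks are held; pmd_same() confirms nothing changed in that window.
The function name pmd_stable_example() and the success-path unlocks are
illustrative assumptions, not part of the patch.

	/*
	 * Sketch only, assuming the pte_offset_map_rw_nolock() API added
	 * earlier in this series; not a compilable kernel unit on its own.
	 */
	#include <linux/mm.h>
	#include <linux/spinlock.h>

	static bool pmd_stable_example(struct mm_struct *mm, pmd_t *pmd,
				       unsigned long addr)
	{
		spinlock_t *pml, *ptl;
		pmd_t pgt_pmd;
		pte_t *pte;

		/* Map the PTE table; pgt_pmd records the pmd value seen now. */
		pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pgt_pmd, &ptl);
		if (!pte)
			return false;	/* no page table to work on */

		pml = pmd_lock(mm, pmd);
		if (ptl != pml)
			spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);

		/* The pmd may have changed before the locks were taken. */
		if (unlikely(!pmd_same(pgt_pmd, pmdp_get_lockless(pmd)))) {
			pte_unmap_unlock(pte, ptl);
			if (ptl != pml)
				spin_unlock(pml);
			return false;
		}
		pte_unmap(pte);

		/* pmd is stable under pml/ptl: safe to modify the page table. */
		if (ptl != pml)
			spin_unlock(ptl);
		spin_unlock(pml);
		return true;
	}

Note that pte_offset_map_rw_nolock() returns the ptl for the PTE table
via its last argument, which is why the patch can drop the separate
pte_lockptr() call: the lock pointer is obtained at map time and only
needs to be taken (and, if distinct from the pmd lock, nested) afterwards.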