@@ -94,6 +94,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	pmd_t pmdval;
 	int ret;
 
 	pgd = pgd_offset(vma->vm_mm, address);
@@ -112,16 +113,22 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	if (pmd_none_or_clear_bad(pmd))
 		return 0;
 
+again:
 	/*
 	 * This is called while another page table is mapped, so we
 	 * must use the nested version. This also means we need to
 	 * open-code the spin-locking.
 	 */
-	pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);
+	pte = pte_offset_map_maywrite_nolock(vma->vm_mm, pmd, address, &pmdval, &ptl);
 	if (!pte)
 		return 0;
 
 	do_pte_lock(ptl);
 
+	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
+		do_pte_unlock(ptl);
+		pte_unmap(pte);
+		goto again;
+	}
 	ret = do_adjust_pte(vma, address, pfn, pte);
 
In do_adjust_pte(), we may modify the pte entry. At this time, the write
lock of mmap_lock is not held, and the pte_same() check is not performed
after the PTL is held, so the corresponding pmd entry may have been
modified concurrently. Therefore, in order to ensure the stability of the
pmd entry, use pte_offset_map_maywrite_nolock() to replace
pte_offset_map_nolock(), and do the pmd_same() check after holding the
PTL.

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 arch/arm/mm/fault-armv.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)
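
For readers outside the kernel tree, the retry pattern introduced above
(lockless snapshot, take the lock, revalidate, retry on mismatch) can be
illustrated with a small userspace analogue. This is a minimal sketch
under stated assumptions, not kernel code: the atomic slot, table_lock
and adjust_entry() below are hypothetical stand-ins for the pmd entry,
the PTL and adjust_pte(), assuming a concurrent thread may repoint the
slot between the snapshot and the lock acquisition.

/*
 * Userspace analogue of the pmd_same() recheck pattern. All names are
 * illustrative; none of this is kernel API. Build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct table { int entry; };

static _Atomic(struct table *) slot;	/* stands in for the pmd entry */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;	/* the "PTL" */

static void adjust_entry(void)
{
	struct table *snap, *cur;

again:
	/* 1. Snapshot the slot without the lock (cf. pmdp_get_lockless()). */
	snap = atomic_load(&slot);

	/* 2. Take the lock that protects the entries (cf. do_pte_lock()). */
	pthread_mutex_lock(&table_lock);

	/* 3. Revalidate under the lock (cf. pmd_same()): a concurrent
	 *    writer may have repointed the slot before we got the lock. */
	cur = atomic_load(&slot);
	if (cur != snap) {
		pthread_mutex_unlock(&table_lock);
		goto again;	/* drop everything and start over */
	}

	/* 4. The snapshot is still the live table; safe to modify. */
	cur->entry++;
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	static struct table t = { .entry = 0 };

	atomic_store(&slot, &t);
	adjust_entry();
	printf("entry = %d\n", t.entry);
	return 0;
}

The point mirrored from the patch is step 3: taking the lock alone is not
enough, because the snapshot used to reach the lock-protected object may
already be stale. Rechecking it under the lock closes that window, and
retrying from the top is cheap because the race is rare (hence the
unlikely() hint in the patch).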