@@ -94,6 +94,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	pmd_t pmdval;
 	int ret;
 
 	pgd = pgd_offset(vma->vm_mm, address);
@@ -112,16 +113,22 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
 	if (pmd_none_or_clear_bad(pmd))
 		return 0;
 
+again:
 	/*
 	 * This is called while another page table is mapped, so we
 	 * must use the nested version. This also means we need to
 	 * open-code the spin-locking.
 	 */
-	pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);
+	pte = pte_offset_map_rw_nolock(vma->vm_mm, pmd, address, &pmdval, &ptl);
 	if (!pte)
 		return 0;
 
 	do_pte_lock(ptl);
 
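+	/* Retry if the PMD changed (e.g. PTE page freed) before the PTL was taken. */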
+	if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
+		do_pte_unlock(ptl);
+		pte_unmap(pte);
+		goto again;
+	}
 	ret = do_adjust_pte(vma, address, pfn, pte);