@@ -3329,6 +3329,9 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct folio *folio = NULL;
 
+	if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma))
+		return VM_FAULT_RETRY;
+
 	if (likely(!unshare)) {
 		if (userfaultfd_pte_wp(vma, *vmf->pte)) {
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
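
The new test is cheap: vma_is_anonymous() is just a NULL check on the
VMA's vm_ops, as defined in include/linux/mm.h:

static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

so taking the early VM_FAULT_RETRY exit for file-backed VMAs costs a
single branch at the top of do_wp_page().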
@@ -3644,6 +3647,8 @@ static vm_fault_t do_pte_missing(struct vm_fault *vmf)
 {
 	if (vma_is_anonymous(vmf->vma))
 		return do_anonymous_page(vmf);
+	else if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+		return VM_FAULT_RETRY;
 	else
 		return do_fault(vmf);
 }
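
The dispatcher is short enough that the post-patch function can be
reassembled in full from the hunk above:

static vm_fault_t do_pte_missing(struct vm_fault *vmf)
{
	if (vma_is_anonymous(vmf->vma))
		return do_anonymous_page(vmf);
	else if (vmf->flags & FAULT_FLAG_VMA_LOCK)
		return VM_FAULT_RETRY;
	else
		return do_fault(vmf);
}

Anonymous missing faults are handled under the VMA lock; file-backed
ones (do_fault() and the fault-around code behind it) still fall back
to the mmap-lock path.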
@@ -4896,9 +4901,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 {
 	pte_t entry;
 
-	if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma))
-		return VM_FAULT_RETRY;
-
 	if (unlikely(pmd_none(*vmf->pmd))) {
 		/*
 		 * Leave __pte_alloc() until later: because vm_ops->fault may
@@ -4957,6 +4959,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
 		return do_numa_page(vmf);
 
+	if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma))
+		return VM_FAULT_RETRY;
+
 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
 	spin_lock(vmf->ptl);
 	entry = vmf->orig_pte;
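
With the check moved below the do_numa_page() dispatch, do_swap_page()
also becomes reachable under the VMA lock for non-anonymous VMAs (e.g.
for migration entries).  That case is already covered: do_swap_page()
has carried its own bailout since per-VMA locks were merged, along the
lines of (quoted from memory, a sketch rather than an exact excerpt):

	/* early in do_swap_page() */
	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
		ret = VM_FAULT_RETRY;
		goto out;
	}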
Call do_pte_missing(), do_wp_page() and do_numa_page() under the VMA
lock.  The first two gain the check, but do_numa_page() looks safe to
run outside the mmap lock.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory.c | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
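
For context, VM_FAULT_RETRY here is not an error: it tells the arch
fault handler to drop the VMA lock and repeat the fault under the mmap
lock.  Condensed from the x86 do_user_addr_fault() of this era (a
sketch; details vary by architecture and release):

	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;
	if (unlikely(access_error(error_code, vma))) {
		vma_end_read(vma);
		goto lock_mmap;
	}
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;
	}
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
lock_mmap:
	mmap_read_lock(mm);

Each hunk above therefore only widens what can complete on the fast
path; anything that still returns VM_FAULT_RETRY is simply redone the
old way under the mmap lock.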