Push the check down from __handle_mm_fault().  There's a mild upside to
this patch in that we'll allocate the page tables while under the VMA
lock rather than the mmap lock, reducing the hold time on the mmap lock,
since the retry will find the page tables already populated.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

@@ -4896,6 +4896,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 {
 	pte_t entry;
 
+	if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma))
+		return VM_FAULT_RETRY;
+
 	if (unlikely(pmd_none(*vmf->pmd))) {
 		/*
 		 * Leave __pte_alloc() until later: because vm_ops->fault may
@@ -5014,9 +5017,6 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	p4d_t *p4d;
 	vm_fault_t ret;
 
-	if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma))
-		return VM_FAULT_RETRY;
-
 	pgd = pgd_offset(mm, address);
 	p4d = p4d_alloc(mm, pgd, address);
 	if (!p4d)
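To make the "retry will find the page tables already populated" argument concrete, here is a condensed sketch of the caller side: the per-VMA-lock fast path as an arch fault handler uses it, loosely modelled on x86's do_user_addr_fault(). The helpers named here (lock_vma_under_rcu(), vma_end_read(), lock_mm_and_find_vma(), handle_mm_fault()) are real kernel functions, but the glue, flags handling and error paths are heavily simplified, so treat this as an illustration rather than the actual handler.

```c
/*
 * Condensed sketch (not the real kernel code) of the per-VMA-lock fast
 * path in an arch page fault handler, loosely modelled on x86's
 * do_user_addr_fault().  Accounting, signal handling and most flag
 * manipulation are omitted; the point is where the mmap-lock retry runs.
 */
#include <linux/mm.h>

void vma_lock_fault_sketch(struct mm_struct *mm, unsigned long address,
			   unsigned int flags, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	/* Fast path: take only the per-VMA read lock, no mmap lock. */
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		goto lock_mmap;

	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		vma_end_read(vma);
	if (!(fault & VM_FAULT_RETRY))
		return;		/* handled entirely under the VMA lock */

	/*
	 * File-backed VMA: handle_pte_fault() bailed out with
	 * VM_FAULT_RETRY, but only after __handle_mm_fault() had already
	 * allocated the p4d/pud/pmd levels under the VMA lock.  The retry
	 * below therefore finds those levels populated and spends less
	 * time holding the mmap lock.
	 */
lock_mmap:
	vma = lock_mm_and_find_vma(mm, address, regs);
	if (!vma)
		return;		/* lock already dropped on failure */

	fault = handle_mm_fault(vma, address, flags, regs);
	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
		mmap_read_unlock(mm);	/* retry/completed drop it themselves */
}
```

The fallback path is exactly what ran before this patch; the only difference is how much work the aborted VMA-lock attempt leaves behind for it.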