[5/6] mm: Move the FAULT_FLAG_VMA_LOCK check down from do_pte_missing()

Message ID: 20230404135850.3673404-6-willy@infradead.org
State: New
Series: Avoid the mmap lock for fault-around

Commit Message

Matthew Wilcox April 4, 2023, 1:58 p.m. UTC
Perform the check at the start of do_read_fault(), do_cow_fault()
and do_shared_fault() instead.  There should be no performance change
from the previous commit.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/memory.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
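
For context, a simplified sketch of the dispatch path once this patch is
applied (not verbatim mm/memory.c: error handling, the huge-page paths and
do_fault()'s handling of a missing ->fault handler are elided).  Each of the
three handlers now opens with the bail-out that previously lived in
do_pte_missing(), so a fault taken under the per-VMA lock still returns
VM_FAULT_RETRY exactly as before:

/*
 * Simplified sketch of the post-patch dispatch path; not the exact
 * upstream code.
 */
static vm_fault_t do_pte_missing(struct vm_fault *vmf)
{
	if (vma_is_anonymous(vmf->vma))
		return do_anonymous_page(vmf);
	/* No FAULT_FLAG_VMA_LOCK check here any more. */
	return do_fault(vmf);
}

static vm_fault_t do_fault(struct vm_fault *vmf)
{
	if (!(vmf->flags & FAULT_FLAG_WRITE))
		return do_read_fault(vmf);	/* read fault */
	if (!(vmf->vma->vm_flags & VM_SHARED))
		return do_cow_fault(vmf);	/* private write: COW */
	return do_shared_fault(vmf);		/* shared write */
}

static vm_fault_t do_read_fault(struct vm_fault *vmf)
{
	/*
	 * do_cow_fault() and do_shared_fault() open the same way, so a
	 * fault taken with only the VMA lock held still retries under
	 * the mmap lock, as it did when the check sat in
	 * do_pte_missing().
	 */
	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
		return VM_FAULT_RETRY;

	/* ... fault-around via ->map_pages() / ->fault() elided ... */
	return 0;
}
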
Patch

diff --git a/mm/memory.c b/mm/memory.c
index dc2baddc6040..9952bebd25b4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3647,8 +3647,6 @@  static vm_fault_t do_pte_missing(struct vm_fault *vmf)
 {
 	if (vma_is_anonymous(vmf->vma))
 		return do_anonymous_page(vmf);
-	else if (vmf->flags & FAULT_FLAG_VMA_LOCK)
-		return VM_FAULT_RETRY;
 	else
 		return do_fault(vmf);
 }
@@ -4523,6 +4521,8 @@  static vm_fault_t do_read_fault(struct vm_fault *vmf)
 {
 	vm_fault_t ret = 0;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+		return VM_FAULT_RETRY;
 	/*
 	 * Let's call ->map_pages() first and use ->fault() as fallback
 	 * if page by the offset is not ready to be mapped (cold cache or
@@ -4550,6 +4550,9 @@  static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+		return VM_FAULT_RETRY;
+
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
@@ -4589,6 +4592,9 @@  static vm_fault_t do_shared_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret, tmp;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+		return VM_FAULT_RETRY;
+
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;