[RFC,30/37] mm: enable speculative fault handling for supported file types.

Message ID 20210407014502.24091-31-michel@lespinasse.org (mailing list archive)
State New, archived
Series [RFC,01/37] mmap locking API: mmap_lock_is_contended returns a bool

Commit Message

Michel Lespinasse April 7, 2021, 1:44 a.m. UTC
Introduce vma_can_speculate(), which allows speculative handling for
VMAs mapping supported file types.

From do_handle_mm_fault(), speculative handling proceeds through
__handle_mm_fault(), handle_pte_fault() and do_fault().

At this point, we expect speculative faults to continue through one of:
- do_read_fault(), which is fully implemented;
- do_cow_fault(), which might abort if the anon vma is missing;
- do_shared_fault(), which is not implemented yet
  (it would require ->page_mkwrite() changes).

vma_can_speculate() provides an early abort for the do_shared_fault()
path, limiting the time spent attempting that unimplemented case.
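
For reference, the policy implemented by vma_can_speculate() can be
modeled by the userspace sketch below. This is an illustration only:
vma_model, can_speculate() and the flag values are stand-ins invented
for this example, not kernel definitions; the authoritative logic is
in the include/linux/mm.h hunk below.

    #include <stdbool.h>
    #include <stdio.h>

    #define FAULT_FLAG_WRITE 0x1    /* stand-in for the kernel flag */
    #define VM_SHARED        0x2    /* stand-in for the kernel flag */

    struct vma_model {
            bool anonymous;         /* vma_is_anonymous() */
            bool has_speculative;   /* vma->vm_ops->speculative != NULL */
            unsigned int vm_flags;
    };

    static bool can_speculate(const struct vma_model *vma,
                              unsigned int flags)
    {
            if (vma->anonymous)
                    return true;    /* anon faults: always allowed */
            if (!vma->has_speculative)
                    return false;   /* file type not supported */
            if (!(flags & FAULT_FLAG_WRITE))
                    return true;    /* read fault: do_read_fault() */
            if (!(vma->vm_flags & VM_SHARED))
                    return true;    /* private write: do_cow_fault() */
            return false;           /* shared write: do_shared_fault()
                                       not implemented, abort early */
    }

    int main(void)
    {
            struct vma_model file_vma = {
                    .anonymous = false,
                    .has_speculative = true,
                    .vm_flags = VM_SHARED,
            };
            /* shared write fault on a file VMA: prints 0 (abort) */
            printf("%d\n", can_speculate(&file_vma, FAULT_FLAG_WRITE));
            return 0;
    }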

Signed-off-by: Michel Lespinasse <michel@lespinasse.org>
---
 arch/x86/mm/fault.c |  3 ++-
 include/linux/mm.h  | 14 ++++++++++++++
 mm/memory.c         | 17 ++++++++++++-----
 3 files changed, 28 insertions(+), 6 deletions(-)

Patch

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index fbf265f56a06..48b86911a6df 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1324,7 +1324,8 @@  void do_user_addr_fault(struct pt_regs *regs,
 		goto spf_abort;
 	rcu_read_lock();
 	vma = find_vma(mm, address);
-	if (!vma || vma->vm_start > address || !vma_is_anonymous(vma)) {
+	if (!vma || vma->vm_start > address ||
+	    !vma_can_speculate(vma, flags)) {
 		rcu_read_unlock();
 		goto spf_abort;
 	}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b4c0c10e434e..edb809e9036b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -700,6 +700,20 @@  static inline bool vma_is_accessible(struct vm_area_struct *vma)
 	return vma->vm_flags & VM_ACCESS_FLAGS;
 }
 
+static inline bool vma_can_speculate(struct vm_area_struct *vma,
+		unsigned int flags)
+{
+	if (vma_is_anonymous(vma))
+		return true;
+	if (!vma->vm_ops->speculative)
+		return false;
+	if (!(flags & FAULT_FLAG_WRITE))
+		return true;
+	if (!(vma->vm_flags & VM_SHARED))
+		return true;
+	return false;
+}
+
 #ifdef CONFIG_SHMEM
 /*
  * The vma_is_shmem is not inline because it is used only by slow
diff --git a/mm/memory.c b/mm/memory.c
index a20e13d84145..074945faf1ab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4090,8 +4090,12 @@  static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret;
 
-	if (unlikely(anon_vma_prepare(vma)))
-		return VM_FAULT_OOM;
+	if (unlikely(!vma->anon_vma)) {
+		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
+			return VM_FAULT_RETRY;
+		if (__anon_vma_prepare(vma))
+			return VM_FAULT_OOM;
+	}
 
 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
 	if (!vmf->cow_page)
@@ -4128,6 +4132,8 @@  static vm_fault_t do_shared_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret, tmp;
 
+	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
+
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
@@ -4172,12 +4178,12 @@  static vm_fault_t do_fault(struct vm_fault *vmf)
 	struct mm_struct *vm_mm = vma->vm_mm;
 	vm_fault_t ret;
 
-	VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
-
 	/*
 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
 	 */
 	if (!vma->vm_ops->fault) {
+		VM_BUG_ON(vmf->flags & FAULT_FLAG_SPECULATIVE);
+
 		/*
 		 * If we find a migration pmd entry or a none pmd entry, which
 		 * should never happen, return SIGBUS
@@ -4739,7 +4745,8 @@  vm_fault_t do_handle_mm_fault(struct vm_area_struct *vma,
 {
 	vm_fault_t ret;
 
-	VM_BUG_ON((flags & FAULT_FLAG_SPECULATIVE) && !vma_is_anonymous(vma));
+	VM_BUG_ON((flags & FAULT_FLAG_SPECULATIVE) &&
+		  !vma_can_speculate(vma, flags));
 
 	__set_current_state(TASK_RUNNING);