[RFC,5/6] mm: use lightweight reclaim on FAULT_FLAG_RETRY_NOWAIT

Message ID: 20210225072910.2811795-6-namit@vmware.com
State: New, archived
Series: x86: prefetch_page() vDSO call

Commit Message

Nadav Amit Feb. 25, 2021, 7:29 a.m. UTC
From: Nadav Amit <namit@vmware.com>

When FAULT_FLAG_RETRY_NOWAIT is set, the caller arguably wants only
lightweight reclaim, since a long reclamation would not respect the
"NOWAIT" semantic. Honor the request during the first try of swap and
file-backed page faults by adding __GFP_NORETRY | __GFP_NOWARN to the
allocation mask.
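
For context, FAULT_FLAG_RETRY_NOWAIT is typically set together with
FAULT_FLAG_ALLOW_RETRY by callers that must not sleep in the fault
path, e.g. get_user_pages() with FOLL_NOWAIT (used, among others, by
KVM's async page faults). A minimal caller-side sketch, paraphrased
from mm/gup.c of this era and not part of this patch:

	unsigned int fault_flags = 0;

	if (*flags & FOLL_NOWAIT)
		/* Try the fault once; bail out rather than sleep for I/O. */
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;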

Cc: Andy Lutomirski <luto@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: x86@kernel.org
Signed-off-by: Nadav Amit <namit@vmware.com>
---
 mm/memory.c | 32 ++++++++++++++++++++++----------
 1 file changed, 22 insertions(+), 10 deletions(-)

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 13b9cf36268f..70899c92a9e6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2679,18 +2679,31 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	return ret;
 }
 
-static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
+static gfp_t massage_page_gfp_mask(gfp_t gfp_mask, unsigned long vmf_flags)
 {
-	struct file *vm_file = vma->vm_file;
+	if (fault_flag_allow_retry_first(vmf_flags) &&
+	    (vmf_flags & FAULT_FLAG_RETRY_NOWAIT))
+		gfp_mask |= __GFP_NORETRY | __GFP_NOWARN;
 
-	if (vm_file)
-		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
+	return gfp_mask;
+}
+
+static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma,
+				  unsigned long flags)
+{
+	struct file *vm_file = vma->vm_file;
+	gfp_t gfp_mask;
 
 	/*
 	 * Special mappings (e.g. VDSO) do not have any file so fake
 	 * a default GFP_KERNEL for them.
 	 */
-	return GFP_KERNEL;
+	if (!vm_file)
+		return GFP_KERNEL;
+
+	gfp_mask = mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
+
+	return massage_page_gfp_mask(gfp_mask, flags);
 }
 
 /*
@@ -3253,6 +3266,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 vm_fault_t do_swap_page(struct vm_fault *vmf)
 {
+	gfp_t gfp_mask = massage_page_gfp_mask(GFP_HIGHUSER_MOVABLE, vmf->flags);
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page = NULL, *swapcache;
 	swp_entry_t entry;
@@ -3293,8 +3307,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
 			/* skip swapcache */
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-							vmf->address);
+			page = alloc_page_vma(gfp_mask, vma, vmf->address);
 			if (page) {
 				int err;
 
@@ -3320,8 +3333,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 				swap_readpage(page, true);
 			}
 		} else {
-			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
-						vmf);
+			page = swapin_readahead(entry, gfp_mask, vmf);
 			swapcache = page;
 		}
 
@@ -4452,7 +4464,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		.address = address & PAGE_MASK,
 		.flags = flags,
 		.pgoff = linear_page_index(vma, address),
-		.gfp_mask = __get_fault_gfp_mask(vma),
+		.gfp_mask = __get_fault_gfp_mask(vma, flags),
 	};
 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
 	struct mm_struct *mm = vma->vm_mm;
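
The "first try" gating above relies on fault_flag_allow_retry_first(),
which is true only before the fault has been retried. For reference,
its definition in include/linux/mm.h of this era reads roughly:

	static inline bool fault_flag_allow_retry_first(unsigned int flags)
	{
		/* ALLOW_RETRY is set and the fault has not been tried before. */
		return (flags & FAULT_FLAG_ALLOW_RETRY) &&
		       (!(flags & FAULT_FLAG_TRIED));
	}

Once the fault is retried with FAULT_FLAG_TRIED set, the
__GFP_NORETRY | __GFP_NOWARN relaxation no longer applies and the
normal, potentially blocking, reclaim behavior is restored.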