
mm/filemap: In page fault retry path skip filemap_map_pages() if no read-ahead pages

Message ID 20240731065128.50971-1-rongqianfeng@vivo.com (mailing list archive)
State New
Series mm/filemap: In page fault retry path skip filemap_map_pages() if no read-ahead pages

Commit Message

Rong Qianfeng July 31, 2024, 6:51 a.m. UTC
In filemap_fault(), if read-ahead is not wanted, the process is as follows:

First, __filemap_get_folio() allocates a new folio; because PG_uptodate is
not set, filemap_read_folio() is called later to read data into the folio.
Second, before returning, filemap_fault() checks whether the per-VMA lock or
mmap_lock has been released. If it has, VM_FAULT_RETRY is returned, which
means the page fault path must be retried. Finally, on the retry,
filemap_map_pages() is called from do_fault_around() to establish page table
mappings for the previously read folio.

Because filemap_read_folio() reads only a single folio and there are no
read-ahead pages, there is no need to go through do_fault_around() again in
the page fault retry path.

Signed-off-by: Rong Qianfeng <rongqianfeng@vivo.com>
---
 mm/filemap.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)
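
For readers less familiar with this path, the sequence described above can
be sketched roughly as follows. This is an illustration only, not the kernel
source: locking, error handling, and the fpin bookkeeping are elided, and
fault_dropped_lock() is a hypothetical stand-in for the real check that
decides whether VM_FAULT_RETRY must be returned.

static vm_fault_t filemap_fault_sketch(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct folio *folio;

	/* 1. No folio in the page cache yet: allocate one. */
	folio = __filemap_get_folio(mapping, vmf->pgoff,
				    FGP_CREAT | FGP_FOR_MMAP,
				    mapping_gfp_mask(mapping));

	/* 2. PG_uptodate is clear, so read just this one folio. */
	if (!folio_test_uptodate(folio))
		filemap_read_folio(file, mapping->a_ops->read_folio, folio);

	/*
	 * 3. If the per-VMA lock or mmap_lock was dropped for the I/O,
	 *    ask the caller to retry; the retry then reaches
	 *    do_fault_around() -> filemap_map_pages() for this folio.
	 */
	if (fault_dropped_lock(vmf))	/* hypothetical helper */
		return VM_FAULT_RETRY;

	return 0;
}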


Patch

diff --git a/mm/filemap.c b/mm/filemap.c
index d62150418b91..f29adf5cf081
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3105,6 +3105,15 @@  static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
 	return 1;
 }
 
+static inline bool want_readahead(unsigned long vm_flags, struct file_ra_state *ra)
+{
+	if (vm_flags & VM_RAND_READ || !ra->ra_pages)
+		return false;
+
+	return true;
+}
+
 /*
  * Synchronous readahead happens when we don't even find a page in the page
  * cache at all.  We don't want to perform IO under the mmap sem, so if we have
@@ -3141,9 +3150,7 @@  static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vm_flags & VM_RAND_READ)
-		return fpin;
-	if (!ra->ra_pages)
+	if (!want_readahead(vm_flags, ra))
 		return fpin;
 
 	if (vm_flags & VM_SEQ_READ) {
@@ -3191,7 +3198,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
 	unsigned int mmap_miss;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
+	if (!want_readahead(vmf->vma->vm_flags, ra))
 		return fpin;
 
 	mmap_miss = READ_ONCE(ra->mmap_miss);
@@ -3612,6 +3619,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long rss = 0;
 	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved, folio_type;
 
+	/*
+	 * If there are no other read-ahead pages, returning zero lets
+	 * the caller fall back to __do_fault() to finish the page fault.
+	 */
+	if ((vmf->flags & FAULT_FLAG_TRIED) &&
+	    !want_readahead(vma->vm_flags, &file->f_ra))
+		return 0;
+
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
 	if (!folio)
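
Note: the VM_RAND_READ condition that want_readahead() tests is normally set
on a VMA from userspace via madvise(MADV_RANDOM). A minimal illustration of
exercising that path (hypothetical test program, error handling abbreviated):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc < 2)
		return 1;

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	off_t len = lseek(fd, 0, SEEK_END);
	char *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Hint random access: sets VM_RAND_READ, disabling read-ahead. */
	if (madvise(p, len, MADV_RANDOM))
		perror("madvise");

	/* Touching one page faults in a single folio, with no read-ahead. */
	volatile char c = p[0];
	(void)c;

	munmap(p, len);
	close(fd);
	return 0;
}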