
[24/24] mm/swap: change swapin_readahead to swapin_page_fault

Message ID 20231119194740.94101-25-ryncsn@gmail.com (mailing list archive)
State New
Series Swapin path refactor for optimization and bugfix

Commit Message

Kairui Song Nov. 19, 2023, 7:47 p.m. UTC
From: Kairui Song <kasong@tencent.com>

swapin_readahead() is now only called from the direct page fault path,
so rename it to swapin_page_fault(). Since its only caller always uses
the same flag for userspace page faults, the internal allocations are
done with GFP_HIGHUSER_MOVABLE directly.

Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/memory.c     |  4 ++--
 mm/swap.h       |  6 +++---
 mm/swap_state.c | 15 +++++++++------
 3 files changed, 14 insertions(+), 11 deletions(-)
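
For reference, this is the call pattern the rename gives the fault path; a condensed sketch drawn from the do_swap_page() hunk below, with the surrounding locking, PTE revalidation and error handling elided:

	enum swap_cache_result cache_result;
	struct page *page;

	/* swapin_page_fault() replaces swapin_readahead() here */
	page = swapin_page_fault(entry, GFP_HIGHUSER_MOVABLE,
				 vmf, &cache_result);
	if (IS_ERR_OR_NULL(page)) {
		/* back out if somebody else faulted in this pte */
		goto out;
	}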

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 620fa87557fd..4907a5b1b75b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3844,8 +3844,8 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 		goto out;
 	}
 
-	page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
-				vmf, &cache_result);
+	page = swapin_page_fault(entry, GFP_HIGHUSER_MOVABLE,
+				 vmf, &cache_result);
 	if (IS_ERR_OR_NULL(page)) {
 		/*
 		 * Back out if somebody else faulted in this pte
diff --git a/mm/swap.h b/mm/swap.h
index 4374bf11ca41..2f8f8befff89 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -56,8 +56,8 @@  struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				     struct mempolicy *mpol, pgoff_t ilx,
 				     struct mm_struct *mm, bool *new_page_allocated);
-struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
-			      struct vm_fault *vmf, enum swap_cache_result *result);
+struct page *swapin_page_fault(swp_entry_t entry, gfp_t flag,
+			       struct vm_fault *vmf, enum swap_cache_result *result);
 struct page *swapin_page_non_fault(swp_entry_t entry, gfp_t gfp_mask,
 				   struct mempolicy *mpol, pgoff_t ilx,
 				   struct mm_struct *mm,
@@ -91,7 +91,7 @@  static inline void show_swap_cache_info(void)
 {
 }
 
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+static inline struct page *swapin_page_fault(swp_entry_t swp, gfp_t gfp_mask,
 			struct vm_fault *vmf, enum swap_cache_result *result)
 {
 	return NULL;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 362a6f674b36..2f51d2e64e59 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -899,7 +899,7 @@  static struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
 }
 
 /**
- * swapin_readahead - swap in pages in hope we need them soon
+ * swapin_page_fault - swap in a page from page fault context
  * @entry: swap entry of this memory
  * @gfp_mask: memory allocation flags
  * @vmf: fault information
@@ -911,8 +911,8 @@  static struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
  * it will read ahead blocks by cluster-based(ie, physical disk based)
  * or vma-based(ie, virtual address based on faulty address) readahead.
  */
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
-			      struct vm_fault *vmf, enum swap_cache_result *result)
+struct page *swapin_page_fault(swp_entry_t entry, gfp_t gfp_mask,
+			       struct vm_fault *vmf, enum swap_cache_result *result)
 {
 	struct swap_info_struct *si;
 	struct mempolicy *mpol;
@@ -936,15 +936,18 @@  struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
 	if (swap_use_no_readahead(si, swp_offset(entry))) {
 		*result = SWAP_CACHE_BYPASS;
-		page = swapin_no_readahead(entry, gfp_mask, mpol, ilx, vmf->vma->vm_mm);
+		page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE,
+					   mpol, ilx, vmf->vma->vm_mm);
 		if (shadow)
 			workingset_refault(page_folio(page), shadow);
 	} else {
 		*result = SWAP_CACHE_MISS;
 		if (swap_use_vma_readahead(si))
-			page = swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf);
+			page = swap_vma_readahead(entry, GFP_HIGHUSER_MOVABLE,
+						  mpol, ilx, vmf);
 		else
-			page = swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
+			page = swap_cluster_readahead(entry, GFP_HIGHUSER_MOVABLE,
+						      mpol, ilx, vmf->vma->vm_mm);
 	}
 	mpol_cond_put(mpol);
 done:
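
To summarize the hunk above, the renamed swapin_page_fault() now always allocates with GFP_HIGHUSER_MOVABLE and picks one of three swap-in strategies; a condensed sketch, with the swap_info lookup, mempolicy handling, *result reporting and workingset refault accounting left out:

	if (swap_use_no_readahead(si, swp_offset(entry)))
		/* read only the faulting page, bypassing the swap cache */
		page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE,
					   mpol, ilx, vmf->vma->vm_mm);
	else if (swap_use_vma_readahead(si))
		/* readahead based on virtual addresses around the fault */
		page = swap_vma_readahead(entry, GFP_HIGHUSER_MOVABLE,
					  mpol, ilx, vmf);
	else
		/* readahead based on physical (swap cluster) layout */
		page = swap_cluster_readahead(entry, GFP_HIGHUSER_MOVABLE,
					      mpol, ilx, vmf->vma->vm_mm);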