
[12/24] mm/swap: simplify arguments for swap_cache_get_folio

Message ID 20231119194740.94101-13-ryncsn@gmail.com (mailing list archive)
State New
Series Swapin path refactor for optimization and bugfix

Commit Message

Kairui Song Nov. 19, 2023, 7:47 p.m. UTC
From: Kairui Song <kasong@tencent.com>

There are only two callers now; simplify the arguments.

Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/shmem.c      |  2 +-
 mm/swap.h       |  2 +-
 mm/swap_state.c | 15 +++++++--------
 3 files changed, 9 insertions(+), 10 deletions(-)
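
For reference, a minimal sketch of the interface change, assembled from the
prototype and the two call sites touched in the hunks below (not a standalone
build; the call lines are copied from the diff):

/* Before: callers pass the vma and the faulting address separately. */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr);

/* After: callers pass the whole vm_fault, or NULL outside a fault. */
struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_fault *vmf);

/* The only two remaining callers: */
folio = swap_cache_get_folio(entry, vmf);	/* swapin_readahead()   */
folio = swap_cache_get_folio(swap, NULL);	/* shmem_swapin_folio() */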

Comments

Chris Li Nov. 21, 2023, 4:36 p.m. UTC | #1
On Sun, Nov 19, 2023 at 11:48 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> There are only two callers now; simplify the arguments.

I don't think this patch is needed. It will not have a real impact on
the resulting kernel.

>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
>  mm/shmem.c      |  2 +-
>  mm/swap.h       |  2 +-
>  mm/swap_state.c | 15 +++++++--------
>  3 files changed, 9 insertions(+), 10 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 0d1ce70bce38..72239061c655 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -1875,7 +1875,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
>         }
>
>         /* Look it up and read it in.. */
> -       folio = swap_cache_get_folio(swap, NULL, 0);
> +       folio = swap_cache_get_folio(swap, NULL);
>         if (!folio) {
>                 /* Or update major stats only when swapin succeeds?? */
>                 if (fault_type) {
> diff --git a/mm/swap.h b/mm/swap.h
> index ac9136eee690..e43e965f123f 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -47,7 +47,7 @@ void delete_from_swap_cache(struct folio *folio);
>  void clear_shadow_from_swap_cache(int type, unsigned long begin,
>                                   unsigned long end);
>  struct folio *swap_cache_get_folio(swp_entry_t entry,
> -               struct vm_area_struct *vma, unsigned long addr);
> +                                  struct vm_fault *vmf);
>  struct folio *filemap_get_incore_folio(struct address_space *mapping,
>                 pgoff_t index);
>
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index e96d63bf8a22..91461e26a8cc 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -334,8 +334,7 @@ static inline bool swap_use_vma_readahead(struct swap_info_struct *si)
>   *
>   * Caller must lock the swap device or hold a reference to keep it valid.
>   */
> -struct folio *swap_cache_get_folio(swp_entry_t entry,
> -               struct vm_area_struct *vma, unsigned long addr)
> +struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_fault *vmf)

I actually prefer the original code. The vm_fault is a much heavier
object; the vma and address are really the minimum this function needs.
Even though vma and address are two arguments, I consider them simpler
than a vm_fault struct.

It is possible that some other non-fault path wants to look up the
swap cache, and constructing a vmf just to carry the vma and address
would be kind of unnatural.
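
A minimal sketch of the shape being argued for here, i.e. keeping the
pre-patch prototype; both calls below are the existing call sites as
they stand before this patch:

/* Pre-patch prototype: only what the lookup actually needs. */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr);

/* Fault path: forward the two fields the vm_fault already carries. */
folio = swap_cache_get_folio(entry, vmf->vma, vmf->address);

/*
 * Non-fault path (shmem_swapin_folio): passes NULL/0 directly, with no
 * need to build a struct vm_fault on the stack just to carry two fields.
 */
folio = swap_cache_get_folio(swap, NULL, 0);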

Chris

>  {
>         struct folio *folio;
>
> @@ -352,22 +351,22 @@ struct folio *swap_cache_get_folio(swp_entry_t entry,
>                         return folio;
>
>                 readahead = folio_test_clear_readahead(folio);
> -               if (vma && vma_ra) {
> +               if (vmf && vma_ra) {
>                         unsigned long ra_val;
>                         int win, hits;
>
> -                       ra_val = GET_SWAP_RA_VAL(vma);
> +                       ra_val = GET_SWAP_RA_VAL(vmf->vma);
>                         win = SWAP_RA_WIN(ra_val);
>                         hits = SWAP_RA_HITS(ra_val);
>                         if (readahead)
>                                 hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
> -                       atomic_long_set(&vma->swap_readahead_info,
> -                                       SWAP_RA_VAL(addr, win, hits));
> +                       atomic_long_set(&vmf->vma->swap_readahead_info,
> +                                       SWAP_RA_VAL(vmf->address, win, hits));
>                 }
>
>                 if (readahead) {
>                         count_vm_event(SWAP_RA_HIT);
> -                       if (!vma || !vma_ra)
> +                       if (!vmf || !vma_ra)
>                                 atomic_inc(&swapin_readahead_hits);
>                 }
>         } else {
> @@ -926,7 +925,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
>         struct page *page;
>         pgoff_t ilx;
>
> -       folio = swap_cache_get_folio(entry, vmf->vma, vmf->address);
> +       folio = swap_cache_get_folio(entry, vmf);
>         if (folio) {
>                 page = folio_file_page(folio, swp_offset(entry));
>                 cache_result = SWAP_CACHE_HIT;
> --
> 2.42.0
>
>

Patch

diff --git a/mm/shmem.c b/mm/shmem.c
index 0d1ce70bce38..72239061c655 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1875,7 +1875,7 @@  static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	}
 
 	/* Look it up and read it in.. */
-	folio = swap_cache_get_folio(swap, NULL, 0);
+	folio = swap_cache_get_folio(swap, NULL);
 	if (!folio) {
 		/* Or update major stats only when swapin succeeds?? */
 		if (fault_type) {
diff --git a/mm/swap.h b/mm/swap.h
index ac9136eee690..e43e965f123f 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -47,7 +47,7 @@  void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
 				  unsigned long end);
 struct folio *swap_cache_get_folio(swp_entry_t entry,
-		struct vm_area_struct *vma, unsigned long addr);
+				   struct vm_fault *vmf);
 struct folio *filemap_get_incore_folio(struct address_space *mapping,
 		pgoff_t index);
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index e96d63bf8a22..91461e26a8cc 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -334,8 +334,7 @@  static inline bool swap_use_vma_readahead(struct swap_info_struct *si)
  *
  * Caller must lock the swap device or hold a reference to keep it valid.
  */
-struct folio *swap_cache_get_folio(swp_entry_t entry,
-		struct vm_area_struct *vma, unsigned long addr)
+struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_fault *vmf)
 {
 	struct folio *folio;
 
@@ -352,22 +351,22 @@  struct folio *swap_cache_get_folio(swp_entry_t entry,
 			return folio;
 
 		readahead = folio_test_clear_readahead(folio);
-		if (vma && vma_ra) {
+		if (vmf && vma_ra) {
 			unsigned long ra_val;
 			int win, hits;
 
-			ra_val = GET_SWAP_RA_VAL(vma);
+			ra_val = GET_SWAP_RA_VAL(vmf->vma);
 			win = SWAP_RA_WIN(ra_val);
 			hits = SWAP_RA_HITS(ra_val);
 			if (readahead)
 				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
-			atomic_long_set(&vma->swap_readahead_info,
-					SWAP_RA_VAL(addr, win, hits));
+			atomic_long_set(&vmf->vma->swap_readahead_info,
+					SWAP_RA_VAL(vmf->address, win, hits));
 		}
 
 		if (readahead) {
 			count_vm_event(SWAP_RA_HIT);
-			if (!vma || !vma_ra)
+			if (!vmf || !vma_ra)
 				atomic_inc(&swapin_readahead_hits);
 		}
 	} else {
@@ -926,7 +925,7 @@  struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	struct page *page;
 	pgoff_t ilx;
 
-	folio = swap_cache_get_folio(entry, vmf->vma, vmf->address);
+	folio = swap_cache_get_folio(entry, vmf);
 	if (folio) {
 		page = folio_file_page(folio, swp_offset(entry));
 		cache_result = SWAP_CACHE_HIT;