
[03/24] mm/swap: move no readahead swapin code to a stand alone helper

Message ID 20231119194740.94101-4-ryncsn@gmail.com (mailing list archive)
State New
Series Swapin path refactor for optimization and bugfix

Commit Message

Kairui Song Nov. 19, 2023, 7:47 p.m. UTC
From: Kairui Song <kasong@tencent.com>

No feature change; simply move the routine to a standalone function to
be used later. The error path handling is copied from the "out_page"
label to keep the code change minimal for easier review.

Signed-off-by: Kairui Song <kasong@tencent.com>
---
 mm/memory.c     | 33 +++++----------------------------
 mm/swap.h       |  2 ++
 mm/swap_state.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 55 insertions(+), 28 deletions(-)

Comments

Matthew Wilcox (Oracle) Nov. 19, 2023, 9 p.m. UTC | #1
On Mon, Nov 20, 2023 at 03:47:19AM +0800, Kairui Song wrote:
> +			/* skip swapcache and readahead */
> +			page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE,
> +						vmf);
> +			if (page)
> +				folio = page_folio(page);

I think this should rather be:

			folio = swapin_no_readahead(entry,
					GFP_HIGHUSER_MOVABLE, vma);
			page = &folio->page;

and have swapin_no_readahead() work entirely in terms of folios.
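
For illustration, the caller side of that suggestion would then read as
in the sketch below. This is only a sketch: it keeps a NULL check,
since the allocation inside the helper can fail, and passes vmf, which
is what the helper in this patch actually takes:

			folio = swapin_no_readahead(entry,
					GFP_HIGHUSER_MOVABLE, vmf);
			if (folio)
				page = &folio->page;
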
Kairui Song Nov. 20, 2023, 11:14 a.m. UTC | #2
On Mon, Nov 20, 2023 at 5:00 AM Matthew Wilcox <willy@infradead.org> wrote:
>
> On Mon, Nov 20, 2023 at 03:47:19AM +0800, Kairui Song wrote:
> > +                     /* skip swapcache and readahead */
> > +                     page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE,
> > +                                             vmf);
> > +                     if (page)
> > +                             folio = page_folio(page);
>
> I think this should rather be:
>
>                         folio = swapin_no_readahead(entry,
>                                         GFP_HIGHUSER_MOVABLE, vma);
>                         page = &folio->page;
>
> and have swapin_no_readahead() work entirely in terms of folios.
>

Thanks for the review!

Good suggestion. I was actually thinking about converting more of the
swapin functions to use folios in a later series, since that involves
more changes and this series is already getting a bit long.
I'll see if I can reorganize the series to cover that too in a
sensible way.
Dan Carpenter Nov. 20, 2023, 2:55 p.m. UTC | #3
Hi Kairui,

kernel test robot noticed the following build warnings:

url:    https://github.com/intel-lab-lkp/linux/commits/Kairui-Song/mm-swap-fix-a-potential-undefined-behavior-issue/20231120-035926
base:   https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-everything
patch link:    https://lore.kernel.org/r/20231119194740.94101-4-ryncsn%40gmail.com
patch subject: [PATCH 03/24] mm/swap: move no readahead swapin code to a stand alone helper
config: um-randconfig-r081-20231120 (https://download.01.org/0day-ci/archive/20231120/202311201135.iG2KLShW-lkp@intel.com/config)
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project.git f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce: (https://download.01.org/0day-ci/archive/20231120/202311201135.iG2KLShW-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags:
| Reported-by: kernel test robot <lkp@intel.com>
| Reported-by: Dan Carpenter <error27@gmail.com>
| Closes: https://lore.kernel.org/r/202311201135.iG2KLShW-lkp@intel.com/

smatch warnings:
mm/swap_state.c:881 swapin_no_readahead() warn: use 'gfp_mask' here instead of GFP_KERNEL?

vim +/gfp_mask +881 mm/swap_state.c

19f582d2684e47 Kairui Song 2023-11-20  856  /**
19f582d2684e47 Kairui Song 2023-11-20  857   * swapin_no_readahead - swap in pages skipping swap cache and readahead
19f582d2684e47 Kairui Song 2023-11-20  858   * @entry: swap entry of this memory
19f582d2684e47 Kairui Song 2023-11-20  859   * @gfp_mask: memory allocation flags
19f582d2684e47 Kairui Song 2023-11-20  860   * @vmf: fault information
19f582d2684e47 Kairui Song 2023-11-20  861   *
19f582d2684e47 Kairui Song 2023-11-20  862   * Returns the struct page for entry and addr after the swap entry is read
19f582d2684e47 Kairui Song 2023-11-20  863   * in.
19f582d2684e47 Kairui Song 2023-11-20  864   */
19f582d2684e47 Kairui Song 2023-11-20  865  struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
19f582d2684e47 Kairui Song 2023-11-20  866  				 struct vm_fault *vmf)
19f582d2684e47 Kairui Song 2023-11-20  867  {
19f582d2684e47 Kairui Song 2023-11-20  868  	struct vm_area_struct *vma = vmf->vma;
19f582d2684e47 Kairui Song 2023-11-20  869  	struct page *page = NULL;
19f582d2684e47 Kairui Song 2023-11-20  870  	struct folio *folio;
19f582d2684e47 Kairui Song 2023-11-20  871  	void *shadow = NULL;
19f582d2684e47 Kairui Song 2023-11-20  872  
19f582d2684e47 Kairui Song 2023-11-20  873  	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
19f582d2684e47 Kairui Song 2023-11-20  874  				vma, vmf->address, false);
19f582d2684e47 Kairui Song 2023-11-20  875  	if (folio) {
19f582d2684e47 Kairui Song 2023-11-20  876  		__folio_set_locked(folio);
19f582d2684e47 Kairui Song 2023-11-20  877  		__folio_set_swapbacked(folio);
19f582d2684e47 Kairui Song 2023-11-20  878  
19f582d2684e47 Kairui Song 2023-11-20  879  		if (mem_cgroup_swapin_charge_folio(folio,
19f582d2684e47 Kairui Song 2023-11-20  880  					vma->vm_mm, GFP_KERNEL,

s/GFP_KERNEL/gfp_mask/?

19f582d2684e47 Kairui Song 2023-11-20 @881  					entry)) {
19f582d2684e47 Kairui Song 2023-11-20  882  			folio_unlock(folio);
19f582d2684e47 Kairui Song 2023-11-20  883  			folio_put(folio);
19f582d2684e47 Kairui Song 2023-11-20  884  			return NULL;
19f582d2684e47 Kairui Song 2023-11-20  885  		}
19f582d2684e47 Kairui Song 2023-11-20  886  		mem_cgroup_swapin_uncharge_swap(entry);
19f582d2684e47 Kairui Song 2023-11-20  887  
19f582d2684e47 Kairui Song 2023-11-20  888  		shadow = get_shadow_from_swap_cache(entry);
19f582d2684e47 Kairui Song 2023-11-20  889  		if (shadow)
19f582d2684e47 Kairui Song 2023-11-20  890  			workingset_refault(folio, shadow);
19f582d2684e47 Kairui Song 2023-11-20  891  
19f582d2684e47 Kairui Song 2023-11-20  892  		folio_add_lru(folio);
19f582d2684e47 Kairui Song 2023-11-20  893  
19f582d2684e47 Kairui Song 2023-11-20  894  		/* To provide entry to swap_readpage() */
19f582d2684e47 Kairui Song 2023-11-20  895  		folio->swap = entry;
19f582d2684e47 Kairui Song 2023-11-20  896  		page = &folio->page;
19f582d2684e47 Kairui Song 2023-11-20  897  		swap_readpage(page, true, NULL);
19f582d2684e47 Kairui Song 2023-11-20  898  		folio->private = NULL;
19f582d2684e47 Kairui Song 2023-11-20  899  	}
19f582d2684e47 Kairui Song 2023-11-20  900  
19f582d2684e47 Kairui Song 2023-11-20  901  	return page;
19f582d2684e47 Kairui Song 2023-11-20  902  }
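
The fix Dan suggests is a one-word change in the helper, passing the
caller's mask through to the memcg charge. An untested sketch of that
change:

		if (mem_cgroup_swapin_charge_folio(folio,
					vma->vm_mm, gfp_mask,
					entry)) {
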
Chris Li Nov. 21, 2023, 5:34 a.m. UTC | #4
On Sun, Nov 19, 2023 at 11:48 AM Kairui Song <ryncsn@gmail.com> wrote:
>
> From: Kairui Song <kasong@tencent.com>
>
> No feature change; simply move the routine to a standalone function to
> be used later. The error path handling is copied from the "out_page"
> label to keep the code change minimal for easier review.
>
> Signed-off-by: Kairui Song <kasong@tencent.com>
> ---
>  mm/memory.c     | 33 +++++----------------------------
>  mm/swap.h       |  2 ++
>  mm/swap_state.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 55 insertions(+), 28 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 70ffa867b1be..fba4a5229163 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3794,7 +3794,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>         swp_entry_t entry;
>         pte_t pte;
>         vm_fault_t ret = 0;
> -       void *shadow = NULL;
>
>         if (!pte_unmap_same(vmf))
>                 goto out;
> @@ -3858,33 +3857,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>         if (!folio) {
>                 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
>                     __swap_count(entry) == 1) {
> -                       /* skip swapcache */
> -                       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
> -                                               vma, vmf->address, false);
> -                       if (folio) {
> -                               __folio_set_locked(folio);
> -                               __folio_set_swapbacked(folio);
> -
> -                               if (mem_cgroup_swapin_charge_folio(folio,
> -                                                       vma->vm_mm, GFP_KERNEL,
> -                                                       entry)) {
> -                                       ret = VM_FAULT_OOM;
> -                                       goto out_page;
> -                               }
> -                               mem_cgroup_swapin_uncharge_swap(entry);
> -
> -                               shadow = get_shadow_from_swap_cache(entry);
> -                               if (shadow)
> -                                       workingset_refault(folio, shadow);
> -
> -                               folio_add_lru(folio);
> -                               page = &folio->page;
> -
> -                               /* To provide entry to swap_readpage() */
> -                               folio->swap = entry;
> -                               swap_readpage(page, true, NULL);
> -                               folio->private = NULL;
> -                       }
> +                       /* skip swapcache and readahead */
> +                       page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE,
> +                                               vmf);

A minor point: swapin_no_readahead() is expressed in negative words.

Better to use positive words to express what the function does rather
than what the function does not do.
I am terrible at naming functions myself, but I can think of something
along the lines of:
swapin_direct? (no cache)
swapin_minimal?
swapin_entry_only?

Please suggest better names for "basic, bare minimum".

Chris
Kairui Song Nov. 22, 2023, 5:33 p.m. UTC | #5
On Tue, Nov 21, 2023 at 1:35 PM Chris Li <chrisl@kernel.org> wrote:
>
> On Sun, Nov 19, 2023 at 11:48 AM Kairui Song <ryncsn@gmail.com> wrote:
> >
> > From: Kairui Song <kasong@tencent.com>
> >
> > No feature change; simply move the routine to a standalone function to
> > be used later. The error path handling is copied from the "out_page"
> > label to keep the code change minimal for easier review.
> >
> > Signed-off-by: Kairui Song <kasong@tencent.com>
> > ---
> >  mm/memory.c     | 33 +++++----------------------------
> >  mm/swap.h       |  2 ++
> >  mm/swap_state.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 55 insertions(+), 28 deletions(-)
> >
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 70ffa867b1be..fba4a5229163 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -3794,7 +3794,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >         swp_entry_t entry;
> >         pte_t pte;
> >         vm_fault_t ret = 0;
> > -       void *shadow = NULL;
> >
> >         if (!pte_unmap_same(vmf))
> >                 goto out;
> > @@ -3858,33 +3857,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> >         if (!folio) {
> >                 if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
> >                     __swap_count(entry) == 1) {
> > -                       /* skip swapcache */
> > -                       folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
> > -                                               vma, vmf->address, false);
> > -                       if (folio) {
> > -                               __folio_set_locked(folio);
> > -                               __folio_set_swapbacked(folio);
> > -
> > -                               if (mem_cgroup_swapin_charge_folio(folio,
> > -                                                       vma->vm_mm, GFP_KERNEL,
> > -                                                       entry)) {
> > -                                       ret = VM_FAULT_OOM;
> > -                                       goto out_page;
> > -                               }
> > -                               mem_cgroup_swapin_uncharge_swap(entry);
> > -
> > -                               shadow = get_shadow_from_swap_cache(entry);
> > -                               if (shadow)
> > -                                       workingset_refault(folio, shadow);
> > -
> > -                               folio_add_lru(folio);
> > -                               page = &folio->page;
> > -
> > -                               /* To provide entry to swap_readpage() */
> > -                               folio->swap = entry;
> > -                               swap_readpage(page, true, NULL);
> > -                               folio->private = NULL;
> > -                       }
> > +                       /* skip swapcache and readahead */
> > +                       page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE,
> > +                                               vmf);
>
> A minor point: swapin_no_readahead() is expressed in negative words.
>
> Better to use positive words to express what the function does rather
> than what the function does not do.
> I am terrible at naming functions myself, but I can think of something
> along the lines of:
> swapin_direct? (no cache)
> swapin_minimal?
> swapin_entry_only?
>
> Please suggest better names for "basic, bare minimum".

Thanks for the suggestions. I prefer swapin_direct here; I'll update
with that name and also make it return a folio directly.
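
Taken together, the reworked helper might look roughly like the sketch
below. This is illustrative only, not the actual v2 code: it assumes
the swapin_direct name and the folio return type discussed above, and
it passes gfp_mask through to both the allocation and the memcg charge
(the posted patch hardcodes GFP_HIGHUSER_MOVABLE and GFP_KERNEL
respectively):

struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
			    struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct folio *folio;
	void *shadow;

	/* Skip the swap cache and readahead: read a single folio. */
	folio = vma_alloc_folio(gfp_mask, 0, vma, vmf->address, false);
	if (!folio)
		return NULL;

	__folio_set_locked(folio);
	__folio_set_swapbacked(folio);

	/* Charge with the caller's mask, as the smatch report suggests. */
	if (mem_cgroup_swapin_charge_folio(folio, vma->vm_mm,
					   gfp_mask, entry)) {
		folio_unlock(folio);
		folio_put(folio);
		return NULL;
	}
	mem_cgroup_swapin_uncharge_swap(entry);

	shadow = get_shadow_from_swap_cache(entry);
	if (shadow)
		workingset_refault(folio, shadow);

	folio_add_lru(folio);

	/* To provide entry to swap_readpage() */
	folio->swap = entry;
	swap_readpage(&folio->page, true, NULL);
	folio->private = NULL;

	return folio;
}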

Patch

diff --git a/mm/memory.c b/mm/memory.c
index 70ffa867b1be..fba4a5229163 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3794,7 +3794,6 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	swp_entry_t entry;
 	pte_t pte;
 	vm_fault_t ret = 0;
-	void *shadow = NULL;
 
 	if (!pte_unmap_same(vmf))
 		goto out;
@@ -3858,33 +3857,11 @@  vm_fault_t do_swap_page(struct vm_fault *vmf)
 	if (!folio) {
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
-			/* skip swapcache */
-			folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
-						vma, vmf->address, false);
-			if (folio) {
-				__folio_set_locked(folio);
-				__folio_set_swapbacked(folio);
-
-				if (mem_cgroup_swapin_charge_folio(folio,
-							vma->vm_mm, GFP_KERNEL,
-							entry)) {
-					ret = VM_FAULT_OOM;
-					goto out_page;
-				}
-				mem_cgroup_swapin_uncharge_swap(entry);
-
-				shadow = get_shadow_from_swap_cache(entry);
-				if (shadow)
-					workingset_refault(folio, shadow);
-
-				folio_add_lru(folio);
-				page = &folio->page;
-
-				/* To provide entry to swap_readpage() */
-				folio->swap = entry;
-				swap_readpage(page, true, NULL);
-				folio->private = NULL;
-			}
+			/* skip swapcache and readahead */
+			page = swapin_no_readahead(entry, GFP_HIGHUSER_MOVABLE,
+						vmf);
+			if (page)
+				folio = page_folio(page);
 		} else {
 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 						vmf);
diff --git a/mm/swap.h b/mm/swap.h
index 73c332ee4d91..ea4be4791394 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -56,6 +56,8 @@  struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 				    struct mempolicy *mpol, pgoff_t ilx);
 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
 			      struct vm_fault *vmf);
+struct page *swapin_no_readahead(swp_entry_t entry, gfp_t flag,
+				 struct vm_fault *vmf);
 
 static inline unsigned int folio_swap_flags(struct folio *folio)
 {
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 85d9e5806a6a..ac4fa404eaa7 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -853,6 +853,54 @@  static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	return page;
 }
 
+/**
+ * swapin_no_readahead - swap in pages skipping swap cache and readahead
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct page for entry and addr after the swap entry is read
+ * in.
+ */
+struct page *swapin_no_readahead(swp_entry_t entry, gfp_t gfp_mask,
+				 struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct page *page = NULL;
+	struct folio *folio;
+	void *shadow = NULL;
+
+	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
+				vma, vmf->address, false);
+	if (folio) {
+		__folio_set_locked(folio);
+		__folio_set_swapbacked(folio);
+
+		if (mem_cgroup_swapin_charge_folio(folio,
+					vma->vm_mm, GFP_KERNEL,
+					entry)) {
+			folio_unlock(folio);
+			folio_put(folio);
+			return NULL;
+		}
+		mem_cgroup_swapin_uncharge_swap(entry);
+
+		shadow = get_shadow_from_swap_cache(entry);
+		if (shadow)
+			workingset_refault(folio, shadow);
+
+		folio_add_lru(folio);
+
+		/* To provide entry to swap_readpage() */
+		folio->swap = entry;
+		page = &folio->page;
+		swap_readpage(page, true, NULL);
+		folio->private = NULL;
+	}
+
+	return page;
+}
+
 /**
  * swapin_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory