diff mbox series

[1/2] mm,swap: extract swap single page readahead into its own function

Message ID 20200922020148.3261797-2-riel@surriel.com (mailing list archive)
State New, archived
Headers show
Series mm,swap: skip swap readahead for instant IO (like zswap) | expand

Commit Message

Rik van Riel Sept. 22, 2020, 2:01 a.m. UTC
Split swap single page readahead into its own function, to make
the next patch easier to read. No functional changes.

Signed-off-by: Rik van Riel <riel@surriel.com>
---
 mm/swap_state.c | 40 +++++++++++++++++++++++++---------------
 1 file changed, 25 insertions(+), 15 deletions(-)

Comments

Christoph Hellwig Sept. 23, 2020, 6:32 a.m. UTC | #1
On Mon, Sep 21, 2020 at 10:01:47PM -0400, Rik van Riel wrote:
> +static struct page *swap_cluster_read_one(swp_entry_t entry,
> +		unsigned long offset, gfp_t gfp_mask,
> +		struct vm_area_struct *vma, unsigned long addr, bool readahead)
> +{
> +	bool page_allocated;
> +	struct page *page;
> +
> +	page =__read_swap_cache_async(swp_entry(swp_type(entry), offset),
> +				      gfp_mask, vma, addr, &page_allocated);

Missing whitespace after the "=".

> +	if (!page)
> +		return NULL;
> +	if (page_allocated) {
> +		swap_readpage(page, false);
> +		if (readahead) {
> +			SetPageReadahead(page);
> +			count_vm_event(SWAP_RA);
> +		}
> +	}
> +	put_page(page);
> +	return page;
> +}

I think swap_vma_readahead can be switched to your new helper
pretty trivially as well, as could many of the users of
read_swap_cache_async.
Hillf Danton Sept. 23, 2020, 8:02 a.m. UTC | #2
On Wed, 23 Sep 2020 07:32:43 +0100 Christoph Hellwig wrote:
> On Mon, Sep 21, 2020 at 10:01:47PM -0400, Rik van Riel wrote:
> > +static struct page *swap_cluster_read_one(swp_entry_t entry,
> > +		unsigned long offset, gfp_t gfp_mask,
> > +		struct vm_area_struct *vma, unsigned long addr, bool readahead)
> > +{
> > +	bool page_allocated;
> > +	struct page *page;
> > +
> > +	page =__read_swap_cache_async(swp_entry(swp_type(entry), offset),
> > +				      gfp_mask, vma, addr, &page_allocated);
> 
> Missing whitespace after the "=".
> 
> > +	if (!page)
> > +		return NULL;
> > +	if (page_allocated) {
> > +		swap_readpage(page, false);
> > +		if (readahead) {
> > +			SetPageReadahead(page);
> > +			count_vm_event(SWAP_RA);
> > +		}
> > +	}
> > +	put_page(page);
> > +	return page;
> > +}

For the v2 spin, please check whether the put_page() here can drop the
last reference, leaving the returned page pointer dangling (a "hot
potato") for the caller to dereference.
> 
> I think swap_vma_readahead can be switched to your new helper
> pretty trivially as well, as could many of the users of
> read_swap_cache_async.
diff mbox series

Patch

diff --git a/mm/swap_state.c b/mm/swap_state.c
index c16eebb81d8b..aacb9ba53f63 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -594,6 +594,28 @@  static unsigned long swapin_nr_pages(unsigned long offset)
 	return pages;
 }
 
+static struct page *swap_cluster_read_one(swp_entry_t entry,
+		unsigned long offset, gfp_t gfp_mask,
+		struct vm_area_struct *vma, unsigned long addr, bool readahead)
+{
+	bool page_allocated;
+	struct page *page;
+
+	page = __read_swap_cache_async(swp_entry(swp_type(entry), offset),
+				      gfp_mask, vma, addr, &page_allocated);
+	if (!page)
+		return NULL;
+	if (page_allocated) {
+		swap_readpage(page, false);
+		if (readahead) {
+			SetPageReadahead(page);
+			count_vm_event(SWAP_RA);
+		}
+	}
+	put_page(page);
+	return page;
+}
+
 /**
  * swap_cluster_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
@@ -615,14 +637,13 @@  static unsigned long swapin_nr_pages(unsigned long offset)
 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 				struct vm_fault *vmf)
 {
-	struct page *page;
 	unsigned long entry_offset = swp_offset(entry);
 	unsigned long offset = entry_offset;
 	unsigned long start_offset, end_offset;
 	unsigned long mask;
 	struct swap_info_struct *si = swp_swap_info(entry);
 	struct blk_plug plug;
-	bool do_poll = true, page_allocated;
+	bool do_poll = true;
 	struct vm_area_struct *vma = vmf->vma;
 	unsigned long addr = vmf->address;
 
@@ -649,19 +670,8 @@  struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	blk_start_plug(&plug);
 	for (offset = start_offset; offset <= end_offset ; offset++) {
 		/* Ok, do the async read-ahead now */
-		page = __read_swap_cache_async(
-			swp_entry(swp_type(entry), offset),
-			gfp_mask, vma, addr, &page_allocated);
-		if (!page)
-			continue;
-		if (page_allocated) {
-			swap_readpage(page, false);
-			if (offset != entry_offset) {
-				SetPageReadahead(page);
-				count_vm_event(SWAP_RA);
-			}
-		}
-		put_page(page);
+		swap_cluster_read_one(entry, offset, gfp_mask, vma, addr,
+				      offset != entry_offset);
 	}
 	blk_finish_plug(&plug);