
[1/2] iomap: simplify iomap_readpage_actor

Message ID 20210720084320.184877-1-hch@lst.de (mailing list archive)
State Superseded
Series [1/2] iomap: simplify iomap_readpage_actor

Commit Message

Christoph Hellwig July 20, 2021, 8:43 a.m. UTC
Now that the outstanding reads are counted in bytes, there is no need
to use the low-level __bio_try_merge_page API; we can switch back to
always using bio_add_page and simplify iomap_readpage_actor again.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/iomap/buffered-io.c | 13 +++----------
 1 file changed, 3 insertions(+), 10 deletions(-)

Comments

Matthew Wilcox July 20, 2021, 12:27 p.m. UTC | #1
On Tue, Jul 20, 2021 at 10:43:19AM +0200, Christoph Hellwig wrote:
> Now that the outstanding reads are counted in bytes, there is no need
> to use the low-level __bio_try_merge_page API; we can switch back to
> always using bio_add_page and simplify iomap_readpage_actor again.

I don't think this quite works.  You need to check the return value
from bio_add_page(); otherwise you can be in a situation where you try
to add a page to the last bvec and it's not contiguous, so it fails.

I was imagining something more like this:

-       bool same_page = false, is_contig = false;
+       bool is_contig = false;
...
        /* Try to merge into a previous segment if we can */
        sector = iomap_sector(iomap, pos);
-       if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
-               if (__bio_try_merge_page(ctx->bio, page, plen, poff,
-                               &same_page))
-                       goto done;
-               is_contig = true;
-       }
+       if (ctx->bio && bio_end_sector(ctx->bio) == sector)
+               is_contig = bio_add_page(ctx->bio, page, plen, poff) > 0;

-       if (!is_contig || bio_full(ctx->bio, plen)) {
+       if (!is_contig) {
                gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
                gfp_t orig_gfp = gfp;
...
                bio_set_dev(ctx->bio, iomap->bdev);
                ctx->bio->bi_end_io = iomap_read_end_io;
+               bio_add_page(ctx->bio, page, plen, poff);
        }

-       bio_add_page(ctx->bio, page, plen, poff);
 done:
        /*
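
To make the failure mode described above concrete: bio_add_page() returns
the length it managed to add (zero on failure), and a caller that ignores
a zero return silently loses that segment.  The short sketch below uses
made-up stand-ins (fake_bio, try_add) rather than the real bio API, and
only illustrates the unchecked-return hazard:

/* Hypothetical model of an append that can fail; none of this is the real
 * bio code, it only demonstrates why the return value matters. */
#include <stdio.h>

#define MAX_SEGS 2

struct fake_bio {
	unsigned int nsegs;
	unsigned int bytes;
};

/* Like bio_add_page(): returns the length added, or 0 on failure. */
static unsigned int try_add(struct fake_bio *bio, unsigned int len)
{
	if (bio->nsegs >= MAX_SEGS)
		return 0;
	bio->nsegs++;
	bio->bytes += len;
	return len;
}

int main(void)
{
	struct fake_bio bio = { 0 };

	try_add(&bio, 4096);
	try_add(&bio, 4096);
	/* The bio is now full; an unchecked third call silently drops data. */
	try_add(&bio, 4096);
	printf("bytes queued: %u (the caller assumed 12288)\n", bio.bytes);
	return 0;
}
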
Christoph Hellwig July 20, 2021, 1:26 p.m. UTC | #2
On Tue, Jul 20, 2021 at 01:27:24PM +0100, Matthew Wilcox wrote:
> On Tue, Jul 20, 2021 at 10:43:19AM +0200, Christoph Hellwig wrote:
> > Now that the outstanding reads are counted in bytes, there is no need
> > to use the low-level __bio_try_merge_page API; we can switch back to
> > always using bio_add_page and simplify iomap_readpage_actor again.
> 
> I don't think this quite works.  You need to check the return value
> from bio_add_page(), otherwise you can be in a situation where you try
> to add a page to the last bvec and it's not contiguous, so it fails.

Indeed.  While bio_full covers the number of vectors, this won't work if
we run out of bytes in bi_size.

I think we can do this version, but I haven't tested it yet:

---
From 4198cd5805d45f83c9029743ad5d0ce774a4e0f8 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Mon, 12 Jul 2021 11:00:58 +0200
Subject: iomap: simplify iomap_readpage_actor

Now that the outstanding reads are counted in bytes, there is no need
to use the low-level __bio_try_merge_page API; we can switch back to
always using bio_add_page and simplify iomap_readpage_actor again.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/iomap/buffered-io.c | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 87ccb3438becd9..712b6513a0c449 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -241,7 +241,6 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	struct iomap_readpage_ctx *ctx = data;
 	struct page *page = ctx->cur_page;
 	struct iomap_page *iop;
-	bool same_page = false, is_contig = false;
 	loff_t orig_pos = pos;
 	unsigned poff, plen;
 	sector_t sector;
@@ -268,16 +267,10 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	if (iop)
 		atomic_add(plen, &iop->read_bytes_pending);
 
-	/* Try to merge into a previous segment if we can */
 	sector = iomap_sector(iomap, pos);
-	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
-		if (__bio_try_merge_page(ctx->bio, page, plen, poff,
-				&same_page))
-			goto done;
-		is_contig = true;
-	}
-
-	if (!is_contig || bio_full(ctx->bio, plen)) {
+	if (!ctx->bio ||
+	    bio_end_sector(ctx->bio) != sector ||
+	    bio_add_page(ctx->bio, page, plen, poff) != plen) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
 		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -301,9 +294,8 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		ctx->bio->bi_iter.bi_sector = sector;
 		bio_set_dev(ctx->bio, iomap->bdev);
 		ctx->bio->bi_end_io = iomap_read_end_io;
+		__bio_add_page(ctx->bio, page, plen, poff);
 	}
-
-	bio_add_page(ctx->bio, page, plen, poff);
 done:
 	/*
 	 * Move the caller beyond our range so that it keeps making progress.
Matthew Wilcox July 20, 2021, 1:41 p.m. UTC | #3
On Tue, Jul 20, 2021 at 03:26:44PM +0200, Christoph Hellwig wrote:
> I think we can do this version, but I haven't tested it yet:

I see no problems with this version.

Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 87ccb3438becd9..4eaadbd265fcfa 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -241,7 +241,6 @@  iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	struct iomap_readpage_ctx *ctx = data;
 	struct page *page = ctx->cur_page;
 	struct iomap_page *iop;
-	bool same_page = false, is_contig = false;
 	loff_t orig_pos = pos;
 	unsigned poff, plen;
 	sector_t sector;
@@ -268,16 +267,10 @@  iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 	if (iop)
 		atomic_add(plen, &iop->read_bytes_pending);
 
-	/* Try to merge into a previous segment if we can */
 	sector = iomap_sector(iomap, pos);
-	if (ctx->bio && bio_end_sector(ctx->bio) == sector) {
-		if (__bio_try_merge_page(ctx->bio, page, plen, poff,
-				&same_page))
-			goto done;
-		is_contig = true;
-	}
-
-	if (!is_contig || bio_full(ctx->bio, plen)) {
+	if (!ctx->bio ||
+	    bio_full(ctx->bio, plen) ||
+	    bio_end_sector(ctx->bio) != sector) {
 		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
 		gfp_t orig_gfp = gfp;
 		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
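
The shape that the thread converges on in Christoph's second version is:
make the merge attempt part of the "do we need a new bio" test, and only
in the allocation branch, where a freshly allocated bio has room for at
least one segment, add the page with the no-fail variant.  Below is a
minimal userspace sketch of that control flow; the names (fake_bio,
fake_add_page, read_extent and so on) are made up and only stand in for
the real bio machinery:

/* Hypothetical stand-ins for the structures and helpers discussed above;
 * this models only the control flow of the v2 hunk, not the real bio code. */
#include <stdio.h>

#define MAX_SEGS 4

struct fake_bio {
	unsigned long long end_sector;	/* sector just past the last segment */
	unsigned int nsegs;		/* stands in for bi_vcnt vs. bi_max_vecs */
};

/* Models bio_add_page(): returns the length added, or 0 if the bio is full. */
static unsigned int fake_add_page(struct fake_bio *bio, unsigned int len,
				  unsigned int sectors)
{
	if (bio->nsegs >= MAX_SEGS)
		return 0;
	bio->nsegs++;
	bio->end_sector += sectors;
	return len;
}

/* Models __bio_add_page(): the caller guarantees there is room. */
static void fake_add_page_nofail(struct fake_bio *bio, unsigned int len,
				 unsigned int sectors)
{
	(void)len;
	bio->nsegs++;
	bio->end_sector += sectors;
}

/* Stands in for submitting the current bio before starting a new one. */
static void fake_submit(struct fake_bio *bio)
{
	printf("submit bio with %u segment(s)\n", bio->nsegs);
}

/*
 * Mirrors the structure of the v2 hunk: merge into the current bio as part
 * of the "need a new bio" test; otherwise submit the old bio, start a fresh
 * one, and add the first segment with the no-fail variant.
 */
static void read_extent(struct fake_bio **ctx_bio, struct fake_bio *storage,
			unsigned long long sector, unsigned int plen,
			unsigned int sectors)
{
	struct fake_bio *bio = *ctx_bio;

	if (!bio || bio->end_sector != sector ||
	    fake_add_page(bio, plen, sectors) != plen) {
		if (bio)
			fake_submit(bio);
		storage->end_sector = sector;	/* new bio starts at this sector */
		storage->nsegs = 0;
		fake_add_page_nofail(storage, plen, sectors);
		*ctx_bio = storage;
	}
}

int main(void)
{
	struct fake_bio storage = { 0 };
	struct fake_bio *bio = NULL;

	read_extent(&bio, &storage, 0, 4096, 8);	/* starts a new bio */
	read_extent(&bio, &storage, 8, 4096, 8);	/* contiguous: merged */
	read_extent(&bio, &storage, 64, 4096, 8);	/* gap: submits, new bio */
	printf("current bio has %u segment(s)\n", bio->nsegs);
	return 0;
}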