[v15,08/17] iomap: Pass the iomap_page into iomap_set_range_uptodate

Message ID 20210719184001.1750630-9-willy@infradead.org (mailing list archive)
State New, archived
Series Folio support in block + iomap layers

Commit Message

Matthew Wilcox (Oracle) July 19, 2021, 6:39 p.m. UTC
All but one caller already has the iomap_page, and we can avoid getting
it again.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
---
 fs/iomap/buffered-io.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
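In concrete terms, the change moves the iomap_page lookup out of the helper and into its callers. A schematic sketch of the two shapes (illustrative names, bodies elided; the full diff is below):

/* Before: the helper re-derives iop from the page on every call. */
static void set_range_uptodate_reload(struct page *page,
		unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page_folio(page));
	/* ... mark blocks covering [off, off + len) uptodate ... */
}

/* After: the caller looks iop up once and passes it down. */
static void set_range_uptodate_passed(struct page *page,
		struct iomap_page *iop, unsigned off, unsigned len)
{
	/* ... same work, without the per-call lookup ... */
}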

Comments

Christoph Hellwig July 20, 2021, 6:57 a.m. UTC | #1
On Mon, Jul 19, 2021 at 07:39:52PM +0100, Matthew Wilcox (Oracle) wrote:
> All but one caller already has the iomap_page, and we can avoid getting
> it again.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: Darrick J. Wong <djwong@kernel.org>

In general reloading it from a single pointer should be faster than
passing an extra argument.  What is the rationale here?
Matthew Wilcox (Oracle) July 20, 2021, 11:36 a.m. UTC | #2
On Tue, Jul 20, 2021 at 08:57:06AM +0200, Christoph Hellwig wrote:
> On Mon, Jul 19, 2021 at 07:39:52PM +0100, Matthew Wilcox (Oracle) wrote:
> > All but one caller already has the iomap_page, and we can avoid getting
> > it again.
> > 
> > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> > Reviewed-by: Darrick J. Wong <djwong@kernel.org>
> 
> In general reloading it from a single pointer should be faster than
> passing an extra argument.  What is the rationale here?

I'm not sure we can make that determination in general; it's going to
depend on the number of registers the CPU has, the calling conventions,
how much inlining the compiler chooses to do, etc.

The compiler is actually _forced_ to reload the iop from the page
by the test_bit() in PagePrivate() / folio_test_private().  I want
to kill off that bit, but until I have time to work on that, we
don't have a good workaround.
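To make the test_bit() point concrete: test_bit() reads the flags word through a volatile-qualified pointer, which is what stops the compiler from caching the result of to_iomap_page() across calls. A minimal userspace model of the effect (simplified; not the kernel implementation):

#include <stdio.h>

struct folio { unsigned long flags; void *private; };

/*
 * Model of test_bit(): the volatile access forces a fresh memory
 * read on every call; the compiler may not cache or hoist it.
 */
static int test_bit_model(int nr, const volatile unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

/* Model of to_iomap_page(): NULL unless the "private" bit is set. */
static void *to_iop_model(struct folio *folio)
{
	if (test_bit_model(0, &folio->flags))
		return folio->private;
	return NULL;
}

int main(void)
{
	int x = 42;
	struct folio f = { .flags = 1, .private = &x };

	/* Each call re-reads f.flags; a passed-in iop pointer would not. */
	printf("%p\n", to_iop_model(&f));
	printf("%p\n", to_iop_model(&f));
	return 0;
}

Passing the iomap_page as an explicit argument lets the value stay in a register across calls instead of being re-derived through that volatile read each time.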

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 0d7b6ef4c5cc..62d8ebcda429 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -134,11 +134,9 @@  iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 	*lenp = plen;
 }
 
-static void
-iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_iop_set_range_uptodate(struct page *page,
+		struct iomap_page *iop, unsigned off, unsigned len)
 {
-	struct folio *folio = page_folio(page);
-	struct iomap_page *iop = to_iomap_page(folio);
 	struct inode *inode = page->mapping->host;
 	unsigned first = off >> inode->i_blkbits;
 	unsigned last = (off + len - 1) >> inode->i_blkbits;
@@ -151,14 +149,14 @@  iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
 }
 
-static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_set_range_uptodate(struct page *page,
+		struct iomap_page *iop, unsigned off, unsigned len)
 {
 	if (PageError(page))
 		return;
 
-	if (page_has_private(page))
-		iomap_iop_set_range_uptodate(page, off, len);
+	if (iop)
+		iomap_iop_set_range_uptodate(page, iop, off, len);
 	else
 		SetPageUptodate(page);
 }
@@ -174,7 +172,8 @@  iomap_read_page_end_io(struct bio_vec *bvec, int error)
 		ClearPageUptodate(page);
 		SetPageError(page);
 	} else {
-		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+		iomap_set_range_uptodate(page, iop, bvec->bv_offset,
+						bvec->bv_len);
 	}
 
 	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
@@ -256,7 +255,7 @@  iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 
 	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
 		zero_user(page, poff, plen);
-		iomap_set_range_uptodate(page, poff, plen);
+		iomap_set_range_uptodate(page, iop, poff, plen);
 		goto done;
 	}
 
@@ -585,7 +584,7 @@  __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
 			if (status)
 				return status;
 		}
-		iomap_set_range_uptodate(page, poff, plen);
+		iomap_set_range_uptodate(page, iop, poff, plen);
 	} while ((block_start += plen) < block_end);
 
 	return 0;
@@ -647,6 +646,8 @@  iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		size_t copied, struct page *page)
 {
+	struct folio *folio = page_folio(page);
+	struct iomap_page *iop = to_iomap_page(folio);
 	flush_dcache_page(page);
 
 	/*
@@ -662,7 +663,7 @@  static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 */
 	if (unlikely(copied < len && !PageUptodate(page)))
 		return 0;
-	iomap_set_range_uptodate(page, offset_in_page(pos), len);
+	iomap_set_range_uptodate(page, iop, offset_in_page(pos), len);
 	__set_page_dirty_nobuffers(page);
 	return copied;
 }