
[v14,097/138] iomap: Pass the iomap_page into iomap_set_range_uptodate

Message ID: 20210715033704.692967-98-willy@infradead.org
State: New
Series: Memory folios

Commit Message

Matthew Wilcox July 15, 2021, 3:36 a.m. UTC
All but one caller already has the iomap_page, and we can avoid getting
it again.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
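
The change follows a common refactoring pattern: when nearly every caller has already derived a piece of state, thread it through as a parameter instead of re-deriving it in the callee. Schematically (an illustrative sketch with condensed names, not the patch itself):

/* Before: the callee re-derives the per-page state on every call. */
static void set_range_uptodate(struct page *page, unsigned off,
		unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page_folio(page));
	/* ... mark the blocks covering [off, off + len) uptodate ... */
}

/* After: a caller that already holds iop passes it down, saving a
 * folio lookup and a folio->private dereference per call. */
static void set_range_uptodate(struct page *page, struct iomap_page *iop,
		unsigned off, unsigned len)
{
	/* ... same work, no repeated lookup ... */
}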

Comments

Darrick J. Wong July 15, 2021, 9:21 p.m. UTC | #1
On Thu, Jul 15, 2021 at 04:36:23AM +0100, Matthew Wilcox (Oracle) wrote:
> All but one caller already has the iomap_page, and we can avoid getting
> it again.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Took me a while to distinguish iomap_iop_set_range_uptodate and
iomap_set_range_uptodate, but yes, this looks pretty simple.

Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D
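
For anyone else pausing on the same distinction: the un-suffixed function is the generic entry point, and the _iop_ variant does the per-block bookkeeping when sub-page state exists. Condensed from the hunks below (illustrative, not verbatim):

static void iomap_set_range_uptodate(struct page *page,
		struct iomap_page *iop, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (iop)	/* page spans several blocks: update the bitmap */
		iomap_iop_set_range_uptodate(page, iop, off, len);
	else		/* a single uptodate flag covers the whole page */
		SetPageUptodate(page);
}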

Matthew Wilcox July 16, 2021, 3:21 a.m. UTC | #2
On Thu, Jul 15, 2021 at 02:21:05PM -0700, Darrick J. Wong wrote:
> On Thu, Jul 15, 2021 at 04:36:23AM +0100, Matthew Wilcox (Oracle) wrote:
> > All but one caller already has the iomap_page, and we can avoid getting
> > it again.
> > 
> > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> 
> Took me a while to distinguish iomap_iop_set_range_uptodate and
> iomap_set_range_uptodate, but yes, this looks pretty simple.

Not my favourite naming, but it's a preexisting condition ;-)

Honestly I'd like to rename iomap to blkmap or something.
And iomap_page is now hilariously badly named.  But that's kind
of tangential to everything else here.
Darrick J. Wong July 16, 2021, 4:34 p.m. UTC | #3
On Fri, Jul 16, 2021 at 04:21:25AM +0100, Matthew Wilcox wrote:
> On Thu, Jul 15, 2021 at 02:21:05PM -0700, Darrick J. Wong wrote:
> > On Thu, Jul 15, 2021 at 04:36:23AM +0100, Matthew Wilcox (Oracle) wrote:
> > > All but one caller already has the iomap_page, and we can avoid getting
> > > it again.
> > > 
> > > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> > 
> > Took me a while to distinguish iomap_iop_set_range_uptodate and
> > iomap_set_range_uptodate, but yes, this looks pretty simple.
> 
> Not my favourite naming, but it's a preexisting condition ;-)
> 
> Honestly I'd like to rename iomap to blkmap or something.
> And iomap_page is now hilariously badly named.  But that's kind
> of tangential to everything else here.

I guess we only use 'blkmap' in a few places in the kernel, and nobody's
going to confuse us with UFS.

Hmm, what kind of new name?

struct iomap_buffer_head *ibh;	/* NO */
struct iomap_folio_state *ifs;
struct iomap_state *is;		/* shorter, but what is 'state'? */
struct iomap_blkmap *ibm;	/* lolz */

I think iomap_blkmap sounds fine, since we're probably going to end up
exporting it (and therefore need a clear namespace) as soon as one of
the filesystems that uses page->private to stash per-page info wants to
use iomap for buffered io.

--D
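
For reference, the object under discussion is the per-page state iomap stashes in the folio's ->private when a page spans more than one filesystem block. At this point in the series it looks roughly like this (paraphrased from fs/iomap/buffered-io.c; the field set and helper are approximate, not quoted verbatim):

/*
 * Allocated for each page/folio that straddles several blocks, tracking
 * per-block uptodate state and pending read/write I/O byte counts.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];	/* one bit per block */
};

static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}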

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 6b41019a51a3..fbe4ebc074ce 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -134,11 +134,9 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
 	*lenp = plen;
 }
 
-static void
-iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_iop_set_range_uptodate(struct page *page,
+		struct iomap_page *iop, unsigned off, unsigned len)
 {
-	struct folio *folio = page_folio(page);
-	struct iomap_page *iop = to_iomap_page(folio);
 	struct inode *inode = page->mapping->host;
 	unsigned first = off >> inode->i_blkbits;
 	unsigned last = (off + len - 1) >> inode->i_blkbits;
@@ -151,14 +149,14 @@ iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
 	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
 }
 
-static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_set_range_uptodate(struct page *page,
+		struct iomap_page *iop, unsigned off, unsigned len)
 {
 	if (PageError(page))
 		return;
 
-	if (page_has_private(page))
-		iomap_iop_set_range_uptodate(page, off, len);
+	if (iop)
+		iomap_iop_set_range_uptodate(page, iop, off, len);
 	else
 		SetPageUptodate(page);
 }
@@ -174,7 +172,8 @@ iomap_read_page_end_io(struct bio_vec *bvec, int error)
 		ClearPageUptodate(page);
 		SetPageError(page);
 	} else {
-		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+		iomap_set_range_uptodate(page, iop, bvec->bv_offset,
+						bvec->bv_len);
 	}
 
 	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
@@ -254,7 +253,7 @@ iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 
 	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
 		zero_user(page, poff, plen);
-		iomap_set_range_uptodate(page, poff, plen);
+		iomap_set_range_uptodate(page, iop, poff, plen);
 		goto done;
 	}
 
@@ -583,7 +582,7 @@ __iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
 			if (status)
 				return status;
 		}
-		iomap_set_range_uptodate(page, poff, plen);
+		iomap_set_range_uptodate(page, iop, poff, plen);
 	} while ((block_start += plen) < block_end);
 
 	return 0;
@@ -645,6 +644,8 @@ iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 		size_t copied, struct page *page)
 {
+	struct folio *folio = page_folio(page);
+	struct iomap_page *iop = to_iomap_page(folio);
 	flush_dcache_page(page);
 
 	/*
@@ -660,7 +661,7 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 */
 	if (unlikely(copied < len && !PageUptodate(page)))
 		return 0;
-	iomap_set_range_uptodate(page, offset_in_page(pos), len);
+	iomap_set_range_uptodate(page, iop, offset_in_page(pos), len);
 	__set_page_dirty_nobuffers(page);
 	return copied;
 }