[v14,099/138] iomap: Convert bio completions to use folios

Message ID: 20210715033704.692967-100-willy@infradead.org
State: New, archived
Series: Memory folios

Commit Message

Matthew Wilcox July 15, 2021, 3:36 a.m. UTC
Use bio_for_each_folio_all() to iterate over each folio in the bio
instead of iterating over each page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 46 +++++++++++++++++-------------------------
 1 file changed, 18 insertions(+), 28 deletions(-)
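
For reference, bio_for_each_folio_all() (added earlier in this series) drives
a struct folio_iter across the bio's payload, yielding one (folio, offset,
length) triple per folio instead of one bio_vec per page. A minimal sketch of
the completion-handler shape this enables follows; the helper
note_folio_done() is hypothetical, standing in for the per-folio completion
work:

	static void example_read_end_io(struct bio *bio)
	{
		int error = blk_status_to_errno(bio->bi_status);
		struct folio_iter fi;	/* yields fi.folio, fi.offset, fi.length */

		/* One iteration per folio, even if a folio spans several bvecs. */
		bio_for_each_folio_all(fi, bio)
			note_folio_done(fi.folio, fi.offset, fi.length, error);

		/* Drop the completion's reference to the bio. */
		bio_put(bio);
	}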

Comments

Darrick J. Wong July 15, 2021, 9:30 p.m. UTC | #1
On Thu, Jul 15, 2021 at 04:36:25AM +0100, Matthew Wilcox (Oracle) wrote:
> Use bio_for_each_folio_all() to iterate over each folio in the bio
> instead of iterating over each page.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Neat conversion,
Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 707a96e36651..4732298f74e1 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -161,36 +161,29 @@ static void iomap_set_range_uptodate(struct folio *folio,
 		folio_mark_uptodate(folio);
 }
 
-static void
-iomap_read_page_end_io(struct bio_vec *bvec, int error)
+static void iomap_finish_folio_read(struct folio *folio, size_t offset,
+		size_t len, int error)
 {
-	struct page *page = bvec->bv_page;
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = to_iomap_page(folio);
 
 	if (unlikely(error)) {
 		folio_clear_uptodate(folio);
 		folio_set_error(folio);
 	} else {
-		size_t off = (page - &folio->page) * PAGE_SIZE +
-				bvec->bv_offset;
-
-		iomap_set_range_uptodate(folio, iop, off, bvec->bv_len);
+		iomap_set_range_uptodate(folio, iop, offset, len);
 	}
 
-	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
+	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
 		folio_unlock(folio);
 }
 
-static void
-iomap_read_end_io(struct bio *bio)
+static void iomap_read_end_io(struct bio *bio)
 {
 	int error = blk_status_to_errno(bio->bi_status);
-	struct bio_vec *bvec;
-	struct bvec_iter_all iter_all;
+	struct folio_iter fi;
 
-	bio_for_each_segment_all(bvec, bio, iter_all)
-		iomap_read_page_end_io(bvec, error);
+	bio_for_each_folio_all(fi, bio)
+		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
 	bio_put(bio);
 }
 
@@ -1014,23 +1007,21 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
 }
 EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
 
-static void
-iomap_finish_page_writeback(struct inode *inode, struct page *page,
-		int error, unsigned int len)
+static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+		size_t len, int error)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = to_iomap_page(folio);
 
 	if (error) {
-		SetPageError(page);
+		folio_set_error(folio);
 		mapping_set_error(inode->i_mapping, -EIO);
 	}
 
-	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
+	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
 	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
 
 	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
-		end_page_writeback(page);
+		folio_end_writeback(folio);
 }
 
 /*
@@ -1049,8 +1040,7 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
 	bool quiet = bio_flagged(bio, BIO_QUIET);
 
 	for (bio = &ioend->io_inline_bio; bio; bio = next) {
-		struct bio_vec *bv;
-		struct bvec_iter_all iter_all;
+		struct folio_iter fi;
 
 		/*
 		 * For the last bio, bi_private points to the ioend, so we
@@ -1061,10 +1051,10 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
 		else
 			next = bio->bi_private;
 
-		/* walk each page on bio, ending page IO on them */
-		bio_for_each_segment_all(bv, bio, iter_all)
-			iomap_finish_page_writeback(inode, bv->bv_page, error,
-					bv->bv_len);
+		/* walk all folios in bio, ending page IO on them */
+		bio_for_each_folio_all(fi, bio)
+			iomap_finish_folio_write(inode, fi.folio, fi.length,
+					error);
 		bio_put(bio);
 	}
 	/* The ioend has been freed by bio_put() */