
[RFC,1/7] block: share more code for bio addition helpers

Message ID e6be9d87a3ca1a2f9c27bce73fbab559c21c765f.1730037261.git.leon@kernel.org (mailing list archive)
State Not Applicable
Series Block and NVMe PCI use of new DMA mapping API

Commit Message

Leon Romanovsky Oct. 27, 2024, 2:21 p.m. UTC
From: Christoph Hellwig <hch@lst.de>

__bio_iov_iter_get_pages currently open codes adding pages to the bio,
which duplicates a lot of code from bio_add_page and
bio_add_zone_append_page.  Add bio_add_page_int and
bio_add_zone_append_page_int helpers that pass down the same_page
output argument, so that __bio_iov_iter_get_pages can reuse the main
helpers for adding pages to a bio.

Note that I'd normally call these helpers __bio_add_page and
__bio_add_zone_append_page, but the former is already taken for an
exported API.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
---
 block/bio.c | 114 +++++++++++++++++++++++-----------------------------
 1 file changed, 51 insertions(+), 63 deletions(-)
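
[Editor's note: the shape of the refactor is a thin exported wrapper
around an internal helper that threads a same_page output flag down to
the merge logic. Below is a minimal standalone sketch of that pattern;
the buf, add_range, and add_range_int names are hypothetical stand-ins,
not the actual kernel code.]

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a bio: just enough state to show the shape. */
struct buf {
	unsigned int size;
	unsigned int vcnt;
};

/*
 * Internal helper: does the real work and reports through *same_page
 * whether the new range merged into the previous vector entry.
 */
static int add_range_int(struct buf *b, unsigned int len, bool *same_page)
{
	/* Pretend every addition after the first merges into the prior entry. */
	*same_page = (b->vcnt > 0);
	if (!*same_page)
		b->vcnt++;
	b->size += len;
	return len;		/* 0 would signal failure, as in bio_add_page */
}

/*
 * Exported-style wrapper: callers that do not care about merging just
 * supply a throwaway same_page flag, as bio_add_page now does.
 */
static int add_range(struct buf *b, unsigned int len)
{
	bool same_page = false;

	return add_range_int(b, len, &same_page);
}

int main(void)
{
	struct buf b = { 0, 0 };
	bool same_page = false;

	add_range(&b, 512);			/* wrapper path */
	add_range_int(&b, 512, &same_page);	/* caller that needs the flag */
	printf("size=%u vcnt=%u merged=%d\n", b.size, b.vcnt, same_page);
	return 0;
}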

Comments

Bart Van Assche Oct. 31, 2024, 8:55 p.m. UTC | #1
On 10/27/24 7:21 AM, Leon Romanovsky wrote:
> +static int bio_add_zone_append_page_int(struct bio *bio, struct page *page,
> +		unsigned int len, unsigned int offset, bool *same_page)
> +{
> +	struct block_device *bdev = bio->bi_bdev;
> +
> +	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
> +		return 0;
> +	if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
> +		return 0;
> +	return bio_add_hw_page(bdev_get_queue(bdev), bio, page, len, offset,
> +			bdev_max_zone_append_sectors(bdev), same_page);
> +}

Does "_int" stand for "_internal"? If so, please consider changing it
into "_impl". I think that will prevent that anyone confuses this suffix
with the "int" data type.

Thanks,

Bart.

Patch

diff --git a/block/bio.c b/block/bio.c
index ac4d77c88932..2d3bc8bfb071 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1064,6 +1064,19 @@  int bio_add_pc_page(struct request_queue *q, struct bio *bio,
 }
 EXPORT_SYMBOL(bio_add_pc_page);
 
+static int bio_add_zone_append_page_int(struct bio *bio, struct page *page,
+		unsigned int len, unsigned int offset, bool *same_page)
+{
+	struct block_device *bdev = bio->bi_bdev;
+
+	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
+		return 0;
+	if (WARN_ON_ONCE(!bdev_is_zoned(bdev)))
+		return 0;
+	return bio_add_hw_page(bdev_get_queue(bdev), bio, page, len, offset,
+			bdev_max_zone_append_sectors(bdev), same_page);
+}
+
 /**
  * bio_add_zone_append_page - attempt to add page to zone-append bio
  * @bio: destination bio
@@ -1083,17 +1096,9 @@  EXPORT_SYMBOL(bio_add_pc_page);
 int bio_add_zone_append_page(struct bio *bio, struct page *page,
 			     unsigned int len, unsigned int offset)
 {
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	bool same_page = false;
 
-	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
-		return 0;
-
-	if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
-		return 0;
-
-	return bio_add_hw_page(q, bio, page, len, offset,
-			       queue_max_zone_append_sectors(q), &same_page);
+	return bio_add_zone_append_page_int(bio, page, len, offset, &same_page);
 }
 EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
 
@@ -1119,20 +1124,9 @@  void __bio_add_page(struct bio *bio, struct page *page,
 }
 EXPORT_SYMBOL_GPL(__bio_add_page);
 
-/**
- *	bio_add_page	-	attempt to add page(s) to bio
- *	@bio: destination bio
- *	@page: start page to add
- *	@len: vec entry length, may cross pages
- *	@offset: vec entry offset relative to @page, may cross pages
- *
- *	Attempt to add page(s) to the bio_vec maplist. This will only fail
- *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
- */
-int bio_add_page(struct bio *bio, struct page *page,
-		 unsigned int len, unsigned int offset)
+static int bio_add_page_int(struct bio *bio, struct page *page,
+		 unsigned int len, unsigned int offset, bool *same_page)
 {
-	bool same_page = false;
 
 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
 		return 0;
@@ -1141,7 +1135,7 @@  int bio_add_page(struct bio *bio, struct page *page,
 
 	if (bio->bi_vcnt > 0 &&
 	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
-				page, len, offset, &same_page)) {
+				page, len, offset, same_page)) {
 		bio->bi_iter.bi_size += len;
 		return len;
 	}
@@ -1151,6 +1145,24 @@  int bio_add_page(struct bio *bio, struct page *page,
 	__bio_add_page(bio, page, len, offset);
 	return len;
 }
+
+/**
+ * bio_add_page	- attempt to add page(s) to bio
+ * @bio: destination bio
+ * @page: start page to add
+ * @len: vec entry length, may cross pages
+ * @offset: vec entry offset relative to @page, may cross pages
+ *
+ * Attempt to add page(s) to the bio_vec maplist.  Will only fail if the
+ * bio is full, or it is incorrectly used on a cloned bio.
+ */
+int bio_add_page(struct bio *bio, struct page *page,
+		 unsigned int len, unsigned int offset)
+{
+	bool same_page = false;
+
+	return bio_add_page_int(bio, page, len, offset, &same_page);
+}
 EXPORT_SYMBOL(bio_add_page);
 
 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
@@ -1224,41 +1236,6 @@  void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 	bio_set_flag(bio, BIO_CLONED);
 }
 
-static int bio_iov_add_folio(struct bio *bio, struct folio *folio, size_t len,
-			     size_t offset)
-{
-	bool same_page = false;
-
-	if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
-		return -EIO;
-
-	if (bio->bi_vcnt > 0 &&
-	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
-				folio_page(folio, 0), len, offset,
-				&same_page)) {
-		bio->bi_iter.bi_size += len;
-		if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
-			unpin_user_folio(folio, 1);
-		return 0;
-	}
-	bio_add_folio_nofail(bio, folio, len, offset);
-	return 0;
-}
-
-static int bio_iov_add_zone_append_folio(struct bio *bio, struct folio *folio,
-					 size_t len, size_t offset)
-{
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	bool same_page = false;
-
-	if (bio_add_hw_folio(q, bio, folio, len, offset,
-			queue_max_zone_append_sectors(q), &same_page) != len)
-		return -EINVAL;
-	if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
-		unpin_user_folio(folio, 1);
-	return 0;
-}
-
 static unsigned int get_contig_folio_len(unsigned int *num_pages,
 					 struct page **pages, unsigned int i,
 					 struct folio *folio, size_t left,
@@ -1353,6 +1330,8 @@  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
 		struct page *page = pages[i];
 		struct folio *folio = page_folio(page);
+		struct page *first_page = folio_page(folio, 0);
+		bool same_page = false;
 
 		folio_offset = ((size_t)folio_page_idx(folio, page) <<
 			       PAGE_SHIFT) + offset;
@@ -1366,12 +1345,21 @@  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 						   folio, left, offset);
 
 		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-			ret = bio_iov_add_zone_append_folio(bio, folio, len,
-					folio_offset);
-			if (ret)
+			if (bio_add_zone_append_page_int(bio, first_page, len,
+					folio_offset, &same_page) != len) {
+				ret = -EINVAL;
+				break;
+			}
+		} else {
+			if (bio_add_page_int(bio, folio_page(folio, 0), len,
+					folio_offset, &same_page) != len) {
+				ret = -EINVAL;
 				break;
-		} else
-			bio_iov_add_folio(bio, folio, len, folio_offset);
+			}
+		}
+
+		if (same_page && bio_flagged(bio, BIO_PAGE_PINNED))
+			unpin_user_folio(folio, 1);
 
 		offset = 0;
 	}