diff mbox series

[PATCHv2,1/3] block/bio: remove duplicate append pages code

Message ID 20220518171131.3525293-2-kbusch@fb.com (mailing list archive)
State New, archived
Headers show
Series direct io alignment relax | expand

Commit Message

Keith Busch May 18, 2022, 5:11 p.m. UTC
From: Keith Busch <kbusch@kernel.org>

The setup for getting pages is identical for zone append and normal IO.
Use common code for each.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 block/bio.c | 90 ++++++++++++++++++++++-------------------------------
 1 file changed, 38 insertions(+), 52 deletions(-)

Comments

Chaitanya Kulkarni May 18, 2022, 8:21 p.m. UTC | #1
On 5/18/22 10:11, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> The setup for getting pages is identical for zone append and normal IO.
> Use common code for each.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---
>

Looks good.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>

-ck
Bart Van Assche May 19, 2022, 4:28 a.m. UTC | #2
On 5/18/22 19:11, Keith Busch wrote:
> +	for (left = size, i = 0; left > 0; left -= len, i++) {
> +		struct page *page = pages[i];
> +		bool same_page = false;
> +
> +		len = min_t(size_t, PAGE_SIZE - offset, left);
> +		if (bio_add_hw_page(q, bio, page, len, offset,
> +				max_append_sectors, &same_page) != len) {
> +			bio_put_pages(pages + i, left, offset);
> +			ret = -EINVAL;
> +			break;
> +		}
> +		if (same_page)
> +			put_page(page);
> +		offset = 0;
> +	}

Consider renaming 'same_page' into 'merged'. I think that name reflects 
much better the purpose of that variable.

Thanks,

Bart.
Christoph Hellwig May 19, 2022, 7:32 a.m. UTC | #3
On Wed, May 18, 2022 at 10:11:29AM -0700, Keith Busch wrote:
> From: Keith Busch <kbusch@kernel.org>
> 
> The setup for getting pages is identical for zone append and normal IO.
> Use common code for each.

How about using even more common code and avoiding churn at the same
time?  Something like:

diff --git a/block/bio.c b/block/bio.c
index a3893d80dccc9..15da722ed26d1 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1158,6 +1158,37 @@ static void bio_put_pages(struct page **pages, size_t size, size_t off)
 		put_page(pages[i]);
 }
 
+static int bio_iov_add_page(struct bio *bio, struct page *page,
+		unsigned int len, unsigned int offset)
+{
+	bool same_page = false;
+
+	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
+		if (WARN_ON_ONCE(bio_full(bio, len)))
+			return -EINVAL;
+		__bio_add_page(bio, page, len, offset);
+		return 0;
+	}
+
+	if (same_page)
+		put_page(page);
+	return 0;
+}
+
+static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
+		unsigned int len, unsigned int offset)
+{
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	bool same_page = false;
+
+	if (bio_add_hw_page(q, bio, page, len, offset,
+			queue_max_zone_append_sectors(q), &same_page) != len)
+		return -EINVAL;
+	if (same_page)
+		put_page(page);
+	return 0;
+}
+
 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 
 /**
@@ -1176,7 +1207,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
 	struct page **pages = (struct page **)bv;
-	bool same_page = false;
 	ssize_t size, left;
 	unsigned len, i;
 	size_t offset;
@@ -1195,18 +1225,18 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 
 	for (left = size, i = 0; left > 0; left -= len, i++) {
 		struct page *page = pages[i];
+		int ret;
 
 		len = min_t(size_t, PAGE_SIZE - offset, left);
+		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
+			ret = bio_iov_add_zone_append_page(bio, page, len,
+					offset);
+		else
+			ret = bio_iov_add_page(bio, page, len, offset);
 
-		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
-			if (same_page)
-				put_page(page);
-		} else {
-			if (WARN_ON_ONCE(bio_full(bio, len))) {
-				bio_put_pages(pages + i, left, offset);
-				return -EINVAL;
-			}
-			__bio_add_page(bio, page, len, offset);
+		if (ret) {
+			bio_put_pages(pages + i, left, offset);
+			return ret;
 		}
 		offset = 0;
 	}
@@ -1215,54 +1245,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	return 0;
 }
 
-static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
-{
-	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
-	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
-	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
-	struct page **pages = (struct page **)bv;
-	ssize_t size, left;
-	unsigned len, i;
-	size_t offset;
-	int ret = 0;
-
-	if (WARN_ON_ONCE(!max_append_sectors))
-		return 0;
-
-	/*
-	 * Move page array up in the allocated memory for the bio vecs as far as
-	 * possible so that we can start filling biovecs from the beginning
-	 * without overwriting the temporary page array.
-	 */
-	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
-	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
-
-	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
-	if (unlikely(size <= 0))
-		return size ? size : -EFAULT;
-
-	for (left = size, i = 0; left > 0; left -= len, i++) {
-		struct page *page = pages[i];
-		bool same_page = false;
-
-		len = min_t(size_t, PAGE_SIZE - offset, left);
-		if (bio_add_hw_page(q, bio, page, len, offset,
-				max_append_sectors, &same_page) != len) {
-			bio_put_pages(pages + i, left, offset);
-			ret = -EINVAL;
-			break;
-		}
-		if (same_page)
-			put_page(page);
-		offset = 0;
-	}
-
-	iov_iter_advance(iter, size - left);
-	return ret;
-}
-
 /**
  * bio_iov_iter_get_pages - add user or kernel pages to a bio
  * @bio: bio to add pages to
@@ -1297,10 +1279,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	}
 
 	do {
-		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
-			ret = __bio_iov_append_get_pages(bio, iter);
-		else
-			ret = __bio_iov_iter_get_pages(bio, iter);
+		ret = __bio_iov_iter_get_pages(bio, iter);
 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
 
 	/* don't account direct I/O as memory stall */
Keith Busch May 19, 2022, 2:19 p.m. UTC | #4
On Thu, May 19, 2022 at 09:32:56AM +0200, Christoph Hellwig wrote:
> On Wed, May 18, 2022 at 10:11:29AM -0700, Keith Busch wrote:
> > From: Keith Busch <kbusch@kernel.org>
> > 
> > The setup for getting pages is identical for zone append and normal IO.
> > Use common code for each.
> 
> How about using even more common code and avoiding churn at the same
> time?  Something like:

Yes, I'll fold this in.
 
> diff --git a/block/bio.c b/block/bio.c
> index a3893d80dccc9..15da722ed26d1 100644
> --- a/block/bio.c
> +++ b/block/bio.c
> @@ -1158,6 +1158,37 @@ static void bio_put_pages(struct page **pages, size_t size, size_t off)
>  		put_page(pages[i]);
>  }
>  
> +static int bio_iov_add_page(struct bio *bio, struct page *page,
> +		unsigned int len, unsigned int offset)
> +{
> +	bool same_page = false;
> +
> +	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
> +		if (WARN_ON_ONCE(bio_full(bio, len)))
> +			return -EINVAL;
> +		__bio_add_page(bio, page, len, offset);
> +		return 0;
> +	}
> +
> +	if (same_page)
> +		put_page(page);
> +	return 0;
> +}
> +
> +static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
> +		unsigned int len, unsigned int offset)
> +{
> +	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
> +	bool same_page = false;
> +
> +	if (bio_add_hw_page(q, bio, page, len, offset,
> +			queue_max_zone_append_sectors(q), &same_page) != len)
> +		return -EINVAL;
> +	if (same_page)
> +		put_page(page);
> +	return 0;
> +}
> +
>  #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
>  
>  /**
> @@ -1176,7 +1207,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>  	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
>  	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
>  	struct page **pages = (struct page **)bv;
> -	bool same_page = false;
>  	ssize_t size, left;
>  	unsigned len, i;
>  	size_t offset;
> @@ -1195,18 +1225,18 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>  
>  	for (left = size, i = 0; left > 0; left -= len, i++) {
>  		struct page *page = pages[i];
> +		int ret;
>  
>  		len = min_t(size_t, PAGE_SIZE - offset, left);
> +		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
> +			ret = bio_iov_add_zone_append_page(bio, page, len,
> +					offset);
> +		else
> +			ret = bio_iov_add_page(bio, page, len, offset);
>  
> -		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
> -			if (same_page)
> -				put_page(page);
> -		} else {
> -			if (WARN_ON_ONCE(bio_full(bio, len))) {
> -				bio_put_pages(pages + i, left, offset);
> -				return -EINVAL;
> -			}
> -			__bio_add_page(bio, page, len, offset);
> +		if (ret) {
> +			bio_put_pages(pages + i, left, offset);
> +			return ret;
>  		}
>  		offset = 0;
>  	}
> @@ -1215,54 +1245,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>  	return 0;
>  }
>  
> -static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
> -{
> -	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
> -	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
> -	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
> -	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
> -	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
> -	struct page **pages = (struct page **)bv;
> -	ssize_t size, left;
> -	unsigned len, i;
> -	size_t offset;
> -	int ret = 0;
> -
> -	if (WARN_ON_ONCE(!max_append_sectors))
> -		return 0;
> -
> -	/*
> -	 * Move page array up in the allocated memory for the bio vecs as far as
> -	 * possible so that we can start filling biovecs from the beginning
> -	 * without overwriting the temporary page array.
> -	 */
> -	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
> -	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
> -
> -	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
> -	if (unlikely(size <= 0))
> -		return size ? size : -EFAULT;
> -
> -	for (left = size, i = 0; left > 0; left -= len, i++) {
> -		struct page *page = pages[i];
> -		bool same_page = false;
> -
> -		len = min_t(size_t, PAGE_SIZE - offset, left);
> -		if (bio_add_hw_page(q, bio, page, len, offset,
> -				max_append_sectors, &same_page) != len) {
> -			bio_put_pages(pages + i, left, offset);
> -			ret = -EINVAL;
> -			break;
> -		}
> -		if (same_page)
> -			put_page(page);
> -		offset = 0;
> -	}
> -
> -	iov_iter_advance(iter, size - left);
> -	return ret;
> -}
> -
>  /**
>   * bio_iov_iter_get_pages - add user or kernel pages to a bio
>   * @bio: bio to add pages to
> @@ -1297,10 +1279,7 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
>  	}
>  
>  	do {
> -		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
> -			ret = __bio_iov_append_get_pages(bio, iter);
> -		else
> -			ret = __bio_iov_iter_get_pages(bio, iter);
> +		ret = __bio_iov_iter_get_pages(bio, iter);
>  	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
>  
>  	/* don't account direct I/O as memory stall */
diff mbox series

Patch

diff --git a/block/bio.c b/block/bio.c
index a3893d80dccc..320514a47527 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1158,6 +1158,39 @@  static void bio_put_pages(struct page **pages, size_t size, size_t off)
 		put_page(pages[i]);
 }
 
+static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter,
+				      struct page **pages, ssize_t size,
+				      size_t offset)
+{
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
+	unsigned len, i;
+	ssize_t left;
+	int ret = 0;
+
+	if (WARN_ON_ONCE(!max_append_sectors))
+		return 0;
+
+	for (left = size, i = 0; left > 0; left -= len, i++) {
+		struct page *page = pages[i];
+		bool same_page = false;
+
+		len = min_t(size_t, PAGE_SIZE - offset, left);
+		if (bio_add_hw_page(q, bio, page, len, offset,
+				max_append_sectors, &same_page) != len) {
+			bio_put_pages(pages + i, left, offset);
+			ret = -EINVAL;
+			break;
+		}
+		if (same_page)
+			put_page(page);
+		offset = 0;
+	}
+
+	iov_iter_advance(iter, size - left);
+	return ret;
+}
+
 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 
 /**
@@ -1193,6 +1226,10 @@  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	if (unlikely(size <= 0))
 		return size ? size : -EFAULT;
 
+	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
+		return __bio_iov_append_get_pages(bio, iter, pages, size,
+						  offset);
+
 	for (left = size, i = 0; left > 0; left -= len, i++) {
 		struct page *page = pages[i];
 
@@ -1215,54 +1252,6 @@  static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	return 0;
 }
 
-static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
-{
-	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
-	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
-	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
-	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
-	struct page **pages = (struct page **)bv;
-	ssize_t size, left;
-	unsigned len, i;
-	size_t offset;
-	int ret = 0;
-
-	if (WARN_ON_ONCE(!max_append_sectors))
-		return 0;
-
-	/*
-	 * Move page array up in the allocated memory for the bio vecs as far as
-	 * possible so that we can start filling biovecs from the beginning
-	 * without overwriting the temporary page array.
-	 */
-	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
-	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
-
-	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
-	if (unlikely(size <= 0))
-		return size ? size : -EFAULT;
-
-	for (left = size, i = 0; left > 0; left -= len, i++) {
-		struct page *page = pages[i];
-		bool same_page = false;
-
-		len = min_t(size_t, PAGE_SIZE - offset, left);
-		if (bio_add_hw_page(q, bio, page, len, offset,
-				max_append_sectors, &same_page) != len) {
-			bio_put_pages(pages + i, left, offset);
-			ret = -EINVAL;
-			break;
-		}
-		if (same_page)
-			put_page(page);
-		offset = 0;
-	}
-
-	iov_iter_advance(iter, size - left);
-	return ret;
-}
-
 /**
  * bio_iov_iter_get_pages - add user or kernel pages to a bio
  * @bio: bio to add pages to
@@ -1297,10 +1286,7 @@  int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	}
 
 	do {
-		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
-			ret = __bio_iov_append_get_pages(bio, iter);
-		else
-			ret = __bio_iov_iter_get_pages(bio, iter);
+		ret = __bio_iov_iter_get_pages(bio, iter);
 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
 
 	/* don't account direct I/O as memory stall */