[3/4] block: properly handle REQ_OP_ZONE_APPEND in __bio_split_to_limits

Message ID 20240826173820.1690925-4-hch@lst.de
State New
Series [1/4] block: rework bio splitting

Commit Message

Christoph Hellwig Aug. 26, 2024, 5:37 p.m. UTC
Currently REQ_OP_ZONE_APPEND is handled by the bio_split_rw case in
__bio_split_to_limits.  This is harmful because REQ_OP_ZONE_APPEND
bios do not adhere to the soft max_sectors limit but instead use their
own capped version of max_hw_sectors, leading to incorrect splits that
later blow up in bio_split.

We still need the bio_split_rw logic to count nr_segs for the blk-mq
code, so add a new wrapper that passes in the right limit and turns any
bio that would need a split into an error, as an additional debugging
aid.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-merge.c | 20 ++++++++++++++++++++
 block/blk.h       |  4 ++++
 2 files changed, 24 insertions(+)
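
To make the failure mode concrete, here is a small stand-alone sketch of the
limit mismatch described above. It is ordinary user-space C, and the limit
values are invented for illustration; on a real device they come from the
queue's queue_limits:

#include <stdio.h>

int main(void)
{
	/* invented values for illustration only */
	unsigned int max_sectors = 1024;	/* soft limit used by bio_split_rw */
	unsigned int zone_append_cap = 2048;	/* capped max_hw_sectors */
	unsigned int bio_sectors = 1536;	/* REQ_OP_ZONE_APPEND bio size */

	/* old behavior: split against the soft limit even though the bio
	 * fits the zone append cap -> bogus split that later blows up in
	 * bio_split */
	if (bio_sectors > max_sectors)
		printf("old path: splits at %u sectors\n", max_sectors);

	/* behavior after this patch: check against the zone append cap
	 * and turn an over-sized bio into an error instead of splitting */
	if (bio_sectors > zone_append_cap)
		printf("new path: fail with -EINVAL\n");
	else
		printf("new path: no split, submit as-is\n");
	return 0;
}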

Comments

Damien Le Moal Aug. 26, 2024, 10:32 p.m. UTC | #1
On 8/27/24 02:37, Christoph Hellwig wrote:
> Currently REQ_OP_ZONE_APPEND is handled by the bio_split_rw case in
> __bio_split_to_limits.  This is harmful because REQ_OP_ZONE_APPEND
> bios do not adhere to the soft max_sectors limit but instead use their
> own capped version of max_hw_sectors, leading to incorrect splits that
> later blow up in bio_split.
> 
> We still need the bio_split_rw logic to count nr_segs for the blk-mq
> code, so add a new wrapper that passes in the right limit and turns any
> bio that would need a split into an error, as an additional debugging
> aid.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  block/blk-merge.c | 20 ++++++++++++++++++++
>  block/blk.h       |  4 ++++
>  2 files changed, 24 insertions(+)
> 
> diff --git a/block/blk-merge.c b/block/blk-merge.c
> index c7222c4685e060..56769c4bcd799b 100644
> --- a/block/blk-merge.c
> +++ b/block/blk-merge.c
> @@ -378,6 +378,26 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
>  			get_max_io_size(bio, lim) << SECTOR_SHIFT));
>  }
>  
> +/*
> + * REQ_OP_ZONE_APPEND bios must never be split by the block layer.
> + *
> + * But we want the nr_segs calculation provided by bio_split_rw_at, and having
> + * a good sanity check that the submitter built the bio correctly is nice to
> + * have as well.
> + */
> +struct bio *bio_split_zone_append(struct bio *bio,
> +		const struct queue_limits *lim, unsigned *nr_segs)
> +{
> +	unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim);
> +	int split_sectors;
> +
> +	split_sectors = bio_split_rw_at(bio, lim, nr_segs,
> +			max_sectors << SECTOR_SHIFT);
> +	if (WARN_ON_ONCE(split_sectors > 0))
> +		split_sectors = -EINVAL;

Ah! OK, I see why you used an int for the sector count now.
Hmmm... this is subtle and I am not a huge fan, but I can see how it
saves some cleanup code in patch 1. So OK.

Feel free to add:

Reviewed-by: Damien Le Moal <dlemoal@kernel.org>

> +	return bio_submit_split(bio, split_sectors);
> +}
> +
>  /**
>   * bio_split_to_limits - split a bio to fit the queue limits
>   * @bio:     bio to be split
> diff --git a/block/blk.h b/block/blk.h
> index 0d8cd64c126064..61c2afa67daabb 100644
> --- a/block/blk.h
> +++ b/block/blk.h
> @@ -337,6 +337,8 @@ struct bio *bio_split_write_zeroes(struct bio *bio,
>  		const struct queue_limits *lim, unsigned *nsegs);
>  struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
>  		unsigned *nr_segs);
> +struct bio *bio_split_zone_append(struct bio *bio,
> +		const struct queue_limits *lim, unsigned *nr_segs);
>  
>  /*
>   * All drivers must accept single-segments bios that are smaller than PAGE_SIZE.
> @@ -375,6 +377,8 @@ static inline struct bio *__bio_split_to_limits(struct bio *bio,
>  			return bio_split_rw(bio, lim, nr_segs);
>  		*nr_segs = 1;
>  		return bio;
> +	case REQ_OP_ZONE_APPEND:
> +		return bio_split_zone_append(bio, lim, nr_segs);
>  	case REQ_OP_DISCARD:
>  	case REQ_OP_SECURE_ERASE:
>  		return bio_split_discard(bio, lim, nr_segs);

Patch

diff --git a/block/blk-merge.c b/block/blk-merge.c
index c7222c4685e060..56769c4bcd799b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -378,6 +378,26 @@ struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 			get_max_io_size(bio, lim) << SECTOR_SHIFT));
 }
 
+/*
+ * REQ_OP_ZONE_APPEND bios must never be split by the block layer.
+ *
+ * But we want the nr_segs calculation provided by bio_split_rw_at, and having
+ * a good sanity check that the submitter built the bio correctly is nice to
+ * have as well.
+ */
+struct bio *bio_split_zone_append(struct bio *bio,
+		const struct queue_limits *lim, unsigned *nr_segs)
+{
+	unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim);
+	int split_sectors;
+
+	split_sectors = bio_split_rw_at(bio, lim, nr_segs,
+			max_sectors << SECTOR_SHIFT);
+	if (WARN_ON_ONCE(split_sectors > 0))
+		split_sectors = -EINVAL;
+	return bio_submit_split(bio, split_sectors);
+}
+
 /**
  * bio_split_to_limits - split a bio to fit the queue limits
  * @bio:     bio to be split
diff --git a/block/blk.h b/block/blk.h
index 0d8cd64c126064..61c2afa67daabb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -337,6 +337,8 @@ struct bio *bio_split_write_zeroes(struct bio *bio,
 		const struct queue_limits *lim, unsigned *nsegs);
 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 		unsigned *nr_segs);
+struct bio *bio_split_zone_append(struct bio *bio,
+		const struct queue_limits *lim, unsigned *nr_segs);
 
 /*
  * All drivers must accept single-segments bios that are smaller than PAGE_SIZE.
@@ -375,6 +377,8 @@ static inline struct bio *__bio_split_to_limits(struct bio *bio,
 			return bio_split_rw(bio, lim, nr_segs);
 		*nr_segs = 1;
 		return bio;
+	case REQ_OP_ZONE_APPEND:
+		return bio_split_zone_append(bio, lim, nr_segs);
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
 		return bio_split_discard(bio, lim, nr_segs);
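
A practical consequence for submitters: a REQ_OP_ZONE_APPEND bio must fit the
zone append cap when it is built, since the block layer now rejects an
over-sized bio instead of splitting it. The following submitter-side check is
purely illustrative (zone_append_bio_sketch and its parameters are invented
for this sketch); bdev_max_zone_append_sectors(), bio_alloc(), and
__bio_add_page() are existing bio helpers:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* illustrative only; not part of this series */
static struct bio *zone_append_bio_sketch(struct block_device *bdev,
		sector_t zone_start_sector, struct page *page,
		unsigned int len)
{
	struct bio *bio;

	/* stay within the zone append cap, or submission now fails with
	 * -EINVAL instead of being (incorrectly) split */
	if ((len >> SECTOR_SHIFT) > bdev_max_zone_append_sectors(bdev))
		return NULL;

	bio = bio_alloc(bdev, 1, REQ_OP_ZONE_APPEND, GFP_NOIO);
	bio->bi_iter.bi_sector = zone_start_sector;
	__bio_add_page(bio, page, len, 0);	/* single-page payload */
	return bio;
}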