
[v2,07/12] block: Make it easier to debug zoned write reordering

Message ID: 20230407235822.1672286-8-bvanassche@acm.org
State: New, archived
Series: Submit zoned writes in order

Commit Message

Bart Van Assche April 7, 2023, 11:58 p.m. UTC
Issue a kernel warning if reordering could happen.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-mq.c | 4 ++++
 1 file changed, 4 insertions(+)
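
For context: the two WARN_ON_ONCE() calls added by this patch rely on
blk_rq_is_seq_zoned_write(), a helper introduced earlier in this series. A
minimal sketch of what such a helper could look like, assuming it mirrors the
variant Damien quotes in the review below (the actual definition in the
earlier patch may differ):

static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	/*
	 * True only for write operations that target a sequential-write-required
	 * zone and must therefore not be reordered before reaching the device.
	 */
	return (req_op(rq) == REQ_OP_WRITE || req_op(rq) == REQ_OP_WRITE_ZEROES) &&
		disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
}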

Comments

Damien Le Moal April 10, 2023, 8:06 a.m. UTC | #1
On 4/8/23 08:58, Bart Van Assche wrote:
> Issue a kernel warning if reordering could happen.
> 
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Damien Le Moal <damien.lemoal@opensource.wdc.com>
> Cc: Ming Lei <ming.lei@redhat.com>
> Cc: Mike Snitzer <snitzer@kernel.org>
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
> ---
>  block/blk-mq.c | 4 ++++
>  1 file changed, 4 insertions(+)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 562868dff43f..d89a0e6cf37d 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -2478,6 +2478,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
>  {
>  	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
>  
> +	WARN_ON_ONCE(rq->q->elevator && blk_rq_is_seq_zoned_write(rq));
> +
>  	spin_lock(&hctx->lock);
>  	if (at_head)
>  		list_add(&rq->queuelist, &hctx->dispatch);
> @@ -2570,6 +2572,8 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
>  	bool run_queue = true;
>  	int budget_token;
>  
> +	WARN_ON_ONCE(q->elevator && blk_rq_is_seq_zoned_write(rq));
> +
>  	/*
>  	 * RCU or SRCU read lock is needed before checking quiesced flag.
>  	 *

Looks OK, but I think it would be preferable to optimize
blk_rq_is_seq_zoned_write() so that it compiles to a constant false on kernels
where CONFIG_BLK_DEV_ZONED is not set. E.g.:

static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) {
		switch (req_op(rq)) {
		case REQ_OP_WRITE:
		case REQ_OP_WRITE_ZEROES:
			/* Writes to sequential zones must be submitted in order. */
			return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq));
		case REQ_OP_ZONE_APPEND:
		default:
			/* Zone append and all other operations need no ordering. */
			return false;
		}
	}

	return false;
}
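
With CONFIG_BLK_DEV_ZONED=n, IS_ENABLED(CONFIG_BLK_DEV_ZONED) is the
compile-time constant 0, so the suggested helper should reduce to roughly the
following (a sketch of the intended effect):

static inline bool blk_rq_is_seq_zoned_write(struct request *rq)
{
	/* CONFIG_BLK_DEV_ZONED=n: the IS_ENABLED() branch above is dead code. */
	return false;
}

Because the helper then returns a constant false, conditions such as
rq->q->elevator && blk_rq_is_seq_zoned_write(rq) fold to false and the
compiler should be able to drop the new WARN_ON_ONCE() checks entirely on
non-zoned builds.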

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 562868dff43f..d89a0e6cf37d 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2478,6 +2478,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
+	WARN_ON_ONCE(rq->q->elevator && blk_rq_is_seq_zoned_write(rq));
+
 	spin_lock(&hctx->lock);
 	if (at_head)
 		list_add(&rq->queuelist, &hctx->dispatch);
@@ -2570,6 +2572,8 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	bool run_queue = true;
 	int budget_token;
 
+	WARN_ON_ONCE(q->elevator && blk_rq_is_seq_zoned_write(rq));
+
 	/*
 	 * RCU or SRCU read lock is needed before checking quiesced flag.
 	 *