diff mbox series

[PATCHv3,1/2] blk-mq: export __blk_mq_complete_request

Message ID 20200528153441.3501777-1-kbusch@kernel.org (mailing list archive)
State New, archived
Headers show
Series [PATCHv3,1/2] blk-mq: export __blk_mq_complete_request | expand

Commit Message

Keith Busch May 28, 2020, 3:34 p.m. UTC
For when drivers have a need to bypass error injection.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
v2->v3: Use _GPL export

 block/blk-mq.c         | 3 ++-
 include/linux/blk-mq.h | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)

Comments

Jens Axboe May 28, 2020, 3:36 p.m. UTC | #1
On 5/28/20 9:34 AM, Keith Busch wrote:
> For when drivers have a need to bypass error injection.

Acked-by: Jens Axboe <axboe@kernel.dk>

Assuming this goes in through the NVMe tree.
Dongli Zhang May 28, 2020, 6:11 p.m. UTC | #2
On 5/28/20 8:34 AM, Keith Busch wrote:
> For when drivers have a need to bypass error injection.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>
> ---
> v2->v3: Use _GPL export
> 
>  block/blk-mq.c         | 3 ++-
>  include/linux/blk-mq.h | 1 +
>  2 files changed, 3 insertions(+), 1 deletion(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index cac11945f602..e62559ac7c45 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -556,7 +556,7 @@ static void __blk_mq_complete_request_remote(void *data)
>  	q->mq_ops->complete(rq);
>  }
>  
> -static void __blk_mq_complete_request(struct request *rq)
> +void __blk_mq_complete_request(struct request *rq)
>  {
>  	struct blk_mq_ctx *ctx = rq->mq_ctx;
>  	struct request_queue *q = rq->q;
> @@ -602,6 +602,7 @@ static void __blk_mq_complete_request(struct request *rq)
>  	}
>  	put_cpu();
>  }
> +EXPORT_SYMBOL_GPL(__blk_mq_complete_request);
>  
>  static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
>  	__releases(hctx->srcu)
> diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
> index d7307795439a..cfe7eac3764e 100644
> --- a/include/linux/blk-mq.h
> +++ b/include/linux/blk-mq.h
> @@ -494,6 +494,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
>  void blk_mq_kick_requeue_list(struct request_queue *q);
>  void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
>  bool blk_mq_complete_request(struct request *rq);
> +void __blk_mq_complete_request(struct request *rq);
>  bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
>  			   struct bio *bio, unsigned int nr_segs);
>  bool blk_mq_queue_stopped(struct request_queue *q);


The null block driver requires this as well.

Otherwise, the below would hang forever because timeout handler keeps failing.

# echo 100 > /sys/kernel/debug/fail_io_timeout/probability
# echo 1000 > /sys/kernel/debug/fail_io_timeout/times
# echo 1 > /sys/block/nullb0/io-timeout-fail
# dd if=/dev/zero of=/dev/nullb0 bs=512 count=1 oflag=direct

With below patch, the timeout handler is able to complete the IO.

diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index ce9e33603a4d..ba107b264fab 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1397,7 +1397,7 @@ static bool should_requeue_request(struct request *rq)
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
        pr_info("rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+       __blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }

I will wait for the new exported API name to be settled before sending out the above.

Perhaps there are more drivers that require such a change, but only in more
specific corner cases.

Dongli Zhang
Daniel Wagner May 29, 2020, 7:28 a.m. UTC | #3
On Thu, May 28, 2020 at 08:34:40AM -0700, Keith Busch wrote:
> For when drivers have a need to bypass error injection.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>

Reviewed-by: Daniel Wagner <dwagner@suse.de>
Christoph Hellwig May 29, 2020, 12:27 p.m. UTC | #4
On Thu, May 28, 2020 at 09:36:23AM -0600, Jens Axboe wrote:
> On 5/28/20 9:34 AM, Keith Busch wrote:
> > For when drivers have a need to bypass error injection.
> 
> Acked-by: Jens Axboe <axboe@kernel.dk
> 
> Assuming this goes in through the NVMe tree.

Given that other drivers will need this as well, and the nvme queue for
5.8 is empty at the moment I think you should pick up the next version
once it has better naming and documentation.
Jens Axboe May 29, 2020, 2:18 p.m. UTC | #5
On 5/29/20 6:27 AM, Christoph Hellwig wrote:
> On Thu, May 28, 2020 at 09:36:23AM -0600, Jens Axboe wrote:
>> On 5/28/20 9:34 AM, Keith Busch wrote:
>>> For when drivers have a need to bypass error injection.
>>
>> Acked-by: Jens Axboe <axboe@kernel.dk
>>
>> Assuming this goes in through the NVMe tree.
> 
> Given that other drivers will need this as well, and the nvme queue for
> 5.8 is empty at the moment I think you should pick up the next version
> once it has better naming and documentation.

Sure
diff mbox series

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index cac11945f602..e62559ac7c45 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -556,7 +556,7 @@  static void __blk_mq_complete_request_remote(void *data)
 	q->mq_ops->complete(rq);
 }
 
-static void __blk_mq_complete_request(struct request *rq)
+void __blk_mq_complete_request(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
@@ -602,6 +602,7 @@  static void __blk_mq_complete_request(struct request *rq)
 	}
 	put_cpu();
 }
+EXPORT_SYMBOL_GPL(__blk_mq_complete_request);
 
 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
 	__releases(hctx->srcu)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d7307795439a..cfe7eac3764e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -494,6 +494,7 @@  void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
+void __blk_mq_complete_request(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 			   struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);