
[PATCHv4,1/2] blk-mq: provide forced completion method

Message ID 20200529145200.3545747-1-kbusch@kernel.org (mailing list archive)
State New, archived
Series [PATCHv4,1/2] blk-mq: provide forced completion method

Commit Message

Keith Busch May 29, 2020, 2:51 p.m. UTC
Drivers may need to bypass error injection for error recovery. Rename
__blk_mq_complete_request() to blk_mq_force_complete_rq() and export
that function so drivers may skip potential fake timeouts after they've
reclaimed lost requests.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 block/blk-mq.c         | 15 +++++++++++++--
 include/linux/blk-mq.h |  1 +
 2 files changed, 14 insertions(+), 2 deletions(-)
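
For context, a minimal sketch of how a driver's error-recovery path might use
the newly exported helper. The function and callback names below are
illustrative only and are not taken from this series; the pattern assumes a
driver that walks its tag set to reclaim outstanding requests after resetting
its hardware.

#include <linux/blk-mq.h>

/* Illustrative iterator callback run while reclaiming lost requests. */
static bool example_reclaim_request(struct request *req, void *data, bool reserved)
{
	/*
	 * A real driver would record an abort/error status in its
	 * per-request data here before completing the request.
	 * blk_mq_force_complete_rq() bypasses blk_should_fake_timeout(),
	 * so fault injection cannot drop this completion a second time.
	 */
	blk_mq_force_complete_rq(req);
	return true;
}

/* Illustrative recovery path: force-complete everything still in flight. */
static void example_reclaim_all(struct blk_mq_tag_set *set)
{
	blk_mq_tagset_busy_iter(set, example_reclaim_request, NULL);
}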

Comments

Daniel Wagner May 29, 2020, 3:42 p.m. UTC | #1
On Fri, May 29, 2020 at 07:51:59AM -0700, Keith Busch wrote:
> Drivers may need to bypass error injection for error recovery. Rename
> __blk_mq_complete_request() to blk_mq_force_complete_rq() and export
> that function so drivers may skip potential fake timeouts after they've
> reclaimed lost requests.
> 
> Signed-off-by: Keith Busch <kbusch@kernel.org>

Reviewed-by: Daniel Wagner <dwagner@suse.de>
Jens Axboe May 29, 2020, 4:22 p.m. UTC | #2
On 5/29/20 8:51 AM, Keith Busch wrote:
> Drivers may need to bypass error injection for error recovery. Rename
> __blk_mq_complete_request() to blk_mq_force_complete_rq() and export
> that function so drivers may skip potential fake timeouts after they've
> reclaimed lost requests.

Applied 1-2, thanks Keith.
Alan Adamson May 29, 2020, 6:02 p.m. UTC | #3
Passes my tests, thanks.

Reviewed-by: Alan Adamson <alan.adamson@oracle.com>

On 5/29/20 7:51 AM, Keith Busch wrote:
> Drivers may need to bypass error injection for error recovery. Rename
> __blk_mq_complete_request() to blk_mq_force_complete_rq() and export
> that function so drivers may skip potential fake timeouts after they've
> reclaimed lost requests.
>
> Signed-off-by: Keith Busch <kbusch@kernel.org>

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index cac11945f602..560a114a82f8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -556,7 +556,17 @@ static void __blk_mq_complete_request_remote(void *data)
 	q->mq_ops->complete(rq);
 }
 
-static void __blk_mq_complete_request(struct request *rq)
+/**
+ * blk_mq_force_complete_rq() - Force complete the request, bypassing any error
+ * 				injection that could drop the completion.
+ * @rq: Request to be force completed
+ *
+ * Drivers should use blk_mq_complete_request() to complete requests in their
+ * normal IO path. For timeout error recovery, drivers may call this forced
+ * completion routine after they've reclaimed timed out requests to bypass
+ * potentially subsequent fake timeouts.
+ */
+void blk_mq_force_complete_rq(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	struct request_queue *q = rq->q;
@@ -602,6 +612,7 @@ static void __blk_mq_complete_request(struct request *rq)
 	}
 	put_cpu();
 }
+EXPORT_SYMBOL_GPL(blk_mq_force_complete_rq);
 
 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
 	__releases(hctx->srcu)
@@ -635,7 +646,7 @@ bool blk_mq_complete_request(struct request *rq)
 {
 	if (unlikely(blk_should_fake_timeout(rq->q)))
 		return false;
-	__blk_mq_complete_request(rq);
+	blk_mq_force_complete_rq(rq);
 	return true;
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index d7307795439a..856bb10993cf 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -494,6 +494,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
+void blk_mq_force_complete_rq(struct request *rq);
 bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
 			   struct bio *bio, unsigned int nr_segs);
 bool blk_mq_queue_stopped(struct request_queue *q);
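
The two entry points split the work as the kerneldoc above describes:
blk_mq_complete_request() in the normal IO path, where fault injection is
allowed to drop the completion, and blk_mq_force_complete_rq() in recovery,
where it must not. The sketch below is hypothetical; the names and the choice
to complete aborted commands directly from the timeout handler are assumptions,
not part of this patch.

#include <linux/blk-mq.h>

/* Hypothetical interrupt-driven completion: fake timeouts may apply here. */
static void example_irq_complete(struct request *req)
{
	/*
	 * A false return means blk_should_fake_timeout() dropped the
	 * completion; recovery is then driven by the timeout handler.
	 */
	if (!blk_mq_complete_request(req))
		return;

	/* Normal per-command bookkeeping would continue here. */
}

/* Hypothetical timeout handler: the forced completion cannot be dropped. */
static enum blk_eh_timer_return example_timeout_rq(struct request *req, bool reserved)
{
	blk_mq_force_complete_rq(req);
	return BLK_EH_DONE;
}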