
[4/5] skd: Avoid double completions in case of a timeout

Message ID 20170823175633.12680-5-bart.vanassche@wdc.com
State New, archived

Commit Message

Bart Van Assche Aug. 23, 2017, 5:56 p.m. UTC
Prevent normal request completion and the timeout handler from running
concurrently by calling blk_mq_complete_request() instead of
blk_mq_end_request() from skd_end_request(). Prevent the block layer
from reusing a request while the firmware is still processing it.
Convert skd_softirq_done() to blk-mq. Pass the pointer to
skd_softirq_done() to the block layer core through
blk_mq_ops.complete instead of by calling blk_queue_softirq_done().
Pass the pointer to skd_timed_out() to the block layer core through
blk_mq_ops.timeout instead of by calling blk_queue_rq_timed_out().
The timeout handler has been tested as follows:

    echo 1 > /sys/block/skd0/io-timeout-fail &&
    (cd /sys/kernel/debug/fail_io_timeout &&
      echo 100 > probability &&
      echo N > task-filter &&
      echo 1 > times)
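
For readers less familiar with the blk-mq completion split, the pattern the
patch adopts looks roughly like the sketch below: the driver records the
completion status in its per-request data and calls blk_mq_complete_request(),
the actual blk_mq_end_request() happens in the blk_mq_ops.complete handler,
and the timeout handler only re-arms the timer. The sketch_* names are
illustrative, not driver code; the real changes are in the diff at the end of
this page.

    #include <linux/blk-mq.h>

    struct sketch_rq_pdu {
            blk_status_t status;            /* saved for the .complete handler */
    };

    /* Driver/IRQ context: record the status and defer the completion. */
    static void sketch_end_request(struct request *req, blk_status_t error)
    {
            struct sketch_rq_pdu *pdu = blk_mq_rq_to_pdu(req);

            pdu->status = error;
            blk_mq_complete_request(req);   /* completes the request only once */
    }

    /* .complete: runs in softirq context and actually ends the request. */
    static void sketch_complete_rq(struct request *req)
    {
            struct sketch_rq_pdu *pdu = blk_mq_rq_to_pdu(req);

            blk_mq_end_request(req, pdu->status);
    }

    /* .timeout: keep the tag owned by the driver and re-arm the timer. */
    static enum blk_eh_timer_return sketch_timed_out(struct request *req,
                                                     bool reserved)
    {
            return BLK_EH_RESET_TIMER;
    }

    static const struct blk_mq_ops sketch_mq_ops = {
            /* .queue_rq and the init/exit callbacks are omitted here */
            .complete       = sketch_complete_rq,
            .timeout        = sketch_timed_out,
    };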

Fixes: a74d5b76fab9 ("skd: Switch to block layer timeout mechanism")
Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
---
 drivers/block/skd_main.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

Comments

Christoph Hellwig Aug. 23, 2017, 6:08 p.m. UTC | #1
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

Although as a follow on I would also move the debug printks
from skd_end_request to skd_softirq_done, then remove skd_end_request
and rename skd_softirq_done to skd_complete_rq to fit what other
drivers are doing a little more closely.
Bart Van Assche Aug. 24, 2017, 3:12 p.m. UTC | #2
On Wed, 2017-08-23 at 20:08 +0200, Christoph Hellwig wrote:
> Although as a follow on I would also move the debug printks
> from skd_end_request to skd_softirq_done, then remove skd_end_request
> and rename skd_softirq_done to skd_complete_rq to fit what other
> drivers are doing a little more closely.

Hello Christoph,

Thanks for the review of this patch series. I will include the above changes
when I post my next patch series for the skd driver.

Bart.
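
For reference, a rough sketch of what that follow-on cleanup could look like
(hypothetical; it is not part of this patch and was not posted to the list):
the debug printk moves into the .complete handler, which takes the name
skd_complete_rq that Christoph suggested, and skd_end_request goes away.

    /* Hypothetical follow-on sketch, not part of this patch: the former
     * skd_softirq_done(), renamed and now carrying the debug printk.
     */
    static void skd_complete_rq(struct request *req)
    {
            struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
            struct skd_device *skdev = req->q->queuedata;

            if (unlikely(skreq->status))
                    dev_dbg(&skdev->pdev->dev,
                            "%s request with tag %#x failed: %d\n",
                            rq_data_dir(req) == READ ? "read" : "write",
                            blk_mq_unique_tag(req), skreq->status);

            blk_mq_end_request(req, skreq->status);
    }

    /* blk_mq_ops.complete would then point at skd_complete_rq, and the
     * former callers of skd_end_request() would set skreq->status and call
     * blk_mq_complete_request() directly.
     */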

Patch

diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 0d6340884009..ff288f1a5dec 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -184,6 +184,7 @@  struct skd_request_context {
 
 	struct fit_comp_error_info err_info;
 
+	blk_status_t status;
 };
 
 struct skd_special_context {
@@ -596,19 +597,22 @@  static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
-static enum blk_eh_timer_return skd_timed_out(struct request *req)
+static enum blk_eh_timer_return skd_timed_out(struct request *req,
+					      bool reserved)
 {
 	struct skd_device *skdev = req->q->queuedata;
 
 	dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
 		blk_mq_unique_tag(req));
 
-	return BLK_EH_HANDLED;
+	return BLK_EH_RESET_TIMER;
 }
 
 static void skd_end_request(struct skd_device *skdev, struct request *req,
 			    blk_status_t error)
 {
+	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
+
 	if (unlikely(error)) {
 		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
 		u32 lba = (u32)blk_rq_pos(req);
@@ -621,19 +625,15 @@  static void skd_end_request(struct skd_device *skdev, struct request *req,
 		dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", req->tag,
 			error);
 
-	blk_mq_end_request(req, error);
+	skreq->status = error;
+	blk_mq_complete_request(req);
 }
 
-/* Only called in case of a request timeout */
 static void skd_softirq_done(struct request *req)
 {
-	struct skd_device *skdev = req->q->queuedata;
 	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
 
-	spin_lock_irqsave(&skdev->lock, flags);
-	skd_end_request(skdev, blk_mq_rq_from_pdu(skreq), BLK_STS_TIMEOUT);
-	spin_unlock_irqrestore(&skdev->lock, flags);
+	blk_mq_end_request(req, skreq->status);
 }
 
 static bool skd_preop_sg_list(struct skd_device *skdev,
@@ -2821,6 +2821,8 @@  static int skd_cons_sksb(struct skd_device *skdev)
 
 static const struct blk_mq_ops skd_mq_ops = {
 	.queue_rq	= skd_mq_queue_rq,
+	.complete	= skd_softirq_done,
+	.timeout	= skd_timed_out,
 	.init_request	= skd_init_request,
 	.exit_request	= skd_exit_request,
 };
@@ -2884,8 +2886,6 @@  static int skd_cons_disk(struct skd_device *skdev)
 	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 
 	blk_queue_rq_timeout(q, 8 * HZ);
-	blk_queue_rq_timed_out(q, skd_timed_out);
-	blk_queue_softirq_done(q, skd_softirq_done);
 
 	spin_lock_irqsave(&skdev->lock, flags);
 	dev_dbg(&skdev->pdev->dev, "stopping queue\n");