
[12/13] blk-mq: use switch/case to improve readability in blk_mq_try_issue_list_directly

Message ID 20221223125223.1687670-13-shikemeng@huaweicloud.com (mailing list archive)
State New, archived
Series: A few bugfix and cleanup patches for blk-mq

Commit Message

Kemeng Shi Dec. 23, 2022, 12:52 p.m. UTC
Use switch/case to handle errors, as other functions do, to improve
readability in blk_mq_try_issue_list_directly.

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 block/blk-mq.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
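
In short, the change replaces the nested if/else on the issue status (quoted
from the removed lines of the patch at the bottom of this page)

	if (ret != BLK_STS_OK) {
		if (ret == BLK_STS_RESOURCE ||
				ret == BLK_STS_DEV_RESOURCE) {
			blk_mq_request_bypass_insert(rq, false,
						list_empty(list));
			break;
		}
		blk_mq_end_request(rq, ret);
	} else
		queued++;

with a flat switch on ret, matching the error-handling style of the other
direct-issue paths such as blk_mq_plug_issue_direct.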

Comments

Christoph Hellwig Dec. 23, 2022, 5:53 a.m. UTC | #1
On Fri, Dec 23, 2022 at 08:52:22PM +0800, Kemeng Shi wrote:
> +			blk_mq_request_bypass_insert(rq, false, list_empty(list));

Please try to avoid the overly long line here.

That being said, blk_mq_request_bypass_insert is simply a horrible
API.  I think we should do something like this:


diff --git a/block/blk-flush.c b/block/blk-flush.c
index 53202eff545efb..b6157ae11df651 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -432,7 +432,8 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false, true);
+		blk_mq_request_bypass_insert(rq);
+		blk_mq_run_hw_queue(rq->mq_hctx, false);
 		return;
 	}
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 23d1a90fec4271..d49fe4503b09d7 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -437,12 +437,13 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 		 * Simply queue flush rq to the front of hctx->dispatch so that
 		 * intensive flush workloads can benefit in case of NCQ HW.
 		 */
-		at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
-		blk_mq_request_bypass_insert(rq, at_head, false);
-		goto run;
-	}
-
-	if (e) {
+		spin_lock(&hctx->lock);
+		if ((rq->rq_flags & RQF_FLUSH_SEQ) || at_head)
+			list_add(&rq->queuelist, &hctx->dispatch);
+		else
+			list_add_tail(&rq->queuelist, &hctx->dispatch);
+		spin_unlock(&hctx->lock);
+	} else if (e) {
 		LIST_HEAD(list);
 
 		list_add(&rq->queuelist, &list);
@@ -453,7 +454,6 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 		spin_unlock(&ctx->lock);
 	}
 
-run:
 	if (run_queue)
 		blk_mq_run_hw_queue(hctx, async);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c5cf0dbca1db8d..43bb9b36c90da7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1467,7 +1467,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
 		 * merge.
 		 */
 		if (rq->rq_flags & RQF_DONTPREP)
-			blk_mq_request_bypass_insert(rq, false, false);
+			blk_mq_request_bypass_insert(rq);
 		else
 			blk_mq_sched_insert_request(rq, true, false, false);
 	}
@@ -2504,26 +2504,17 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 /**
  * blk_mq_request_bypass_insert - Insert a request at dispatch list.
  * @rq: Pointer to request to be inserted.
- * @at_head: true if the request should be inserted at the head of the list.
- * @run_queue: If we should run the hardware queue after inserting the request.
  *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
-				  bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	spin_lock(&hctx->lock);
-	if (at_head)
-		list_add(&rq->queuelist, &hctx->dispatch);
-	else
-		list_add_tail(&rq->queuelist, &hctx->dispatch);
+	list_add_tail(&rq->queuelist, &hctx->dispatch);
 	spin_unlock(&hctx->lock);
-
-	if (run_queue)
-		blk_mq_run_hw_queue(hctx, false);
 }
 
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
@@ -2670,10 +2661,17 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret =
 		__blk_mq_try_issue_directly(hctx, rq, false, true);
 
-	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-		blk_mq_request_bypass_insert(rq, false, true);
-	else if (ret != BLK_STS_OK)
+	switch (ret) {
+	case BLK_STS_OK:
+		break;
+	case BLK_STS_RESOURCE:
+	case BLK_STS_DEV_RESOURCE:
+		blk_mq_request_bypass_insert(rq);
+		blk_mq_run_hw_queue(rq->mq_hctx, false);
+		break;
+	default:
 		blk_mq_end_request(rq, ret);
+	}
 }
 
 static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
@@ -2705,7 +2703,8 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
 			break;
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
-			blk_mq_request_bypass_insert(rq, false, true);
+			blk_mq_request_bypass_insert(rq);
+			blk_mq_run_hw_queue(rq->mq_hctx, false);
 			blk_mq_commit_rqs(hctx, &queued, from_schedule);
 			return;
 		default:
@@ -2818,8 +2817,9 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 			errors++;
 			if (ret == BLK_STS_RESOURCE ||
 					ret == BLK_STS_DEV_RESOURCE) {
-				blk_mq_request_bypass_insert(rq, false,
-							list_empty(list));
+				blk_mq_request_bypass_insert(rq);
+				if (list_empty(list))
+					blk_mq_run_hw_queue(rq->mq_hctx, false);
 				break;
 			}
 			blk_mq_end_request(rq, ret);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ef59fee62780d3..3733429561e1eb 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -61,8 +61,7 @@ void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
  */
 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 				bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
-				  bool run_queue);
+void blk_mq_request_bypass_insert(struct request *rq);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 				struct list_head *list);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
Kemeng Shi Dec. 23, 2022, 6:57 a.m. UTC | #2
On 12/23/2022 1:53 PM, Christoph Hellwig wrote:
> On Fri, Dec 23, 2022 at 08:52:22PM +0800, Kemeng Shi wrote:
>> +			blk_mq_request_bypass_insert(rq, false, list_empty(list));
> 
> Please try to avoid the overly long line here.
Got it, I will fix this in the next version. Thanks!
> That being said, blk_mq_request_bypass_insert is simply a horrible
> API.  I think we should do something like this:
I don't quite follow this. I guess this API is horrible for two possible reasons:
1. It accepts two bool parameters which are easy to confuse with each other at the
call site.
2. It adds additional checks for whether we need to insert at head and whether we
need to run the queue, which the caller has already decided (see the sketch below).
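
A rough sketch of the two shapes, taken from the call site in
blk_mq_try_issue_list_directly and the proposed diff above (nothing new here,
just the before and after side by side):

	/* current shape: behaviour is hidden behind two bools at the call site */
	blk_mq_request_bypass_insert(rq, false, list_empty(list));

	/* proposed shape: the helper only inserts, the caller runs the queue */
	blk_mq_request_bypass_insert(rq);
	if (list_empty(list))
		blk_mq_run_hw_queue(rq->mq_hctx, false);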

Anyway, it seems another patch is needed for this, but I don't know the proper
way to send it. Should I add your patch to this patchset, or would you prefer
to send a separate one after this patchset?

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index a48f2a913295..2a3db9524974 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2789,16 +2789,20 @@  void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 
 		list_del_init(&rq->queuelist);
 		ret = blk_mq_request_issue_directly(rq, list_empty(list));
-		if (ret != BLK_STS_OK) {
-			if (ret == BLK_STS_RESOURCE ||
-					ret == BLK_STS_DEV_RESOURCE) {
-				blk_mq_request_bypass_insert(rq, false,
-							list_empty(list));
-				break;
-			}
-			blk_mq_end_request(rq, ret);
-		} else
+		switch (ret) {
+		case BLK_STS_OK:
 			queued++;
+			break;
+		case BLK_STS_RESOURCE:
+		case BLK_STS_DEV_RESOURCE:
+			blk_mq_request_bypass_insert(rq, false, list_empty(list));
+			if (hctx->queue->mq_ops->commit_rqs && queued)
+				hctx->queue->mq_ops->commit_rqs(hctx);
+			return;
+		default:
+			blk_mq_end_request(rq, ret);
+			break;
+		}
 	}
 
 	/*