
[v4,07/14] blk-mq: make blk_mq_commit_rqs a general function for all commits

Message ID 20230118093726.3939160-7-shikemeng@huaweicloud.com (mailing list archive)
State New, archived
Series: A few bugfix and cleanup patches for blk-mq

Commit Message

Kemeng Shi Jan. 18, 2023, 9:37 a.m. UTC
1. Move blk_mq_commit_rqs forward, ahead of the functions that need to
commit.
2. Add a queued check to blk_mq_commit_rqs so that requests are only
committed when at least one request was actually queued, keeping commit
behavior consistent and removing unnecessary commits.
3. Split the clearing of queued out of blk_mq_plug_commit_rqs, as it is
not wanted in the general helper.
4. Sync the current callers of blk_mq_commit_rqs with the new general
blk_mq_commit_rqs.
5. Document the rule for the unusual cases that need an explicit
commit_rqs.

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 block/blk-mq.c | 37 +++++++++++++++++++++++--------------
 1 file changed, 23 insertions(+), 14 deletions(-)
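
In short, the helper now takes the queued count by value and skips the
driver notification when nothing was queued, while each caller clears its
own counter. The following is a condensed sketch summarizing the patch
below (not additional code):

static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
			      bool from_schedule)
{
	/* Only notify the driver if at least one request was queued. */
	if (hctx->queue->mq_ops->commit_rqs && queued) {
		trace_block_unplug(hctx->queue, queued, !from_schedule);
		hctx->queue->mq_ops->commit_rqs(hctx);
	}
}

/* Caller side (blk_mq_plug_issue_direct): clearing queued stays with the caller. */
if (hctx != rq->mq_hctx) {
	if (hctx) {
		blk_mq_commit_rqs(hctx, queued, false);
		queued = 0;
	}
	hctx = rq->mq_hctx;
}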

Comments

Christoph Hellwig Jan. 18, 2023, 5:37 p.m. UTC | #1
On Wed, Jan 18, 2023 at 05:37:19PM +0800, Kemeng Shi wrote:
> +/*
> + * blk_mq_commit_rqs will notify the driver, using bd->last, that there
> + * are no more requests. (See the comment on commit_rqs in struct
> + * blk_mq_ops for details.)
> + * Note that we should call this explicitly in unusual cases:
> + *  1) we did not queue everything initially scheduled to queue
> + *  2) the last attempt to queue a request failed
> + */
> +static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
> +			      bool from_schedule)

Isn't from_schedule always false here as well now?

Otherwise looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>
Kemeng Shi Jan. 19, 2023, 1:40 a.m. UTC | #2
on 1/19/2023 1:37 AM, Christoph Hellwig wrote:
> On Wed, Jan 18, 2023 at 05:37:19PM +0800, Kemeng Shi wrote:
>> +/*
>> + * blk_mq_commit_rqs will notify the driver, using bd->last, that there
>> + * are no more requests. (See the comment on commit_rqs in struct
>> + * blk_mq_ops for details.)
>> + * Note that we should call this explicitly in unusual cases:
>> + *  1) we did not queue everything initially scheduled to queue
>> + *  2) the last attempt to queue a request failed
>> + */
>> +static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
>> +			      bool from_schedule)
> 
> Isn't from_schedule always false here as well now?
Hi Christoph,
Yes, it's always false now. As blk_mq_commit_rqs is now a general function
for all commits, I kept from_schedule for commits where from_schedule may
be true in the future. Alternatively, we can remove from_schedule now and
add it back when it is actually needed. Either way is acceptable to me.
Please let me know which way you prefer and I will send a new version if
needed.
Thanks.
> Otherwise looks good:
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>
>
Kemeng Shi Jan. 28, 2023, 1:55 a.m. UTC | #3
on 1/19/2023 9:40 AM, Kemeng Shi wrote:
> 
> 
> on 1/19/2023 1:37 AM, Christoph Hellwig wrote:
>> On Wed, Jan 18, 2023 at 05:37:19PM +0800, Kemeng Shi wrote:
>>> +/*
>>> + * blk_mq_commit_rqs will notify the driver, using bd->last, that there
>>> + * are no more requests. (See the comment on commit_rqs in struct
>>> + * blk_mq_ops for details.)
>>> + * Note that we should call this explicitly in unusual cases:
>>> + *  1) we did not queue everything initially scheduled to queue
>>> + *  2) the last attempt to queue a request failed
>>> + */
>>> +static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
>>> +			      bool from_schedule)
>>
>> Isn't from_schedule always false here as well now?
> Hi Christoph,
> Yes, it's always false now. As blk_mq_commit_rqs is now a general function
> for all commits, I kept from_schedule for commits where from_schedule may
> be true in the future. Alternatively, we can remove from_schedule now and
> add it back when it is actually needed. Either way is acceptable to me.
> Please let me know which way you prefer and I will send a new version if
> needed.
> Thanks.
>> Otherwise looks good:
>>
>> Reviewed-by: Christoph Hellwig <hch@lst.de>
>>
> 
Hi Christoph,
Sorry to bother you; I'm not sure whether you saw this message. I plan to
collect your Reviewed-by for this patch if no further changes are needed.
Thanks.
Christoph Hellwig Feb. 6, 2023, 3:53 p.m. UTC | #4
Looks good:

Reviewed-by: Christoph Hellwig <hch@lst.de>

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 5d146ec9f8cb..159d9163c46c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2007,6 +2007,23 @@  static void blk_mq_release_budgets(struct request_queue *q,
 	}
 }
 
+/*
+ * blk_mq_commit_rqs will notify the driver, using bd->last, that there
+ * are no more requests. (See the comment on commit_rqs in struct
+ * blk_mq_ops for details.)
+ * Note that we should call this explicitly in unusual cases:
+ *  1) we did not queue everything initially scheduled to queue
+ *  2) the last attempt to queue a request failed
+ */
+static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int queued,
+			      bool from_schedule)
+{
+	if (hctx->queue->mq_ops->commit_rqs && queued) {
+		trace_block_unplug(hctx->queue, queued, !from_schedule);
+		hctx->queue->mq_ops->commit_rqs(hctx);
+	}
+}
+
 /*
  * Returns true if we did some work AND can potentially do more.
  */
@@ -2555,16 +2572,6 @@  void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	spin_unlock(&ctx->lock);
 }
 
-static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
-			      bool from_schedule)
-{
-	if (hctx->queue->mq_ops->commit_rqs) {
-		trace_block_unplug(hctx->queue, *queued, !from_schedule);
-		hctx->queue->mq_ops->commit_rqs(hctx);
-	}
-	*queued = 0;
-}
-
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 		unsigned int nr_segs)
 {
@@ -2700,8 +2707,10 @@  static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 		blk_status_t ret;
 
 		if (hctx != rq->mq_hctx) {
-			if (hctx)
-				blk_mq_commit_rqs(hctx, &queued, false);
+			if (hctx) {
+				blk_mq_commit_rqs(hctx, queued, false);
+				queued = 0;
+			}
 			hctx = rq->mq_hctx;
 		}
 
@@ -2713,7 +2722,7 @@  static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 		case BLK_STS_RESOURCE:
 		case BLK_STS_DEV_RESOURCE:
 			blk_mq_request_bypass_insert(rq, false, true);
-			blk_mq_commit_rqs(hctx, &queued, false);
+			blk_mq_commit_rqs(hctx, queued, false);
 			return;
 		default:
 			blk_mq_end_request(rq, ret);
@@ -2727,7 +2736,7 @@  static void blk_mq_plug_issue_direct(struct blk_plug *plug)
 	 * there was more coming, but that turned out to be a lie.
 	 */
 	if (errors)
-		blk_mq_commit_rqs(hctx, &queued, false);
+		blk_mq_commit_rqs(hctx, queued, false);
 }
 
 static void __blk_mq_flush_plug_list(struct request_queue *q,