[6/9] blk-mq: don't set data->ctx and data->hctx in __blk_mq_alloc_request

Message ID 20200518063937.757218-7-hch@lst.de (mailing list archive)
State New, archived
Series [1/9] blk-mq: split out a __blk_mq_get_driver_tag helper

Commit Message

Christoph Hellwig May 18, 2020, 6:39 a.m. UTC
Now that blk_mq_alloc_request_hctx doesn't set ->ctx and ->hctx itself,
setting them can be done entirely in the low-level blk_mq_get_tag helper.

Based on a patch from Ming Lei <ming.lei@redhat.com>.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq-tag.c |  8 +++++++-
 block/blk-mq.c     | 15 +--------------
 block/blk-mq.h     |  4 ++--
 3 files changed, 10 insertions(+), 17 deletions(-)
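
In effect, blk_mq_get_tag() now resolves the software and hardware context
itself before looking up the tag map. A minimal sketch of the resulting
prologue, assembled from the hunks below (not the complete function):

	unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
	{
		struct blk_mq_tags *tags;
		...

		/* ctx/hctx are derived here instead of in __blk_mq_alloc_request() */
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (!(data->flags & BLK_MQ_REQ_INTERNAL))
			blk_mq_tag_busy(data->hctx);
		...
	}

Since blk_mq_get_tag() no longer depends on the caller having filled in
data->ctx and data->hctx, __blk_mq_alloc_request() can drop the
clear_ctx_on_error handling on the BLK_MQ_TAG_FAIL path, as the second
hunk below shows.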

Patch

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e5b17300ec882..b526f1f5a3bf3 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -101,13 +101,19 @@  static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 
 unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 {
-	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
+	struct blk_mq_tags *tags;
 	struct sbitmap_queue *bt;
 	struct sbq_wait_state *ws;
 	DEFINE_SBQ_WAIT(wait);
 	unsigned int tag_offset;
 	int tag;
 
+	data->ctx = blk_mq_get_ctx(data->q);
+	data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, data->ctx);
+	tags = blk_mq_tags_from_data(data);
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
+		blk_mq_tag_busy(data->hctx);
+
 	if (data->flags & BLK_MQ_REQ_RESERVED) {
 		if (unlikely(!tags->nr_reserved_tags)) {
 			WARN_ON_ONCE(1);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 540b5845cd1d3..74c2d8f61426c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -276,7 +276,6 @@  static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 	struct elevator_queue *e = q->elevator;
 	struct request *rq;
 	unsigned int tag;
-	bool clear_ctx_on_error = false;
 	req_flags_t rq_flags = 0;
 	u64 alloc_time_ns = 0;
 
@@ -284,13 +283,6 @@  static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 	if (blk_queue_rq_alloc_time(q))
 		alloc_time_ns = ktime_get_ns();
 
-	if (likely(!data->ctx)) {
-		data->ctx = blk_mq_get_ctx(q);
-		clear_ctx_on_error = true;
-	}
-	if (likely(!data->hctx))
-		data->hctx = blk_mq_map_queue(q, data->cmd_flags,
-						data->ctx);
 	if (data->cmd_flags & REQ_NOWAIT)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
@@ -306,16 +298,11 @@  static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 		    e->type->ops.limit_depth &&
 		    !(data->flags & BLK_MQ_REQ_RESERVED))
 			e->type->ops.limit_depth(data->cmd_flags, data);
-	} else {
-		blk_mq_tag_busy(data->hctx);
 	}
 
 	tag = blk_mq_get_tag(data);
-	if (tag == BLK_MQ_TAG_FAIL) {
-		if (clear_ctx_on_error)
-			data->ctx = NULL;
+	if (tag == BLK_MQ_TAG_FAIL)
 		return NULL;
-	}
 
 	if (data->flags & BLK_MQ_REQ_INTERNAL) {
 		rq = data->hctx->sched_tags->static_rqs[tag];
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 82921b30b6afa..1338be9d51777 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -146,13 +146,13 @@  static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
 }
 
 struct blk_mq_alloc_data {
-	/* input parameter */
+	/* input parameters */
 	struct request_queue *q;
 	blk_mq_req_flags_t flags;
 	unsigned int shallow_depth;
 	unsigned int cmd_flags;
 
-	/* input & output parameter */
+	/* output parameters */
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_hw_ctx *hctx;