There is no logical split between what gets initialized by which
function, so just merge the two.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c         | 103 +++++++++++++++++++----------------------
 include/linux/blkdev.h |   2 +-
 2 files changed, 49 insertions(+), 56 deletions(-)

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -270,17 +270,59 @@ static inline bool blk_mq_need_time_stamp(struct request *rq)
return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator;
}

-static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
- unsigned int tag, unsigned int op, u64 alloc_time_ns)
+static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
{
- struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
- struct request *rq = tags->static_rqs[tag];
+ struct request_queue *q = data->q;
+ struct elevator_queue *e = q->elevator;
+ struct request *rq;
+ unsigned int tag;
+ bool clear_ctx_on_error = false;
req_flags_t rq_flags = 0;
+ u64 alloc_time_ns = 0;
+
+ /* alloc_time includes depth and tag waits */
+ if (blk_queue_rq_alloc_time(q))
+ alloc_time_ns = ktime_get_ns();
+
+ if (likely(!data->ctx)) {
+ data->ctx = blk_mq_get_ctx(q);
+ clear_ctx_on_error = true;
+ }
+ if (likely(!data->hctx))
+ data->hctx = blk_mq_map_queue(q, data->cmd_flags,
+ data->ctx);
+ if (data->cmd_flags & REQ_NOWAIT)
+ data->flags |= BLK_MQ_REQ_NOWAIT;
+
+ if (e) {
+ data->flags |= BLK_MQ_REQ_INTERNAL;
+
+ /*
+ * Flush requests are special and go directly to the
+ * dispatch list. Don't include reserved tags in the
+ * limiting, as it isn't useful.
+ */
+ if (!op_is_flush(data->cmd_flags) &&
+ e->type->ops.limit_depth &&
+ !(data->flags & BLK_MQ_REQ_RESERVED))
+ e->type->ops.limit_depth(data->cmd_flags, data);
+ } else {
+ blk_mq_tag_busy(data->hctx);
+ }
+
+ tag = blk_mq_get_tag(data);
+ if (tag == BLK_MQ_TAG_FAIL) {
+ if (clear_ctx_on_error)
+ data->ctx = NULL;
+ return NULL;
+ }
if (data->flags & BLK_MQ_REQ_INTERNAL) {
+ rq = data->hctx->sched_tags->static_rqs[tag];
rq->tag = -1;
rq->internal_tag = tag;
} else {
+ rq = data->hctx->tags->static_rqs[tag];
if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
rq_flags = RQF_MQ_INFLIGHT;
atomic_inc(&data->hctx->nr_active);
@@ -295,7 +337,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->mq_ctx = data->ctx;
rq->mq_hctx = data->hctx;
rq->rq_flags = rq_flags;
- rq->cmd_flags = op;
+ rq->cmd_flags = data->cmd_flags;
if (data->flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT;
if (blk_queue_io_stat(data->q))
@@ -327,58 +369,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->end_io = NULL;
rq->end_io_data = NULL;
- data->ctx->rq_dispatched[op_is_sync(op)]++;
+ data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;
refcount_set(&rq->ref, 1);
- return rq;
-}
-
-static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
-{
- struct request_queue *q = data->q;
- struct elevator_queue *e = q->elevator;
- struct request *rq;
- unsigned int tag;
- bool clear_ctx_on_error = false;
- u64 alloc_time_ns = 0;
-
- /* alloc_time includes depth and tag waits */
- if (blk_queue_rq_alloc_time(q))
- alloc_time_ns = ktime_get_ns();
-
- if (likely(!data->ctx)) {
- data->ctx = blk_mq_get_ctx(q);
- clear_ctx_on_error = true;
- }
- if (likely(!data->hctx))
- data->hctx = blk_mq_map_queue(q, data->cmd_flags,
- data->ctx);
- if (data->cmd_flags & REQ_NOWAIT)
- data->flags |= BLK_MQ_REQ_NOWAIT;
-
- if (e) {
- data->flags |= BLK_MQ_REQ_INTERNAL;
-
- /*
- * Flush requests are special and go directly to the
- * dispatch list. Don't include reserved tags in the
- * limiting, as it isn't useful.
- */
- if (!op_is_flush(data->cmd_flags) &&
- e->type->ops.limit_depth &&
- !(data->flags & BLK_MQ_REQ_RESERVED))
- e->type->ops.limit_depth(data->cmd_flags, data);
- } else {
- blk_mq_tag_busy(data->hctx);
- }
-
- tag = blk_mq_get_tag(data);
- if (tag == BLK_MQ_TAG_FAIL) {
- if (clear_ctx_on_error)
- data->ctx = NULL;
- return NULL;
- }
- rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags, alloc_time_ns);
if (!op_is_flush(data->cmd_flags)) {
rq->elv.icq = NULL;
if (e && e->type->ops.prepare_request) {
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -126,7 +126,7 @@ enum mq_rq_state {
* Try to put the fields that are referenced together in the same cacheline.
*
* If you modify this structure, make sure to update blk_rq_init() and
- * especially blk_mq_rq_ctx_init() to take care of the added fields.
+ * especially __blk_mq_alloc_request() to take care of the added fields.
*/
struct request {
struct request_queue *q;
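
For readers skimming the result rather than the diff, the merged
allocation path now reads roughly as below. This is a hand-condensed
sketch of the code added above, with the alloc_time/statistics setup,
the elevator limit_depth hook, and most field initialization elided;
the diff itself is authoritative.

static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
{
	struct elevator_queue *e = data->q->elevator;
	struct request *rq;
	unsigned int tag;

	/* Resolve the software and hardware queue contexts up front. */
	if (likely(!data->ctx))
		data->ctx = blk_mq_get_ctx(data->q);
	if (likely(!data->hctx))
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, data->ctx);

	/* Scheduler-managed requests allocate from the internal tag set. */
	if (e)
		data->flags |= BLK_MQ_REQ_INTERNAL;
	else
		blk_mq_tag_busy(data->hctx);

	tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_TAG_FAIL)
		return NULL;

	/* Pick the static request backing the tag, then initialize it. */
	rq = (data->flags & BLK_MQ_REQ_INTERNAL) ?
		data->hctx->sched_tags->static_rqs[tag] :
		data->hctx->tags->static_rqs[tag];
	/* ...field setup that previously lived in blk_mq_rq_ctx_init()... */
	return rq;
}

The point of the merge is visible here: tag allocation and the
static_rqs[] lookup now sit next to each other, instead of the tag
being threaded through a separate init helper.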