diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -622,14 +622,14 @@ static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
return 0;
}
-#define CTX_RQ_SEQ_OPS(name, type) \
+#define CTX_RQ_SEQ_OPS(name, __type) \
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
__acquires(&ctx->lock) \
{ \
struct blk_mq_ctx *ctx = m->private; \
\
spin_lock(&ctx->lock); \
- return seq_list_start(&ctx->rq_lists[type], *pos); \
+ return seq_list_start(&ctx->type[__type].rq_list, *pos); \
} \
\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
@@ -637,7 +637,7 @@ static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
{ \
struct blk_mq_ctx *ctx = m->private; \
\
- return seq_list_next(v, &ctx->rq_lists[type], pos); \
+ return seq_list_next(v, &ctx->type[__type].rq_list, pos); \
} \
\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v) \
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -313,7 +313,7 @@ static bool blk_mq_attempt_merge(struct request_queue *q,
lockdep_assert_held(&ctx->lock);
- if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) {
+ if (blk_mq_bio_list_merge(q, &ctx->type[type].rq_list, bio, nr_segs)) {
ctx->rq_merged++;
return true;
}
@@ -335,7 +335,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
type = hctx->type;
if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
- !list_empty_careful(&ctx->rq_lists[type])) {
+ !list_empty_careful(&ctx->type[type].rq_list)) {
/* default per sw-queue merge */
spin_lock(&ctx->lock);
ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs);
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -953,7 +953,7 @@ static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
enum hctx_type type = hctx->type;
spin_lock(&ctx->lock);
- list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
+ list_splice_tail_init(&ctx->type[type].rq_list, flush_data->list);
sbitmap_clear_bit(sb, bitnr);
spin_unlock(&ctx->lock);
return true;
@@ -985,13 +985,13 @@ static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
struct dispatch_rq_data *dispatch_data = data;
struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
- enum hctx_type type = hctx->type;
+ struct blk_mq_ctx_type *type = &ctx->type[hctx->type];
spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_lists[type])) {
- dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
+ if (!list_empty(&type->rq_list)) {
+ dispatch_data->rq = list_entry_rq(type->rq_list.next);
list_del_init(&dispatch_data->rq->queuelist);
- if (list_empty(&ctx->rq_lists[type]))
+ if (list_empty(&type->rq_list))
sbitmap_clear_bit(sb, bitnr);
}
spin_unlock(&ctx->lock);
@@ -1648,9 +1648,9 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
- list_add(&rq->queuelist, &ctx->rq_lists[type]);
+ list_add(&rq->queuelist, &ctx->type[type].rq_list);
else
- list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
+ list_add_tail(&rq->queuelist, &ctx->type[type].rq_list);
}
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
@@ -1701,7 +1701,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
}
spin_lock(&ctx->lock);
- list_splice_tail_init(list, &ctx->rq_lists[type]);
+ list_splice_tail_init(list, &ctx->type[type].rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
}
@@ -2256,8 +2256,8 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
type = hctx->type;
spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_lists[type])) {
- list_splice_init(&ctx->rq_lists[type], &tmp);
+ if (!list_empty(&ctx->type[type].rq_list)) {
+ list_splice_init(&ctx->type[type].rq_list, &tmp);
blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);
@@ -2437,7 +2437,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
- INIT_LIST_HEAD(&__ctx->rq_lists[k]);
+ INIT_LIST_HEAD(&__ctx->type[k].rq_list);
/*
* Set local node, IFF we have more than one hw queue. If
diff --git a/block/blk-mq.h b/block/blk-mq.h
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -12,13 +12,17 @@ struct blk_mq_ctxs {
struct blk_mq_ctx __percpu *queue_ctx;
};
+struct blk_mq_ctx_type {
+ struct list_head rq_list;
+};
+
/**
* struct blk_mq_ctx - State for a software queue facing the submitting CPUs
*/
struct blk_mq_ctx {
struct {
spinlock_t lock;
- struct list_head rq_lists[HCTX_MAX_TYPES];
+ struct blk_mq_ctx_type type[HCTX_MAX_TYPES];
} ____cacheline_aligned_in_smp;
unsigned int cpu;
This only holds the dispatch list for now, and there should be no
functional changes in this patch. This is in preparation for adding
more items to the per-ctx type structure.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/blk-mq-debugfs.c |  6 +++---
 block/blk-mq-sched.c   |  4 ++--
 block/blk-mq.c         | 22 +++++++++++-----------
 block/blk-mq.h         |  6 +++++-
 4 files changed, 21 insertions(+), 17 deletions(-)
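For illustration only, here is a minimal standalone sketch (userspace,
not kernel code) of the access pattern the patch introduces. The mini
list_head helpers and the hctx_type values below are stand-ins for
<linux/list.h> and the real blk-mq enum; only the shape of the
one-member wrapper struct is the point.

/*
 * Standalone sketch, not kernel code: wrap the per-type request list
 * in a struct so later changes can add per-type fields without
 * touching every ctx->type[...] call site again.
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

enum hctx_type {
	HCTX_TYPE_DEFAULT,
	HCTX_TYPE_READ,
	HCTX_TYPE_POLL,
	HCTX_MAX_TYPES,
};

/* One struct per hardware queue type; just the list for now. */
struct blk_mq_ctx_type {
	struct list_head rq_list;
	/* later changes would add more per-type state here */
};

struct blk_mq_ctx {
	struct blk_mq_ctx_type type[HCTX_MAX_TYPES];
};

int main(void)
{
	struct blk_mq_ctx ctx;
	int k;

	/* Mirrors the blk_mq_init_cpu_queues() hunk above. */
	for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
		INIT_LIST_HEAD(&ctx.type[k].rq_list);

	/* Old form: list_empty(&ctx->rq_lists[k]); new: via the wrapper. */
	printf("default list empty: %d\n",
	       list_empty(&ctx.type[HCTX_TYPE_DEFAULT].rq_list));
	return 0;
}

The payoff of the one-member struct is that future per-type fields land
next to rq_list, while every existing ctx->type[...] dereference keeps
working unchanged.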