@@ -300,7 +300,7 @@ void blk_mq_sysfs_deinit(struct request_queue *q)
int cpu;

for_each_possible_cpu(cpu) {
- ctx = *per_cpu_ptr(q->queue_ctx, cpu);
+ ctx = q->queue_ctx[cpu];
kobject_put(&ctx->kobj);
}
kobject_put(q->mq_kobj);
@@ -322,7 +322,7 @@ int blk_mq_sysfs_init(struct request_queue *q)
ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL, cpu_to_node(cpu));
if (!ctx)
goto fail;
- *per_cpu_ptr(q->queue_ctx, cpu) = ctx;
+ q->queue_ctx[cpu] = ctx;
kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}
q->mq_kobj = &mq_kobj->kobj;
@@ -330,7 +330,7 @@ int blk_mq_sysfs_init(struct request_queue *q)

fail:
for_each_possible_cpu(cpu) {
- ctx = *per_cpu_ptr(q->queue_ctx, cpu);
+ ctx = q->queue_ctx[cpu];
if (ctx)
kobject_put(&ctx->kobj);
}
@@ -2299,7 +2299,7 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
unsigned int i, j;

for_each_possible_cpu(i) {
- struct blk_mq_ctx *__ctx = *per_cpu_ptr(q->queue_ctx, i);
+ struct blk_mq_ctx *__ctx = q->queue_ctx[i];
struct blk_mq_hw_ctx *hctx;

__ctx->cpu = i;
@@ -2385,7 +2385,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
set->map[0].mq_map[i] = 0;
}

- ctx = *per_cpu_ptr(q->queue_ctx, i);
+ ctx = q->queue_ctx[i];
for (j = 0; j < set->nr_maps; j++) {
hctx = blk_mq_map_queue_type(q, j, i);

@@ -2541,7 +2541,7 @@ void blk_mq_release(struct request_queue *q)
*/
blk_mq_sysfs_deinit(q);

- free_percpu(q->queue_ctx);
+ kfree(q->queue_ctx);
}

struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
@@ -2731,7 +2731,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (!q->poll_cb)
goto err_exit;

- q->queue_ctx = alloc_percpu(struct blk_mq_ctx *);
+ q->queue_ctx = kmalloc_array_node(nr_cpu_ids,
+ sizeof(struct blk_mq_ctx *),
+ GFP_KERNEL, set->numa_node);
if (!q->queue_ctx)
goto err_exit;

@@ -2798,7 +2800,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
err_sys_init:
blk_mq_sysfs_deinit(q);
err_percpu:
- free_percpu(q->queue_ctx);
+ kfree(q->queue_ctx);
err_exit:
q->mq_ops = NULL;
return ERR_PTR(-ENOMEM);
@@ -129,7 +129,7 @@ static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
- return *per_cpu_ptr(q->queue_ctx, cpu);
+ return q->queue_ctx[cpu];
}

/*
@@ -407,7 +407,7 @@ struct request_queue {
const struct blk_mq_ops *mq_ops;

/* sw queues */
- struct blk_mq_ctx __percpu **queue_ctx;
+ struct blk_mq_ctx **queue_ctx;
unsigned int nr_queues;

unsigned int queue_depth;
Now q->queue_ctx is just a read-mostly table for looking up the
'blk_mq_ctx' instance for a given CPU index, so it isn't necessary to
allocate it as a percpu variable. A simple array may be more efficient.

Cc: Guenter Roeck <linux@roeck-us.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq-sysfs.c   | 6 +++---
 block/blk-mq.c         | 12 +++++++-----
 block/blk-mq.h         | 2 +-
 include/linux/blkdev.h | 2 +-
 4 files changed, 12 insertions(+), 10 deletions(-)
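For reference, the userspace sketch below models why a flat table can be
cheaper than a percpu one for this read-mostly lookup: per_cpu_ptr() has
to fetch a per-CPU offset (the kernel keeps these in __per_cpu_offset[])
and add it to the base pointer before dereferencing, while a plain array
lookup is a single indexed load. This is an illustration only, under
stated assumptions: my_per_cpu_offset[], my_alloc_percpu(),
my_per_cpu_ptr() and struct ctx are hypothetical stand-ins that mimic
the shape of the percpu accessors, not kernel APIs.

	/*
	 * Userspace sketch, for illustration only; not kernel code.
	 * Build with: cc -Wall sketch.c
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define NR_CPUS 4

	struct ctx {
		int cpu;
	};

	/* one offset per CPU, like the kernel's __per_cpu_offset[] table */
	static long my_per_cpu_offset[NR_CPUS];

	static void *my_alloc_percpu(size_t size)
	{
		void *base = calloc(NR_CPUS, size);
		int cpu;

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			my_per_cpu_offset[cpu] = (long)cpu * (long)size;
		return base;
	}

	/* percpu-style lookup: base + per-CPU offset, then dereference */
	#define my_per_cpu_ptr(base, cpu) \
		((void *)((char *)(base) + my_per_cpu_offset[cpu]))

	int main(void)
	{
		struct ctx **pcpu_tbl;	/* before: percpu table of pointers */
		struct ctx **arr_tbl;	/* after: plain array of pointers */
		int cpu;

		pcpu_tbl = my_alloc_percpu(sizeof(struct ctx *));
		arr_tbl = calloc(NR_CPUS, sizeof(struct ctx *));

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			struct ctx *c = malloc(sizeof(*c));

			c->cpu = cpu;
			/* percpu style: offset load, add, then the store */
			*(struct ctx **)my_per_cpu_ptr(pcpu_tbl, cpu) = c;
			/* array style: one indexed store */
			arr_tbl[cpu] = c;
		}

		printf("percpu lookup: cpu=%d, array lookup: cpu=%d\n",
		       (*(struct ctx **)my_per_cpu_ptr(pcpu_tbl, 2))->cpu,
		       arr_tbl[2]->cpu);

		for (cpu = 0; cpu < NR_CPUS; cpu++)
			free(arr_tbl[cpu]);
		free(arr_tbl);
		free(pcpu_tbl);
		return 0;
	}

Since q->queue_ctx is written once at init time and only read in the
I/O path, the flat array also lets kmalloc_array_node() place the whole
table on the tag set's NUMA node, which alloc_percpu() does not offer.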