@@ -414,8 +414,6 @@ struct request_queue *blk_alloc_queue(int node_id)
q->node = node_id;
- atomic_set(&q->shared_tag_info.active_tags, 0);
-
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, blk_timeout_work);
INIT_LIST_HEAD(&q->icq_list);
@@ -29,6 +29,11 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
users);
}
+void blk_mq_init_shared_tag_info(struct shared_tag_info *info)
+{
+ atomic_set(&info->active_tags, 0);
+}
+
/*
* If a previously inactive queue goes active, bump the active user count.
* We need to do this before try to allocate driver tag, then even if fail
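For context, the new helper only assumes that shared_tag_info carries the active_tags counter at this point in the series; a rough sketch of the layout it initializes (the real definition lives elsewhere in the series and may hold more fields):

struct shared_tag_info {
	atomic_t	active_tags;	/* counter reset by blk_mq_init_shared_tag_info() */
};

Keeping the reset behind one function means later patches that grow the struct only have to extend this helper instead of every open-coded atomic_set() at the call sites.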
@@ -3652,6 +3652,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
goto exit_flush_rq;
+ blk_mq_init_shared_tag_info(&hctx->shared_tag_info);
return 0;
exit_flush_rq:
@@ -3679,7 +3680,6 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
goto free_hctx;
- atomic_set(&hctx->shared_tag_info.active_tags, 0);
if (node == NUMA_NO_NODE)
node = set->numa_node;
hctx->numa_node = node;
@@ -4227,6 +4227,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (blk_mq_alloc_ctxs(q))
goto err_exit;
+ blk_mq_init_shared_tag_info(&q->shared_tag_info);
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
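With this hunk, both owners of a shared_tag_info now go through the same helper: the per-queue instance above and the per-hctx instance in blk_mq_init_hctx(). A standalone userspace sketch of that pattern, using stand-in types and C11 atomics instead of the kernel's atomic_t, purely for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct shared_tag_info {
	atomic_int active_tags;		/* stand-in for the kernel's atomic_t */
};

static void shared_tag_info_init(struct shared_tag_info *info)
{
	atomic_init(&info->active_tags, 0);	/* analogue of atomic_set(..., 0) */
}

struct request_queue { struct shared_tag_info shared_tag_info; };	/* stand-in */
struct blk_mq_hw_ctx { struct shared_tag_info shared_tag_info; };	/* stand-in */

int main(void)
{
	struct request_queue q;
	struct blk_mq_hw_ctx hctx;

	/* every owner is initialized through the same helper */
	shared_tag_info_init(&q.shared_tag_info);
	shared_tag_info_init(&hctx.shared_tag_info);

	printf("q=%d hctx=%d\n",
	       atomic_load(&q.shared_tag_info.active_tags),
	       atomic_load(&hctx.shared_tag_info.active_tags));
	return 0;
}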
@@ -63,6 +63,7 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags,
unsigned int hctx_idx);
+void blk_mq_init_shared_tag_info(struct shared_tag_info *info);
/*
* CPU -> queue mappings