
[RFC,v2,3/8] blk-mq: add a helper to initialize shared_tag_info

Message ID 20231021154806.4019417-4-yukuai1@huaweicloud.com
State New, archived
Series blk-mq: improve tag fair sharing

Commit Message

Yu Kuai Oct. 21, 2023, 3:48 p.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

shared_tag_info is used for both request_queue and hctx, and follow-up
patches will add more fields to the structure. Add a helper to avoid
redundant code.

Also move the initialization for request_queue from blk_alloc_queue() to
blk_mq_init_allocated_queue(), because shared_tag_info won't be used for
bio-based devices. And move the initialization for hctx from
blk_mq_alloc_hctx() to blk_mq_init_hctx(), because a hctx can be reused.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-core.c   | 2 --
 block/blk-mq-tag.c | 5 +++++
 block/blk-mq.c     | 3 ++-
 block/blk-mq.h     | 1 +
 4 files changed, 8 insertions(+), 3 deletions(-)
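
For reference, a condensed sketch of what the refactoring amounts to. The
shared_tag_info layout below is an assumption based on the single field this
patch touches (the real definition lives outside this patch and may already
carry more):

/* assumed layout: only active_tags is visible in this patch; follow-up
 * patches in the series are expected to add more fields, which is why a
 * shared init helper is worth having */
struct shared_tag_info {
	atomic_t active_tags;
};

void blk_mq_init_shared_tag_info(struct shared_tag_info *info)
{
	atomic_set(&info->active_tags, 0);
}

After this patch the helper is called from blk_mq_init_hctx() for each hctx
and from blk_mq_init_allocated_queue() for the request_queue, replacing the
two open-coded atomic_set() calls removed below.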

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index c028b047f5d5..756ca1109f6c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -414,8 +414,6 @@  struct request_queue *blk_alloc_queue(int node_id)
 
 	q->node = node_id;
 
-	atomic_set(&q->shared_tag_info.active_tags, 0);
-
 	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
 	INIT_WORK(&q->timeout_work, blk_timeout_work);
 	INIT_LIST_HEAD(&q->icq_list);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index fe41a0d34fc0..2f91a7605d7a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -29,6 +29,11 @@  static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
 			users);
 }
 
+void blk_mq_init_shared_tag_info(struct shared_tag_info *info)
+{
+	atomic_set(&info->active_tags, 0);
+}
+
 /*
  * If a previously inactive queue goes active, bump the active user count.
  * We need to do this before try to allocate driver tag, then even if fail
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d85b9ad816d0..de5859dd9f52 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3652,6 +3652,7 @@  static int blk_mq_init_hctx(struct request_queue *q,
 	if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL))
 		goto exit_flush_rq;
 
+	blk_mq_init_shared_tag_info(&hctx->shared_tag_info);
 	return 0;
 
  exit_flush_rq:
@@ -3679,7 +3680,6 @@  blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
 	if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
 		goto free_hctx;
 
-	atomic_set(&hctx->shared_tag_info.active_tags, 0);
 	if (node == NUMA_NO_NODE)
 		node = set->numa_node;
 	hctx->numa_node = node;
@@ -4227,6 +4227,7 @@  int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (blk_mq_alloc_ctxs(q))
 		goto err_exit;
 
+	blk_mq_init_shared_tag_info(&q->shared_tag_info);
 	/* init q->mq_kobj and sw queues' kobjects */
 	blk_mq_sysfs_init(q);
 
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 6f332dc122ff..ac58f2e22f20 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -63,6 +63,7 @@  struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 			     struct blk_mq_tags *tags,
 			     unsigned int hctx_idx);
+void blk_mq_init_shared_tag_info(struct shared_tag_info *info);
 
 /*
  * CPU -> queue mappings