@@ -64,6 +64,7 @@ void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_tags *tags = hctx->tags;
+ struct tag_sharing *tag_sharing;

/*
* calling test_bit() prior to test_and_set_bit() is intentional,
@@ -75,13 +76,18 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
return;
+
+ tag_sharing = &q->tag_sharing;
} else {
if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
return;
+
+ tag_sharing = &hctx->tag_sharing;
}

spin_lock_irq(&tags->lock);
+ list_add(&tag_sharing->node, &tags->ctl.head);
WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues + 1);
spin_unlock_irq(&tags->lock);
}
@@ -111,6 +117,7 @@ static void __blk_mq_driver_tag_idle(struct blk_mq_hw_ctx *hctx)
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_tags *tags = hctx->tags;
+ struct tag_sharing *tag_sharing;

if (blk_mq_is_shared_tags(hctx->flags)) {
struct request_queue *q = hctx->queue;
@@ -118,12 +125,17 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
&q->queue_flags))
return;
+
+ tag_sharing = &q->tag_sharing;
} else {
if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
return;
+
+ tag_sharing = &hctx->tag_sharing;
}

spin_lock_irq(&tags->lock);
+ list_del_init(&tag_sharing->node);
__blk_mq_driver_tag_idle(hctx);
WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues - 1);
WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
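
The two hunks above are symmetric: when a queue (or, for unshared tag sets, a hardware context) first becomes active, __blk_mq_tag_busy() links its tag_sharing node onto the tag set's ctl.head list, and __blk_mq_tag_idle() unlinks it again once the queue goes idle. Both paths run under the same tags->lock that already serialises the active_queues bookkeeping. A small self-contained sketch of this bookkeeping follows at the end of the section.
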
@@ -619,6 +631,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
spin_lock_init(&tags->lock);
+ INIT_LIST_HEAD(&tags->ctl.head);

if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
total_tags, reserved_tags, node,
@@ -390,6 +390,7 @@ struct blk_mq_hw_ctx {
* assigned when a request is dispatched from a hardware queue.
*/
struct blk_mq_tags *tags;
+ struct tag_sharing tag_sharing;
/**
* @sched_tags: Tags owned by I/O scheduler. If there is an I/O
* scheduler associated with a request queue, a tag is assigned when
@@ -737,6 +738,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
struct tag_sharing_ctl {
unsigned int active_queues;
unsigned int share_queues;
+ struct list_head head;
};

/*
@@ -375,6 +375,10 @@ struct blk_independent_access_ranges {
struct blk_independent_access_range ia_range[];
};

+struct tag_sharing {
+ struct list_head node;
+};
+
struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
@@ -513,6 +517,7 @@ struct request_queue {
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
+ struct tag_sharing tag_sharing;

struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
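
To make the new bookkeeping easier to follow, here is a minimal user-space sketch of what the diff adds, compilable on its own. The structure and field names (tag_sharing, tag_sharing_ctl, active_queues, share_queues, head, node) mirror the patch; everything else is an assumption for illustration only: the tags_model stand-in for struct blk_mq_tags, the tag_busy()/tag_idle() helpers, the hand-rolled list primitives replacing <linux/list.h>, the omitted tags->lock, and the even-split printout at the end, which the diff itself does not compute.

#include <stdio.h>

/* Minimal doubly-linked list, standing in for the kernel's <linux/list.h>. */
struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
        head->next = head;
        head->prev = head;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);
}

/* Mirrors the structures added by the diff. */
struct tag_sharing {
        struct list_head node;
};

struct tag_sharing_ctl {
        unsigned int active_queues;
        unsigned int share_queues;
        struct list_head head;
};

/* Hypothetical stand-in for the relevant parts of struct blk_mq_tags. */
struct tags_model {
        unsigned int nr_tags;
        struct tag_sharing_ctl ctl;
};

/* Roughly what __blk_mq_tag_busy() does once the "already active" checks
 * have passed (locking omitted in this model). */
static void tag_busy(struct tags_model *tags, struct tag_sharing *ts)
{
        list_add(&ts->node, &tags->ctl.head);
        tags->ctl.active_queues++;
}

/* Roughly what __blk_mq_tag_idle() does (locking omitted). */
static void tag_idle(struct tags_model *tags, struct tag_sharing *ts)
{
        list_del_init(&ts->node);
        tags->ctl.active_queues--;
        tags->ctl.share_queues = tags->ctl.active_queues;
}

int main(void)
{
        struct tags_model tags = { .nr_tags = 256 };
        struct tag_sharing q1, q2;

        INIT_LIST_HEAD(&tags.ctl.head);
        INIT_LIST_HEAD(&q1.node);
        INIT_LIST_HEAD(&q2.node);

        tag_busy(&tags, &q1);
        tag_busy(&tags, &q2);
        /* Illustrative only: an even split over the queues on the list. */
        printf("%u active sharers, ~%u tags each\n",
               tags.ctl.active_queues, tags.nr_tags / tags.ctl.active_queues);

        tag_idle(&tags, &q2);
        printf("%u active sharer(s) left\n", tags.ctl.active_queues);
        return 0;
}

Keeping struct tag_sharing down to a single list_head means each request_queue and blk_mq_hw_ctx pays only two pointers for the new tracking, while the shared side, struct blk_mq_tags, owns the list head and the counters in tag_sharing_ctl.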