@@ -45,6 +45,18 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
users);
}

+/* Count a driver tag failure; decay the counter by half once per second. */
+static void update_tag_sharing_busy(struct tag_sharing *tag_sharing)
+{
+ unsigned int count = atomic_inc_return(&tag_sharing->fail_count);
+ unsigned long last_period = READ_ONCE(tag_sharing->period);
+
+ if (time_after(jiffies, last_period + HZ) &&
+ cmpxchg_relaxed(&tag_sharing->period, last_period, jiffies) ==
+ last_period)
+ atomic_sub(count / 2, &tag_sharing->fail_count);
+}
+
void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_tags *tags = hctx->tags;
@@ -57,12 +68,16 @@ void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
if (test_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags) ||
- test_and_set_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags))
+ test_and_set_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags)) {
+ update_tag_sharing_busy(&q->tag_sharing);
return;
+ }
} else {
if (test_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state) ||
- test_and_set_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state))
+ test_and_set_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state)) {
+ update_tag_sharing_busy(&hctx->tag_sharing);
return;
+ }
}
spin_lock_irq(&tags->lock);
@@ -152,8 +167,11 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
}
spin_lock_irq(&tags->lock);
+
list_del_init(&tag_sharing->node);
tag_sharing->available_tags = tags->nr_tags;
+ atomic_set(&tag_sharing->fail_count, 0);
+
__blk_mq_driver_tag_idle(hctx);
WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues - 1);
WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
@@ -378,6 +378,8 @@ struct blk_independent_access_ranges {
struct tag_sharing {
struct list_head node;
unsigned int available_tags;
+ atomic_t fail_count;	/* failed driver tag allocations */
+ unsigned long period;	/* jiffies when the counter was last decayed */
};
struct request_queue {
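
The update_tag_sharing_busy() helper added above implements a simple decaying
counter: every failed attempt to get a driver tag while the queue is already
marked busy bumps fail_count, and at most once per second the counter is
roughly halved, so that old failures gradually age out. Below is a minimal
userspace sketch of that decay scheme, for illustration only: it uses C11
atomics instead of the kernel's atomic_t/cmpxchg helpers, fakes jiffies with a
plain counter, ignores wraparound, and all names (tag_sharing_model,
model_tag_busy, the HZ value) are assumptions, not part of the patch.

#include <stdatomic.h>
#include <stdio.h>

#define HZ 100				/* assumed ticks per "second" */

static unsigned long jiffies;		/* stand-in for the kernel's jiffies */

struct tag_sharing_model {
	atomic_uint fail_count;		/* failed driver tag allocations */
	atomic_ulong period;		/* jiffies when the counter was last decayed */
};

/* Models update_tag_sharing_busy(): count a failure, decay once per HZ. */
static void model_tag_busy(struct tag_sharing_model *ts)
{
	unsigned int count = atomic_fetch_add(&ts->fail_count, 1) + 1;
	unsigned long last_period = atomic_load(&ts->period);

	/* Only one caller per period wins the CAS and applies the decay. */
	if (jiffies > last_period + HZ &&
	    atomic_compare_exchange_strong(&ts->period, &last_period, jiffies))
		atomic_fetch_sub(&ts->fail_count, count / 2);
}

int main(void)
{
	struct tag_sharing_model ts = { 0 };

	/* 300 back-to-back failures spread across three "seconds". */
	for (int i = 0; i < 300; i++) {
		jiffies++;
		model_tag_busy(&ts);
	}
	printf("fail_count after decay: %u\n", atomic_load(&ts.fail_count));
	return 0;
}

The same shape is visible in the patch itself: atomic_inc_return() provides
the post-increment count, READ_ONCE()/cmpxchg_relaxed() on ->period ensures
only one CPU applies the halving for a given period, and __blk_mq_tag_idle()
resets fail_count when the hctx goes idle.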