@@ -45,7 +45,44 @@ static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
users);
}
-static void update_tag_sharing_busy(struct tag_sharing *tag_sharing)
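+/*
+ * Try to grant this sharer more tags by borrowing spare tags from the
+ * other sharers. Called with tags->lock held.
+ */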
+static void try_to_increase_available_tags(struct blk_mq_tags *tags,
+ struct tag_sharing *tag_sharing)
+{
+ unsigned int users = tags->ctl.share_queues;
+ unsigned int free_tags = 0;
+ unsigned int borrowed_tags = 0;
+ unsigned int nr_tags;
+ struct tag_sharing *tmp;
+
+ if (users <= 1)
+ return;
+
+ nr_tags = max(DIV_ROUND_UP(tags->nr_tags, users), 4U);
+
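+ /*
+ * Tags held above the per-sharer fair share are already borrowed;
+ * sharers that rarely fail allocations lend out their idle tags.
+ */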
+ list_for_each_entry(tmp, &tags->ctl.head, node) {
+ if (tmp == tag_sharing)
+ continue;
+
+ if (tmp->available_tags > nr_tags)
+ borrowed_tags += tmp->available_tags - nr_tags;
+ else if (atomic_read(&tmp->fail_count) <= nr_tags / 2)
+ free_tags += tmp->available_tags -
+ atomic_read(&tmp->active_tags);
+ }
+
+ /* nothing left to borrow, back off for a second before retrying */
+ if (free_tags <= borrowed_tags) {
+ WRITE_ONCE(tag_sharing->suspend, jiffies + HZ);
+ return;
+ }
+
+ /* borrow half of the surplus free tags */
+ tag_sharing->available_tags += (free_tags - borrowed_tags) / 2;
+}
+
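+/*
+ * Account another driver tag allocation failure for this sharer. The
+ * failure count is aged (halved) roughly once per second; sustained
+ * pressure triggers an attempt to borrow tags from the other sharers.
+ */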
+static void update_tag_sharing_busy(struct blk_mq_tags *tags,
+ struct tag_sharing *tag_sharing)
{
unsigned int count = atomic_inc_return(&tag_sharing->fail_count);
unsigned long last_period = READ_ONCE(tag_sharing->period);
@@ -53,7 +90,14 @@ static void update_tag_sharing_busy(struct tag_sharing *tag_sharing)
if (time_after(jiffies, last_period + HZ) &&
cmpxchg_relaxed(&tag_sharing->period, last_period, jiffies) ==
last_period)
- atomic_sub(count / 2, &tag_sharing->fail_count);
+ count = atomic_sub_return(count / 2, &tag_sharing->fail_count);
+
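+ /*
+ * The failure count has reached the total number of tags and any
+ * backoff period has expired: try to borrow tags.
+ */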
+ if (count >= tags->nr_tags &&
+ time_after(jiffies, READ_ONCE(tag_sharing->suspend))) {
+ spin_lock_irq(&tags->lock);
+ try_to_increase_available_tags(tags, tag_sharing);
+ spin_unlock_irq(&tags->lock);
+ }
}
void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
@@ -69,13 +113,13 @@ void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
if (test_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags) ||
test_and_set_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags)) {
- update_tag_sharing_busy(&q->tag_sharing);
+ update_tag_sharing_busy(tags, &q->tag_sharing);
return;
}
} else {
if (test_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state) ||
test_and_set_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state)) {
- update_tag_sharing_busy(&hctx->tag_sharing);
+ update_tag_sharing_busy(tags, &hctx->tag_sharing);
return;
}
}
@@ -381,6 +381,7 @@ struct tag_sharing {
atomic_t active_tags;
atomic_t fail_count;
unsigned long period;
+ unsigned long suspend; /* in jiffies, no borrowing before this time */
};
struct request_queue {