diff mbox series

[RFC,5/7] blk-mq: record the number of times fail to get driver tag while sharing tags

Message ID 20230618160738.54385-6-yukuai1@huaweicloud.com (mailing list archive)
State New, archived
Headers show
Series blk-mq: improve tag fair sharing | expand

Commit Message

Yu Kuai June 18, 2023, 4:07 p.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

Add an atomic counter to record such failures; this counter will be used to
adjust the number of tags assigned to active queues. The counter also
decays every second so that it only represents recent I/O pressure.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-mq-tag.c     | 22 ++++++++++++++++++++--
 include/linux/blkdev.h |  2 ++
 2 files changed, 22 insertions(+), 2 deletions(-)

Comments

Bart Van Assche July 6, 2023, 6:18 p.m. UTC | #1
On 6/18/23 09:07, Yu Kuai wrote:
> +static void update_tag_sharing_busy(struct tag_sharing *tag_sharing)
> +{
> +	unsigned int count = atomic_inc_return(&tag_sharing->fail_count);
> +	unsigned long last_period = READ_ONCE(tag_sharing->period);
> +
> +	if (time_after(jiffies, last_period + HZ) &&
> +	    cmpxchg_relaxed(&tag_sharing->period, last_period, jiffies) ==
> +			    last_period)
> +		atomic_sub(count / 2, &tag_sharing->fail_count);
> +}

For new code, try_cmpxchg_relaxed() is preferred over cmpxchg_relaxed().

>   struct tag_sharing {
>   	struct list_head	node;
>   	unsigned int		available_tags;
> +	atomic_t		fail_count;
> +	unsigned long		period;
>   };

Please consider renaming "period" into "latest_reduction" or any other name
that makes the purpose of this member clear.

Thanks,

Bart.
diff mbox series

Patch

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index e0137206c02b..5e5742c7277a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -45,6 +45,17 @@  static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
 			users);
 }
 
+static void update_tag_sharing_busy(struct tag_sharing *tag_sharing)
+{
+	unsigned int count = atomic_inc_return(&tag_sharing->fail_count);
+	unsigned long last_period = READ_ONCE(tag_sharing->period);
+
+	if (time_after(jiffies, last_period + HZ) &&
+	    cmpxchg_relaxed(&tag_sharing->period, last_period, jiffies) ==
+			    last_period)
+		atomic_sub(count / 2, &tag_sharing->fail_count);
+}
+
 void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
 	struct blk_mq_tags *tags = hctx->tags;
@@ -57,12 +68,16 @@  void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
 		struct request_queue *q = hctx->queue;
 
 		if (test_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags) ||
-		    test_and_set_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags))
+		    test_and_set_bit(QUEUE_FLAG_HCTX_BUSY, &q->queue_flags)) {
+			update_tag_sharing_busy(&q->tag_sharing);
 			return;
+		}
 	} else {
 		if (test_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state) ||
-		    test_and_set_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state))
+		    test_and_set_bit(BLK_MQ_S_DTAG_BUSY, &hctx->state)) {
+			update_tag_sharing_busy(&hctx->tag_sharing);
 			return;
+		}
 	}
 
 	spin_lock_irq(&tags->lock);
@@ -152,8 +167,11 @@  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 	}
 
 	spin_lock_irq(&tags->lock);
+
 	list_del_init(&tag_sharing->node);
 	tag_sharing->available_tags = tags->nr_tags;
+	atomic_set(&tag_sharing->fail_count, 0);
+
 	__blk_mq_driver_tag_idle(hctx);
 	WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues - 1);
 	WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e5111bedfd8d..f3faaf5f6504 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -378,6 +378,8 @@  struct blk_independent_access_ranges {
 struct tag_sharing {
 	struct list_head	node;
 	unsigned int		available_tags;
+	atomic_t		fail_count;
+	unsigned long		period;
 };
 
 struct request_queue {