
[RFC,v2,1/4] block/mq-deadline: Revert "block/mq-deadline: Fix the tag reservation code"

Message ID 20241217024047.1091893-2-yukuai1@huaweicloud.com (mailing list archive)
State New
Series lib/sbitmap: fix shallow_depth tag allocation

Commit Message

Yu Kuai Dec. 17, 2024, 2:40 a.m. UTC
From: Yu Kuai <yukuai3@huawei.com>

This reverts commit 39823b47bbd40502632ffba90ebb34fff7c8b5e8.

1) Setting min_shallow_depth to 1 ends up setting wake_batch to 1 as
   well, which causes performance degradation in high-concurrency
   tests, for both I/O bandwidth and CPU usage (see the wake_batch
   sketch below).

   async_depth can be changed via sysfs, and its minimum value is 1.
   This is why min_shallow_depth is set to 1 at initialization: to keep
   the code functionally correct if async_depth is set to 1. However,
   sacrificing performance in the default scenario is not acceptable.

2) dd_to_word_depth() is supposed to scale async_depth down. However,
   the user can set a low nr_requests so that sb->depth becomes less
   than 1 << sb->shift, in which case dd_to_word_depth() ends up
   scaling async_depth up instead (see the worked example below).
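
For point 1, a simplified paraphrase of the wake_batch calculation
done by sbq_calc_wake_batch() in lib/sbitmap.c (a sketch, not the
verbatim kernel code; SBQ_WAIT_QUEUES and SBQ_WAKE_BATCH are both 8):

	/*
	 * Each bitmap word contributes at most
	 * min(bits_per_word, min_shallow_depth) bits to the effective
	 * depth, so min_shallow_depth == 1 collapses the effective
	 * depth to roughly one bit per word.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = (depth >> sbq->sb.shift) * shallow_depth +
		min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth);
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES,
			     1, SBQ_WAKE_BATCH);

With nr_requests == 256 and 64 bits per word, min_shallow_depth == 1
yields an effective depth of 4, and 4 / SBQ_WAIT_QUEUES clamps to
wake_batch == 1 instead of the default wake_batch == 8.

A worked example for point 2 (illustrative numbers): start with
nr_requests == 256, so sb->shift == 6 (64 bits per word), then lower
nr_requests to 32 via sysfs. The resize updates sb->depth to 32 but
keeps sb->shift at 6, so sb->depth < 1 << sb->shift. If async_depth is
then set to 24, dd_to_word_depth() returns
((24 << 6) + 32 - 1) / 32 == 48, and a per-word limit of 48 bits on a
bitmap with only 32 usable bits is no limit at all: async_depth has
been scaled up, not down.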

Fixes: 39823b47bbd4 ("block/mq-deadline: Fix the tag reservation code")
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/mq-deadline.c | 20 +++-----------------
 1 file changed, 3 insertions(+), 17 deletions(-)

Comments

Bart Van Assche Dec. 17, 2024, 9:39 p.m. UTC | #1
On 12/16/24 6:40 PM, Yu Kuai wrote:
> From: Yu Kuai <yukuai3@huawei.com>
> 
> This reverts commit 39823b47bbd40502632ffba90ebb34fff7c8b5e8.
> 
> 1) Setting min_shallow_depth to 1 ends up setting wake_batch to 1 as
>     well, which causes performance degradation in high-concurrency
>     tests, for both I/O bandwidth and CPU usage (see the wake_batch
>     sketch below).
> 
>     async_depth can be changed via sysfs, and its minimum value is 1.
>     This is why min_shallow_depth is set to 1 at initialization: to keep
>     the code functionally correct if async_depth is set to 1. However,
>     sacrificing performance in the default scenario is not acceptable.
> 
> 2) dd_to_word_depth() is supposed to scale async_depth down. However,
>     the user can set a low nr_requests so that sb->depth becomes less
>     than 1 << sb->shift, in which case dd_to_word_depth() ends up
>     scaling async_depth up instead (see the worked example below).

Although this patch fixes a performance regression, it breaks the
async_depth functionality. If we are going to break that functionality
temporarily, I propose to do something like this:

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 20a8a3afb88b..4cc7b5db4669 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -487,37 +487,12 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-/*
- * 'depth' is a number in the range 1..INT_MAX representing a number of
- * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
- * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
- * Values larger than q->nr_requests have the same effect as q->nr_requests.
- */
-static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
-{
-	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
-	const unsigned int nrr = hctx->queue->nr_requests;
-
-	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
-}
-
 /*
  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
  * function is used by __blk_mq_get_tag().
  */
 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 {
-	struct deadline_data *dd = data->q->elevator->elevator_data;
-
-	/* Do not throttle synchronous reads. */
-	if (op_is_sync(opf) && !op_is_write(opf))
-		return;
-
-	/*
-	 * Throttle asynchronous requests and writes such that these requests
-	 * do not block the allocation of synchronous requests.
-	 */
-	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
 }
 
 /* Called by blk_mq_update_nr_requests(). */
@@ -525,11 +500,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 {
 	struct request_queue *q = hctx->queue;
 	struct deadline_data *dd = q->elevator->elevator_data;
-	struct blk_mq_tags *tags = hctx->sched_tags;
 
 	dd->async_depth = q->nr_requests;
-
-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
 }
 
 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
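
With dd_limit_depth() emptied like this, data->shallow_depth stays
zero and tag allocation falls back to a plain allocation; roughly,
simplified from __blk_mq_get_tag() in block/blk-mq-tag.c:

	/* A zero shallow_depth means "no per-word limit". */
	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);

so async requests are not throttled at all until the functionality is
restored later in the series.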
Yu Kuai Dec. 18, 2024, 1:16 a.m. UTC | #2
Hi,

On 2024/12/18 5:39, Bart Van Assche wrote:
> On 12/16/24 6:40 PM, Yu Kuai wrote:
>> From: Yu Kuai <yukuai3@huawei.com>
>>
>> This reverts commit 39823b47bbd40502632ffba90ebb34fff7c8b5e8.
>>
>> 1) Setting min_shallow_depth to 1 ends up setting wake_batch to 1 as
>>     well, which causes performance degradation in high-concurrency
>>     tests, for both I/O bandwidth and CPU usage (see the wake_batch
>>     sketch below).
>>
>>     async_depth can be changed via sysfs, and its minimum value is 1.
>>     This is why min_shallow_depth is set to 1 at initialization: to keep
>>     the code functionally correct if async_depth is set to 1. However,
>>     sacrificing performance in the default scenario is not acceptable.
>>
>> 2) dd_to_word_depth() is supposed to scale async_depth down. However,
>>     the user can set a low nr_requests so that sb->depth becomes less
>>     than 1 << sb->shift, in which case dd_to_word_depth() ends up
>>     scaling async_depth up instead (see the worked example below).
> 
> Although this patch fixes a performance regression, it breaks the
> async_depth functionality. If we are going to break that functionality
> temporarily, I propose to do something like this:

Yes, I'll split this patch, and merge the async_depth changes into the
last patch.

Thanks,
Kuai

> 
> diff --git a/block/mq-deadline.c b/block/mq-deadline.c
> index 20a8a3afb88b..4cc7b5db4669 100644
> --- a/block/mq-deadline.c
> +++ b/block/mq-deadline.c
> @@ -487,37 +487,12 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
>  	return rq;
>  }
> 
> -/*
> - * 'depth' is a number in the range 1..INT_MAX representing a number of
> - * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
> - * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
> - * Values larger than q->nr_requests have the same effect as q->nr_requests.
> - */
> -static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
> -{
> -	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
> -	const unsigned int nrr = hctx->queue->nr_requests;
> -
> -	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
> -}
> -
>  /*
>   * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
>   * function is used by __blk_mq_get_tag().
>   */
>  static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
>  {
> -	struct deadline_data *dd = data->q->elevator->elevator_data;
> -
> -	/* Do not throttle synchronous reads. */
> -	if (op_is_sync(opf) && !op_is_write(opf))
> -		return;
> -
> -	/*
> -	 * Throttle asynchronous requests and writes such that these requests
> -	 * do not block the allocation of synchronous requests.
> -	 */
> -	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
>  }
> 
>  /* Called by blk_mq_update_nr_requests(). */
> @@ -525,11 +500,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
>  {
>  	struct request_queue *q = hctx->queue;
>  	struct deadline_data *dd = q->elevator->elevator_data;
> -	struct blk_mq_tags *tags = hctx->sched_tags;
> 
>  	dd->async_depth = q->nr_requests;
> -
> -	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
>  }
> 
>  /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */

Patch

diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 91b3789f710e..1f0d175a941e 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -487,20 +487,6 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 	return rq;
 }
 
-/*
- * 'depth' is a number in the range 1..INT_MAX representing a number of
- * requests. Scale it with a factor (1 << bt->sb.shift) / q->nr_requests since
- * 1..(1 << bt->sb.shift) is the range expected by sbitmap_get_shallow().
- * Values larger than q->nr_requests have the same effect as q->nr_requests.
- */
-static int dd_to_word_depth(struct blk_mq_hw_ctx *hctx, unsigned int qdepth)
-{
-	struct sbitmap_queue *bt = &hctx->sched_tags->bitmap_tags;
-	const unsigned int nrr = hctx->queue->nr_requests;
-
-	return ((qdepth << bt->sb.shift) + nrr - 1) / nrr;
-}
-
 /*
  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
  * function is used by __blk_mq_get_tag().
@@ -517,7 +503,7 @@ static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
 	 * Throttle asynchronous requests and writes such that these requests
 	 * do not block the allocation of synchronous requests.
 	 */
-	data->shallow_depth = dd_to_word_depth(data->hctx, dd->async_depth);
+	data->shallow_depth = dd->async_depth;
 }
 
 /* Called by blk_mq_update_nr_requests(). */
@@ -527,9 +513,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
 	struct deadline_data *dd = q->elevator->elevator_data;
 	struct blk_mq_tags *tags = hctx->sched_tags;
 
-	dd->async_depth = q->nr_requests;
+	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
 
-	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
+	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
 }
 
 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
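
With the defaults restored by this revert (illustrative numbers,
following the wake_batch sketch in the commit message): for
nr_requests == 256, async_depth == max(1UL, 3 * 256 / 4) == 192 and
min_shallow_depth == 192. Each 64-bit word is then capped at
min(64, 192) == 64 bits, the effective depth stays 256, and
wake_batch == clamp(256 / 8, 1, 8) == 8 again, instead of the degraded
wake_batch == 1.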