
bfq: Fix warning in bfqq_request_over_limit()

Message ID 20220407140738.9723-1-jack@suse.cz (mailing list archive)
State New, archived
Series bfq: Fix warning in bfqq_request_over_limit()

Commit Message

Jan Kara April 7, 2022, 2:07 p.m. UTC
People occasionally report that the warning in bfqq_request_over_limit()
triggers, indicating that BFQ's idea of the cgroup hierarchy (and its
depth) does not match what the generic blkcg code thinks. This can
actually happen when a bfqq gets moved between BFQ groups while
bfqq_request_over_limit() is running. Make sure the code is safe against
the BFQ queue being moved to a different BFQ group.

Fixes: 76f1df88bbc2 ("bfq: Limit number of requests consumed by each cgroup")
CC: stable@vger.kernel.org
Link: https://lore.kernel.org/all/CAJCQCtTw_2C7ZSz7as5Gvq=OmnDiio=HRkQekqWpKot84sQhFA@mail.gmail.com/
Reported-by: Chris Murphy <lists@colorremedies.com>
Reported-by: "yukuai (C)" <yukuai3@huawei.com>
Signed-off-by: Jan Kara <jack@suse.cz>
---
 block/bfq-iosched.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
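
The pattern the fix relies on is worth spelling out: the cgroup depth read
via the bfqq's group is only stable while bfqd->lock is held, so when the
inline array turns out to be too small the code has to drop the lock (the
GFP_NOIO allocation may sleep, which is not allowed under a spinlock),
allocate a bigger array, then retake the lock and re-read the depth, which
may have changed again in the meantime. Below is a minimal, hypothetical
userspace sketch of that unlock-allocate-retry idiom; the names, the
pthread mutex standing in for bfqd->lock, and plain malloc() standing in
for kmalloc_array() are illustrative only, not taken from the kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

#define INLINE_DEPTH 16

struct hierarchy {
	pthread_mutex_t lock;
	int depth;		/* may change whenever the lock is not held */
};

static bool over_limit(struct hierarchy *h, int limit)
{
	void *inline_buf[INLINE_DEPTH];
	void **entities = inline_buf;
	int alloc_depth = INLINE_DEPTH;
	int depth;
	bool ret = false;

retry:
	pthread_mutex_lock(&h->lock);
	depth = h->depth;	/* only trustworthy under the lock */
	if (depth > alloc_depth) {
		/* Buffer too small: allocation may sleep, so drop the lock first. */
		pthread_mutex_unlock(&h->lock);
		if (entities != inline_buf)
			free(entities);
		entities = malloc(depth * sizeof(*entities));
		if (!entities)
			return false;
		alloc_depth = depth;
		/* The depth may have changed again; re-check under the lock. */
		goto retry;
	}

	/* ... walk the hierarchy into entities[0..depth-1] under the lock ... */
	(void)limit;

	pthread_mutex_unlock(&h->lock);
	if (entities != inline_buf)
		free(entities);
	return ret;
}

The loop terminates in practice because alloc_depth only ever grows: once
it has caught up with the current depth, the walk proceeds entirely under
the lock, which is exactly what the patch arranges for
bfqq_request_over_limit().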

Comments

Jan Kara April 29, 2022, 12:01 p.m. UTC | #1
On Thu 07-04-22 16:07:38, Jan Kara wrote:
> People occasionally report that the warning in bfqq_request_over_limit()
> triggers, indicating that BFQ's idea of the cgroup hierarchy (and its
> depth) does not match what the generic blkcg code thinks. This can
> actually happen when a bfqq gets moved between BFQ groups while
> bfqq_request_over_limit() is running. Make sure the code is safe against
> the BFQ queue being moved to a different BFQ group.
> 
> Fixes: 76f1df88bbc2 ("bfq: Limit number of requests consumed by each cgroup")
> CC: stable@vger.kernel.org
> Link: https://lore.kernel.org/all/CAJCQCtTw_2C7ZSz7as5Gvq=OmnDiio=HRkQekqWpKot84sQhFA@mail.gmail.com/
> Reported-by: Chris Murphy <lists@colorremedies.com>
> Reported-by: "yukuai (C)" <yukuai3@huawei.com>
> Signed-off-by: Jan Kara <jack@suse.cz>

Ping guys? Can we get the fix in please? It is pretty trivial...

								Honza

> ---
>  block/bfq-iosched.c | 12 +++++++++---
>  1 file changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
> index e47c75f1fa0f..272d48d8f326 100644
> --- a/block/bfq-iosched.c
> +++ b/block/bfq-iosched.c
> @@ -569,7 +569,7 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
>  	struct bfq_entity *entity = &bfqq->entity;
>  	struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
>  	struct bfq_entity **entities = inline_entities;
> -	int depth, level;
> +	int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
>  	int class_idx = bfqq->ioprio_class - 1;
>  	struct bfq_sched_data *sched_data;
>  	unsigned long wsum;
> @@ -578,15 +578,21 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
>  	if (!entity->on_st_or_in_serv)
>  		return false;
>  
> +retry:
> +	spin_lock_irq(&bfqd->lock);
>  	/* +1 for bfqq entity, root cgroup not included */
>  	depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
> -	if (depth > BFQ_LIMIT_INLINE_DEPTH) {
> +	if (depth > alloc_depth) {
> +		spin_unlock_irq(&bfqd->lock);
> +		if (entities != inline_entities)
> +			kfree(entities);
>  		entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
>  		if (!entities)
>  			return false;
> +		alloc_depth = depth;
> +		goto retry;
>  	}
>  
> -	spin_lock_irq(&bfqd->lock);
>  	sched_data = entity->sched_data;
>  	/* Gather our ancestors as we need to traverse them in reverse order */
>  	level = 0;
> -- 
> 2.34.1
>
Jens Axboe April 29, 2022, 12:45 p.m. UTC | #2
On Thu, 7 Apr 2022 16:07:38 +0200, Jan Kara wrote:
> People occasionally report that the warning in bfqq_request_over_limit()
> triggers, indicating that BFQ's idea of the cgroup hierarchy (and its
> depth) does not match what the generic blkcg code thinks. This can
> actually happen when a bfqq gets moved between BFQ groups while
> bfqq_request_over_limit() is running. Make sure the code is safe against
> the BFQ queue being moved to a different BFQ group.
> 
> [...]

Applied, thanks!

[1/1] bfq: Fix warning in bfqq_request_over_limit()
      commit: 09df6a75fffa68169c5ef9bef990cd7ba94f3eef

Best regards,

Patch

diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index e47c75f1fa0f..272d48d8f326 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -569,7 +569,7 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
 	struct bfq_entity *entity = &bfqq->entity;
 	struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
 	struct bfq_entity **entities = inline_entities;
-	int depth, level;
+	int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH;
 	int class_idx = bfqq->ioprio_class - 1;
 	struct bfq_sched_data *sched_data;
 	unsigned long wsum;
@@ -578,15 +578,21 @@ static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
 	if (!entity->on_st_or_in_serv)
 		return false;
 
+retry:
+	spin_lock_irq(&bfqd->lock);
 	/* +1 for bfqq entity, root cgroup not included */
 	depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
-	if (depth > BFQ_LIMIT_INLINE_DEPTH) {
+	if (depth > alloc_depth) {
+		spin_unlock_irq(&bfqd->lock);
+		if (entities != inline_entities)
+			kfree(entities);
 		entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
 		if (!entities)
 			return false;
+		alloc_depth = depth;
+		goto retry;
 	}
 
-	spin_lock_irq(&bfqd->lock);
 	sched_data = entity->sched_data;
 	/* Gather our ancestors as we need to traverse them in reverse order */
 	level = 0;