
[1/7] blk-mq: sync wake_batch update and users number change

Message ID 20230209201116.579809-2-shikemeng@huaweicloud.com (mailing list archive)
State New, archived
Series A few bugfix and cleanup patches to blk-mq

Commit Message

Kemeng Shi Feb. 9, 2023, 8:11 p.m. UTC
Commit 180dccb0dba4f ("blk-mq: fix tag_get wait task can't be awakened")
added recalculation of wake_batch when active_queues changes to avoid IO
hangs.
The functions blk_mq_tag_idle() and blk_mq_tag_busy() can be called
concurrently, so wake_batch may be updated with a stale users count. For
example, if tag allocations for two shared queues happen concurrently,
blk_mq_tag_busy() may be executed as follows:
thread1  			thread2
atomic_inc_return
				atomic_inc_return
				blk_mq_update_wake_batch
blk_mq_update_wake_batch

1. Thread1 increases active_queues from zero to one.
2. Thread2 increases active_queues from one to two.
3. Thread2 calculates wake_batch with the latest active_queues number, two.
4. Thread1 calculates wake_batch with the stale active_queues number, one.
wake_batch then ends up inconsistent with the actual active_queues. If
wake_batch is calculated with an active_queues number smaller than the
actual one, wake_batch will be greater than it is supposed to be and can
cause an IO hang.

Fix this by synchronizing the wake_batch update with the users number
change, keeping wake_batch consistent with active_queues.

Fixes: 180dccb0dba4 ("blk-mq: fix tag_get wait task can't be awakened")
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 block/blk-mq-tag.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
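
To make the race concrete outside the kernel, here is a minimal userspace
sketch of the same lost-update pattern (illustrative only; the names, the
tag count, and the pthread scaffolding are assumptions for this example,
not code from the patch):

/*
 * Two threads each bump active_queues atomically and then recompute
 * wake_batch from the value they observed.  Without the lock, the
 * thread that read the stale count can perform the wake_batch write
 * last, leaving wake_batch too large for the real number of users.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TOTAL_TAGS	8U

static atomic_uint active_queues;
static unsigned int wake_batch = TOTAL_TAGS;	/* shared, like the tags' wake_batch */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the wake_batch recalculation: more users, smaller batch. */
static void update_wake_batch(unsigned int users)
{
	wake_batch = TOTAL_TAGS / (users ? users : 1);
}

static void *tag_busy(void *arg)
{
	unsigned int users;

	(void)arg;
	/*
	 * The fix: make "increment users" and "recompute wake_batch" one
	 * serialized step, so a stale users value can never win the update.
	 */
	pthread_mutex_lock(&lock);
	users = atomic_fetch_add(&active_queues, 1) + 1;
	update_wake_batch(users);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, tag_busy, NULL);
	pthread_create(&t2, NULL, tag_busy, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/*
	 * With the lock held around both steps this always prints
	 * active_queues=2 wake_batch=4; without the lock, wake_batch
	 * may still be 8 after both threads finish.
	 */
	printf("active_queues=%u wake_batch=%u\n",
	       (unsigned int)atomic_load(&active_queues), wake_batch);
	return 0;
}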

Comments

Jan Kara Feb. 9, 2023, 1:43 p.m. UTC | #1
On Fri 10-02-23 04:11:10, Kemeng Shi wrote:
> Commit 180dccb0dba4f ("blk-mq: fix tag_get wait task can't be awakened")
> added recalculation of wake_batch when active_queues changes to avoid IO
> hangs.
> The functions blk_mq_tag_idle() and blk_mq_tag_busy() can be called
> concurrently, so wake_batch may be updated with a stale users count. For
> example, if tag allocations for two shared queues happen concurrently,
> blk_mq_tag_busy() may be executed as follows:
> thread1  			thread2
> atomic_inc_return
> 				atomic_inc_return
> 				blk_mq_update_wake_batch
> blk_mq_update_wake_batch
> 
> 1. Thread1 increases active_queues from zero to one.
> 2. Thread2 increases active_queues from one to two.
> 3. Thread2 calculates wake_batch with the latest active_queues number, two.
> 4. Thread1 calculates wake_batch with the stale active_queues number, one.
> wake_batch then ends up inconsistent with the actual active_queues. If
> wake_batch is calculated with an active_queues number smaller than the
> actual one, wake_batch will be greater than it is supposed to be and can
> cause an IO hang.
> 
> Fix this by synchronizing the wake_batch update with the users number
> change, keeping wake_batch consistent with active_queues.
> 
> Fixes: 180dccb0dba4 ("blk-mq: fix tag_get wait task can't be awakened")
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>

OK, luckily this extra spin_lock happens only when adding and removing a
busy queue, which should be reasonably rare. So looks good to me. Feel free
to add:

Reviewed-by: Jan Kara <jack@suse.cz>

								Honza


Patch

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 9eb968e14d31..1d3135acfc98 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -39,7 +39,9 @@  static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
  */
 void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
+	struct blk_mq_tags *tags = hctx->tags;
 	unsigned int users;
+	unsigned long flags;
 
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
@@ -53,9 +55,11 @@  void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 		set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
 	}
 
-	users = atomic_inc_return(&hctx->tags->active_queues);
+	spin_lock_irqsave(&tags->lock, flags);
+	users = atomic_inc_return(&tags->active_queues);
 
-	blk_mq_update_wake_batch(hctx->tags, users);
+	blk_mq_update_wake_batch(tags, users);
+	spin_unlock_irqrestore(&tags->lock, flags);
 }
 
 /*
@@ -76,6 +80,7 @@  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
 	struct blk_mq_tags *tags = hctx->tags;
 	unsigned int users;
+	unsigned long flags;
 
 	if (blk_mq_is_shared_tags(hctx->flags)) {
 		struct request_queue *q = hctx->queue;
@@ -88,9 +93,11 @@  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 			return;
 	}
 
+	spin_lock_irqsave(&tags->lock, flags);
 	users = atomic_dec_return(&tags->active_queues);
 
 	blk_mq_update_wake_batch(tags, users);
+	spin_unlock_irqrestore(&tags->lock, flags);
 
 	blk_mq_tag_wakeup_all(tags, false);
 }