diff mbox series

[2/2] blk-mq: convert blk_mq_tag.active_queues to refcount_t

Message ID 20190322144818.987-3-yuyufen@huawei.com (mailing list archive)
State New, archived
Headers show
Series blk-mq: convert atomic_t to refcount_t | expand

Commit Message

Yufen Yu March 22, 2019, 2:48 p.m. UTC
We convert 'active_queues' from atomic_t to the newly provided
refcount_t type and API, which can prevent accidental counter
overflows and underflows.

Signed-off-by: Yufen Yu <yuyufen@huawei.com>
---
 block/blk-mq-debugfs.c | 2 +-
 block/blk-mq-tag.c     | 6 +++---
 block/blk-mq-tag.h     | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

Comments

Peter Zijlstra March 22, 2019, 3:02 p.m. UTC | #1
On Fri, Mar 22, 2019 at 10:48:18PM +0800, Yufen Yu wrote:
> We convert 'active_queues' from atomic_t to newly provied
> refcount_t type and API, which can prevent accidental counter
> overflows and underflows.

There is no initialization using refcount_set(), which leads me to
believe we're 0 initialized and then it's broken again.

> Signed-off-by: Yufen Yu <yuyufen@huawei.com>
> ---
>  block/blk-mq-debugfs.c | 2 +-
>  block/blk-mq-tag.c     | 6 +++---
>  block/blk-mq-tag.h     | 2 +-
>  3 files changed, 5 insertions(+), 5 deletions(-)
> 
> diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
> index 81536b7201be..48f0cc2c90ba 100644
> --- a/block/blk-mq-debugfs.c
> +++ b/block/blk-mq-debugfs.c
> @@ -473,7 +473,7 @@ static void blk_mq_debugfs_tags_show(struct seq_file *m,
>  	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
>  	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
>  	seq_printf(m, "active_queues=%d\n",
> -		   atomic_read(&tags->active_queues));
> +		   refcount_read(&tags->active_queues));
>  
>  	seq_puts(m, "\nbitmap_tags:\n");
>  	sbitmap_queue_show(&tags->bitmap_tags, m);
> diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> index 3fcb15fa6398..1d713f221bf7 100644
> --- a/block/blk-mq-tag.c
> +++ b/block/blk-mq-tag.c
> @@ -31,7 +31,7 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
>  {
>  	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
>  	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
> -		atomic_inc(&hctx->tags->active_queues);
> +		refcount_inc(&hctx->tags->active_queues);
>  
>  	return true;
>  }
> @@ -57,7 +57,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
>  	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
>  		return;
>  
> -	atomic_dec(&tags->active_queues);
> +	refcount_dec(&tags->active_queues);
>  
>  	blk_mq_tag_wakeup_all(tags, false);
>  }
> @@ -82,7 +82,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
>  	if (bt->sb.depth == 1)
>  		return true;
>  
> -	users = atomic_read(&hctx->tags->active_queues);
> +	users = refcount_read(&hctx->tags->active_queues);
>  	if (!users)
>  		return true;
>  
> diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
> index 61deab0b5a5a..e948b4833a2a 100644
> --- a/block/blk-mq-tag.h
> +++ b/block/blk-mq-tag.h
> @@ -11,7 +11,7 @@ struct blk_mq_tags {
>  	unsigned int nr_tags;
>  	unsigned int nr_reserved_tags;
>  
> -	atomic_t active_queues;
> +	refcount_t active_queues;
>  
>  	struct sbitmap_queue bitmap_tags;
>  	struct sbitmap_queue breserved_tags;
> -- 
> 2.16.2.dirty
>
diff mbox series

Patch

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 81536b7201be..48f0cc2c90ba 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -473,7 +473,7 @@  static void blk_mq_debugfs_tags_show(struct seq_file *m,
 	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
 	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
 	seq_printf(m, "active_queues=%d\n",
-		   atomic_read(&tags->active_queues));
+		   refcount_read(&tags->active_queues));
 
 	seq_puts(m, "\nbitmap_tags:\n");
 	sbitmap_queue_show(&tags->bitmap_tags, m);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 3fcb15fa6398..1d713f221bf7 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -31,7 +31,7 @@  bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
 	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
 	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-		atomic_inc(&hctx->tags->active_queues);
+		refcount_inc(&hctx->tags->active_queues);
 
 	return true;
 }
@@ -57,7 +57,7 @@  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 		return;
 
-	atomic_dec(&tags->active_queues);
+	refcount_dec(&tags->active_queues);
 
 	blk_mq_tag_wakeup_all(tags, false);
 }
@@ -82,7 +82,7 @@  static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	if (bt->sb.depth == 1)
 		return true;
 
-	users = atomic_read(&hctx->tags->active_queues);
+	users = refcount_read(&hctx->tags->active_queues);
 	if (!users)
 		return true;
 
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..e948b4833a2a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -11,7 +11,7 @@  struct blk_mq_tags {
 	unsigned int nr_tags;
 	unsigned int nr_reserved_tags;
 
-	atomic_t active_queues;
+	refcount_t active_queues;
 
 	struct sbitmap_queue bitmap_tags;
 	struct sbitmap_queue breserved_tags;