
[2/3] blk-mq: Move the TAG_ACTIVE and SCHED_RESTART flags from hctx into blk_mq_tags

Message ID 20191126175656.67638-3-bvanassche@acm.org
State New, archived
Series blk-mq: Support sharing tags across hardware queues

Commit Message

Bart Van Assche Nov. 26, 2019, 5:56 p.m. UTC
If each hardware queue has its own tag set it's fine to manage these
flags per hardware queue. Since the next patch will share tag sets across
hardware queues, move these flags into blk_mq_tags. This patch does not
change any functionality.

Cc: Christoph Hellwig <hch@infradead.org>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.com>
Cc: John Garry <john.garry@huawei.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-mq-debugfs.c |  2 --
 block/blk-mq-sched.c   |  8 ++++----
 block/blk-mq-sched.h   |  2 +-
 block/blk-mq-tag.c     |  8 ++++----
 block/blk-mq-tag.h     | 10 ++++++++++
 include/linux/blk-mq.h |  2 --
 6 files changed, 19 insertions(+), 13 deletions(-)

Comments

Ming Lei Nov. 27, 2019, 12:43 a.m. UTC | #1
On Tue, Nov 26, 2019 at 09:56:55AM -0800, Bart Van Assche wrote:
> If each hardware queue has its own tag set it's fine to manage these
> flags per hardware queue. Since the next patch will share tag sets across
> hardware queues, move these flags into blk_mq_tags. This patch does not
> change any functionality.
> 
> Cc: Christoph Hellwig <hch@infradead.org>
> Cc: Ming Lei <ming.lei@redhat.com>
> Cc: Hannes Reinecke <hare@suse.com>
> Cc: John Garry <john.garry@huawei.com>
> Signed-off-by: Bart Van Assche <bvanassche@acm.org>
> ---
>  block/blk-mq-debugfs.c |  2 --
>  block/blk-mq-sched.c   |  8 ++++----
>  block/blk-mq-sched.h   |  2 +-
>  block/blk-mq-tag.c     |  8 ++++----
>  block/blk-mq-tag.h     | 10 ++++++++++
>  include/linux/blk-mq.h |  2 --
>  6 files changed, 19 insertions(+), 13 deletions(-)
> 
> diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
> index b3f2ba483992..3678e95ec947 100644
> --- a/block/blk-mq-debugfs.c
> +++ b/block/blk-mq-debugfs.c
> @@ -211,8 +211,6 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
>  #define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
>  static const char *const hctx_state_name[] = {
>  	HCTX_STATE_NAME(STOPPED),
> -	HCTX_STATE_NAME(TAG_ACTIVE),
> -	HCTX_STATE_NAME(SCHED_RESTART),
>  };
>  #undef HCTX_STATE_NAME
>  
> diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
> index ca22afd47b3d..7d98b6513148 100644
> --- a/block/blk-mq-sched.c
> +++ b/block/blk-mq-sched.c
> @@ -64,18 +64,18 @@ void blk_mq_sched_assign_ioc(struct request *rq)
>   */
>  void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
>  {
> -	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
> +	if (test_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state))
>  		return;
>  
> -	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
> +	set_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state);
>  }
>  EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
>  
>  void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
>  {
> -	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
> +	if (!test_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state))
>  		return;
> -	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
> +	clear_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state);
>  
>  	blk_mq_run_hw_queue(hctx, true);
>  }

SCHED_RESTART is meant to restart a hctx of this request queue, not the
host-wide tag set; the host-wide case is already covered by
blk_mq_mark_tag_wait().
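
To make that concrete, here is a small standalone model (plain userspace C,
hypothetical model_* names, not kernel code) of what can happen once the flag
lives in the shared tags->state: a restart marked on behalf of one hctx can be
consumed by a completion on a sibling hctx, so the hctx that actually ran out
of resources is never rerun.

/*
 * Standalone userspace model (hypothetical names, not kernel code): with
 * SCHED_RESTART kept in the shared tags->state, whichever hctx sharing the
 * tags runs blk_mq_sched_restart() first consumes the bit.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_tags {
	bool sched_restart;		/* single bit shared by all hctxs */
};

struct model_hctx {
	const char *name;
	struct model_tags *tags;
	bool reran;
};

/* Models blk_mq_sched_mark_restart_hctx() after this patch. */
static void mark_restart(struct model_hctx *hctx)
{
	hctx->tags->sched_restart = true;
}

/* Models blk_mq_sched_restart() after this patch. */
static void sched_restart(struct model_hctx *hctx)
{
	if (!hctx->tags->sched_restart)
		return;
	hctx->tags->sched_restart = false;
	hctx->reran = true;		/* stands in for blk_mq_run_hw_queue() */
}

int main(void)
{
	struct model_tags tags = { false };
	struct model_hctx a = { "hctx A", &tags, false };
	struct model_hctx b = { "hctx B", &tags, false };

	mark_restart(&a);	/* A ran out of resources, wants a rerun  */
	sched_restart(&b);	/* a completion on B consumes the bit     */
	sched_restart(&a);	/* A's own completion finds the bit clear */

	printf("%s rerun: %s\n", a.name, a.reran ? "yes" : "no");	/* no  */
	printf("%s rerun: %s\n", b.name, b.reran ? "yes" : "no");	/* yes */
	return 0;
}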

> diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
> index 126021fc3a11..15174a646468 100644
> --- a/block/blk-mq-sched.h
> +++ b/block/blk-mq-sched.h
> @@ -82,7 +82,7 @@ static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
>  
>  static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
>  {
> -	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
> +	return test_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state);
>  }
>  
>  #endif
> diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> index 586c9d6e904a..a60e1b4a8158 100644
> --- a/block/blk-mq-tag.c
> +++ b/block/blk-mq-tag.c
> @@ -23,8 +23,8 @@
>   */
>  bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
>  {
> -	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
> -	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
> +	if (!test_bit(BLK_MQ_T_ACTIVE, &hctx->tags->state) &&
> +	    !test_and_set_bit(BLK_MQ_T_ACTIVE, &hctx->tags->state))
>  		atomic_inc(&hctx->tags->active_queues);

The above is wrong.

With this change, tags->active_queues may end up as just 1, while the
variable is supposed to represent the number of active LUNs using this
shared tag set.

That is to say, the BLK_MQ_T_ACTIVE flag is really per-hctx state rather
than per-tags state.
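
The counting problem can be shown with a standalone model (userspace C,
hypothetical names; the tag and hctx counts are made up): with the bit per
hctx every busy hctx bumps active_queues once, while with the bit in the
shared tags only the first one does, so the fair-share depth no longer
throttles anybody.

/*
 * Standalone userspace model (hypothetical names; tag and hctx counts are
 * made up): how the per-hctx TAG_ACTIVE bit feeds active_queues, and what
 * the fair-share depth degenerates to once the bit sits in the shared tags.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_HCTX		4	/* e.g. four LUNs sharing one tag set */
#define NR_TAGS		256

struct model_tags {
	bool tag_active;	/* single bit shared by all hctxs */
	int  active_queues;
};

struct model_hctx {
	bool tag_active;	/* one bit per hctx, as before this patch */
	struct model_tags *tags;
};

/* Models __blk_mq_tag_busy() with the flag kept per hctx. */
static void tag_busy_per_hctx(struct model_hctx *hctx)
{
	if (!hctx->tag_active) {
		hctx->tag_active = true;
		hctx->tags->active_queues++;
	}
}

/* Models __blk_mq_tag_busy() with the flag moved into the shared tags. */
static void tag_busy_per_tags(struct model_hctx *hctx)
{
	if (!hctx->tags->tag_active) {
		hctx->tags->tag_active = true;
		hctx->tags->active_queues++;	/* only the first hctx gets here */
	}
}

/* Fair share, roughly as in hctx_may_queue(): depth / users, floor of 4. */
static int fair_share(const struct model_tags *tags)
{
	int users = tags->active_queues ? tags->active_queues : 1;
	int depth = NR_TAGS / users;

	return depth > 4 ? depth : 4;
}

int main(void)
{
	struct model_tags tags = { false, 0 };
	struct model_hctx hctx[NR_HCTX];
	int i;

	for (i = 0; i < NR_HCTX; i++)
		hctx[i] = (struct model_hctx){ .tags = &tags };

	for (i = 0; i < NR_HCTX; i++)
		tag_busy_per_hctx(&hctx[i]);
	printf("per-hctx bit: active_queues=%d, per-queue share=%d\n",
	       tags.active_queues, fair_share(&tags));

	tags = (struct model_tags){ false, 0 };
	for (i = 0; i < NR_HCTX; i++)
		tag_busy_per_tags(&hctx[i]);
	printf("per-tags bit: active_queues=%d, per-queue share=%d\n",
	       tags.active_queues, fair_share(&tags));
	return 0;
}

With four busy hctxs the per-hctx variant reports active_queues=4 and a
64-tag share per queue; the per-tags variant reports active_queues=1 and
hands every queue the full 256.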

>  
>  	return true;
> @@ -48,7 +48,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
>  {
>  	struct blk_mq_tags *tags = hctx->tags;
>  
> -	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
> +	if (!test_and_clear_bit(BLK_MQ_T_ACTIVE, &hctx->tags->state))
>  		return;
>  
>  	atomic_dec(&tags->active_queues);
> @@ -67,7 +67,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
>  
>  	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
>  		return true;
> -	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
> +	if (!test_bit(BLK_MQ_T_ACTIVE, &hctx->tags->state))
>  		return true;
>  
>  	/*
> diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
> index d0c10d043891..f75fa936b090 100644
> --- a/block/blk-mq-tag.h
> +++ b/block/blk-mq-tag.h
> @@ -4,6 +4,11 @@
>  
>  #include "blk-mq.h"
>  
> +enum {
> +	BLK_MQ_T_ACTIVE		= 1,
> +	BLK_MQ_T_SCHED_RESTART	= 2,
> +};
> +
>  /*
>   * Tag address space map.
>   */
> @@ -11,6 +16,11 @@ struct blk_mq_tags {
>  	unsigned int nr_tags;
>  	unsigned int nr_reserved_tags;
>  
> +	/**
> +	 * @state: BLK_MQ_T_* flags. Defines the state of the hw
> +	 * queue (active, scheduled to restart).
> +	 */
> +	unsigned long	state;

It isn't unusual for a SCSI HBA to expose hundreds of LUNs, and this patch
makes .state shared across all of those LUNs, read and written concurrently
from the I/O path of every queue. Performance is likely to suffer, given
that all of the related helpers sit in the hot path.
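
A rough feel for that cost can be had from the standalone pthread sketch
below (illustrative only; the queue count, iteration count and 64-byte
cacheline size are assumptions, nothing here is block layer code): several
submission paths doing set_bit-style atomic read-modify-writes on one shared
word, versus each path owning a cacheline-aligned word of its own.

/*
 * Standalone pthread sketch (illustrative only): the shared-word variant
 * bounces a single cacheline between all CPUs, the per-queue variant does
 * not.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define NR_QUEUES	8
#define ITERATIONS	(1 << 22)

static _Atomic unsigned long shared_state;	/* one word for every queue */

struct padded_state {
	_Alignas(64) _Atomic unsigned long state;	/* one cacheline each */
};

static struct padded_state per_queue_state[NR_QUEUES];

struct worker_arg {
	int queue;
	int use_shared;
};

static void *worker(void *p)
{
	struct worker_arg *arg = p;
	_Atomic unsigned long *word = arg->use_shared ?
		&shared_state : &per_queue_state[arg->queue].state;

	for (long i = 0; i < ITERATIONS; i++)
		atomic_fetch_or(word, 1UL << (arg->queue & 7));
	return NULL;
}

static double run(int use_shared)
{
	pthread_t tid[NR_QUEUES];
	struct worker_arg arg[NR_QUEUES];
	struct timespec t0, t1;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (i = 0; i < NR_QUEUES; i++) {
		arg[i] = (struct worker_arg){ .queue = i, .use_shared = use_shared };
		pthread_create(&tid[i], NULL, worker, &arg[i]);
	}
	for (i = 0; i < NR_QUEUES; i++)
		pthread_join(tid[i], NULL);
	clock_gettime(CLOCK_MONOTONIC, &t1);

	return (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
}

int main(void)
{
	printf("shared state word:    %.2fs\n", run(1));
	printf("per-queue state word: %.2fs\n", run(0));
	return 0;
}

Built with -pthread, the shared-word run is expected to be noticeably slower
than the per-queue run on a multi-core machine, which is the effect the
hot-path helpers would see.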


Thanks,
Ming

Patch

diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index b3f2ba483992..3678e95ec947 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -211,8 +211,6 @@  static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
 #define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
 static const char *const hctx_state_name[] = {
 	HCTX_STATE_NAME(STOPPED),
-	HCTX_STATE_NAME(TAG_ACTIVE),
-	HCTX_STATE_NAME(SCHED_RESTART),
 };
 #undef HCTX_STATE_NAME
 
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index ca22afd47b3d..7d98b6513148 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -64,18 +64,18 @@  void blk_mq_sched_assign_ioc(struct request *rq)
  */
 void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
 {
-	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+	if (test_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state))
 		return;
 
-	set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+	set_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state);
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
 
 void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
 {
-	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+	if (!test_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state))
 		return;
-	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+	clear_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state);
 
 	blk_mq_run_hw_queue(hctx, true);
 }
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 126021fc3a11..15174a646468 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -82,7 +82,7 @@  static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
 
 static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
 {
-	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+	return test_bit(BLK_MQ_T_SCHED_RESTART, &hctx->tags->state);
 }
 
 #endif
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 586c9d6e904a..a60e1b4a8158 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -23,8 +23,8 @@ 
  */
 bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
-	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
-	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+	if (!test_bit(BLK_MQ_T_ACTIVE, &hctx->tags->state) &&
+	    !test_and_set_bit(BLK_MQ_T_ACTIVE, &hctx->tags->state))
 		atomic_inc(&hctx->tags->active_queues);
 
 	return true;
@@ -48,7 +48,7 @@  void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 {
 	struct blk_mq_tags *tags = hctx->tags;
 
-	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+	if (!test_and_clear_bit(BLK_MQ_T_ACTIVE, &hctx->tags->state))
 		return;
 
 	atomic_dec(&tags->active_queues);
@@ -67,7 +67,7 @@  static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 
 	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
 		return true;
-	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+	if (!test_bit(BLK_MQ_T_ACTIVE, &hctx->tags->state))
 		return true;
 
 	/*
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index d0c10d043891..f75fa936b090 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -4,6 +4,11 @@ 
 
 #include "blk-mq.h"
 
+enum {
+	BLK_MQ_T_ACTIVE		= 1,
+	BLK_MQ_T_SCHED_RESTART	= 2,
+};
+
 /*
  * Tag address space map.
  */
@@ -11,6 +16,11 @@  struct blk_mq_tags {
 	unsigned int nr_tags;
 	unsigned int nr_reserved_tags;
 
+	/**
+	 * @state: BLK_MQ_T_* flags. Defines the state of the hw
+	 * queue (active, scheduled to restart).
+	 */
+	unsigned long	state;
 	atomic_t active_queues;
 
 	struct sbitmap_queue bitmap_tags;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 11cfd6470b1a..522631d108af 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -394,8 +394,6 @@  enum {
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 
 	BLK_MQ_S_STOPPED	= 0,
-	BLK_MQ_S_TAG_ACTIVE	= 1,
-	BLK_MQ_S_SCHED_RESTART	= 2,
 
 	BLK_MQ_MAX_DEPTH	= 10240,