
[V6,3/3] blk-mq: don't deactivate hctx if managed irq isn't used

Message ID 20210722095246.1240526-4-ming.lei@redhat.com (mailing list archive)
State New, archived
Series blk-mq: fix blk_mq_alloc_request_hctx

Commit Message

Ming Lei July 22, 2021, 9:52 a.m. UTC
blk-mq deactivates a hctx when the last CPU in hctx->cpumask goes
offline: it drains all requests originating from that hctx and moves new
allocations to other active hctxs. This avoids inflight IO in the
managed-irq case, because a managed irq is shut down when the last CPU
in the irq's affinity mask goes offline.

However, lots of drivers (nvme fc, rdma, tcp, loop, ...) don't use
managed irqs, so they needn't deactivate a hctx when its last CPU goes
offline. Also, some of them are the only users of
blk_mq_alloc_request_hctx(), which is used for connecting io queues, and
they require that the connect request be submitted successfully via one
specified hctx even though all CPUs in that hctx->cpumask have gone
offline.

Address the requirement for nvme fc/rdma/loop by allowing a request to
be allocated from a hctx when all CPUs in that hctx are offline, since
these drivers don't use managed irqs.

Finally, don't deactivate a hctx when it doesn't use a managed irq.
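
For instance (hypothetical driver shown; the use_managed_irq queue-map
flag is introduced earlier in this series), such a driver simply leaves
the flag clear when mapping its queues, and blk-mq will no longer drain
its hctxs on CPU offline:

static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct blk_mq_queue_map *qmap = &set->map[HCTX_TYPE_DEFAULT];

	/* irqs here aren't managed, so hctx deactivation isn't needed */
	qmap->use_managed_irq = 0;

	return blk_mq_map_queues(qmap);
}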

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq.c | 27 +++++++++++++++++----------
 block/blk-mq.h |  8 ++++++++
 2 files changed, 25 insertions(+), 10 deletions(-)

Comments

John Garry Aug. 18, 2021, 9:38 a.m. UTC | #1
On 22/07/2021 10:52, Ming Lei wrote:
> blk-mq deactivates a hctx when the last CPU in hctx->cpumask goes
> offline: it drains all requests originating from that hctx and moves new
> allocations to other active hctxs. This avoids inflight IO in the
> managed-irq case, because a managed irq is shut down when the last CPU
> in the irq's affinity mask goes offline.
> 
> However, lots of drivers (nvme fc, rdma, tcp, loop, ...) don't use
> managed irqs, so they needn't deactivate a hctx when its last CPU goes
> offline. Also, some of them are the only users of
> blk_mq_alloc_request_hctx(), which is used for connecting io queues, and
> they require that the connect request be submitted successfully via one
> specified hctx even though all CPUs in that hctx->cpumask have gone
> offline.
> 
> Address the requirement for nvme fc/rdma/loop by allowing a request to
> be allocated from a hctx when all CPUs in that hctx are offline, since
> these drivers don't use managed irqs.
> 
> Finally, don't deactivate a hctx when it doesn't use a managed irq.
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Signed-off-by: Ming Lei <ming.lei@redhat.com>

Sorry for the lateness. Just a few minor comments below; regardless:

Reviewed-by: John Garry <john.garry@huawei.com>

> ---
>   block/blk-mq.c | 27 +++++++++++++++++----------
>   block/blk-mq.h |  8 ++++++++
>   2 files changed, 25 insertions(+), 10 deletions(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 2c4ac51e54eb..591ab07c64d8 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -427,6 +427,15 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
>   }
>   EXPORT_SYMBOL(blk_mq_alloc_request);
>   
> +static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)

I don't see why it needs to be inline. I think generally we leave it to 
the compiler to decide.

> +{
> +	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
> +
> +	if (cpu >= nr_cpu_ids)
> +		cpu = cpumask_first(hctx->cpumask);
> +	return cpu;
> +}
> +
>   struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
>   	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
>   {
> @@ -468,7 +477,10 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
>   	data.hctx = q->queue_hw_ctx[hctx_idx];
>   	if (!blk_mq_hw_queue_mapped(data.hctx))
>   		goto out_queue_exit;
> -	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
> +
> +	WARN_ON_ONCE(blk_mq_hctx_use_managed_irq(data.hctx));
> +
> +	cpu = blk_mq_first_mapped_cpu(data.hctx);
>   	data.ctx = __blk_mq_get_ctx(q, cpu);
>   
>   	if (!q->elevator)
> @@ -1501,15 +1513,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
>   	hctx_unlock(hctx, srcu_idx);
>   }
>   
> -static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
> -{
> -	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
> -
> -	if (cpu >= nr_cpu_ids)
> -		cpu = cpumask_first(hctx->cpumask);
> -	return cpu;
> -}
> -
>   /*
>    * It'd be great if the workqueue API had a way to pass
>    * in a mask and had some smarts for more clever placement.
> @@ -2556,6 +2559,10 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
>   	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
>   			struct blk_mq_hw_ctx, cpuhp_online);
>   
> +	/* hctx needn't be deactivated in case managed irq isn't used */
> +	if (!blk_mq_hctx_use_managed_irq(hctx))
> +		return 0;

In a previous version (v2) I think that we just didn't register the cpu 
hotplug handlers, so I'm not sure why that changed.

> +
>   	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
>   	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
>   		return 0;
> diff --git a/block/blk-mq.h b/block/blk-mq.h
> index d08779f77a26..7333b659d8f5 100644
> --- a/block/blk-mq.h
> +++ b/block/blk-mq.h
> @@ -119,6 +119,14 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
>   	return ctx->hctxs[type];
>   }
>   
> +static inline bool blk_mq_hctx_use_managed_irq(struct blk_mq_hw_ctx *hctx)
> +{
> +	if (hctx->type == HCTX_TYPE_POLL)
> +		return false;
> +
> +	return hctx->queue->tag_set->map[hctx->type].use_managed_irq;
> +}

This function only seems to be used in blk-mq.c, so I'm not sure why 
you put it in the .h file.

Ming Lei Aug. 18, 2021, 10:32 a.m. UTC | #2
On Wed, Aug 18, 2021 at 10:38:15AM +0100, John Garry wrote:
> On 22/07/2021 10:52, Ming Lei wrote:
> > blk-mq deactivates a hctx when the last CPU in hctx->cpumask goes
> > offline: it drains all requests originating from that hctx and moves new
> > allocations to other active hctxs. This avoids inflight IO in the
> > managed-irq case, because a managed irq is shut down when the last CPU
> > in the irq's affinity mask goes offline.
> > 
> > However, lots of drivers (nvme fc, rdma, tcp, loop, ...) don't use
> > managed irqs, so they needn't deactivate a hctx when its last CPU goes
> > offline. Also, some of them are the only users of
> > blk_mq_alloc_request_hctx(), which is used for connecting io queues, and
> > they require that the connect request be submitted successfully via one
> > specified hctx even though all CPUs in that hctx->cpumask have gone
> > offline.
> > 
> > Address the requirement for nvme fc/rdma/loop by allowing a request to
> > be allocated from a hctx when all CPUs in that hctx are offline, since
> > these drivers don't use managed irqs.
> > 
> > Finally, don't deactivate a hctx when it doesn't use a managed irq.
> > 
> > Reviewed-by: Christoph Hellwig <hch@lst.de>
> > Signed-off-by: Ming Lei <ming.lei@redhat.com>
> 
> Sorry for the lateness. Just a few minor comments below; regardless:
> 
> Reviewed-by: John Garry <john.garry@huawei.com>
> 
> > ---
> >   block/blk-mq.c | 27 +++++++++++++++++----------
> >   block/blk-mq.h |  8 ++++++++
> >   2 files changed, 25 insertions(+), 10 deletions(-)
> > 
> > diff --git a/block/blk-mq.c b/block/blk-mq.c
> > index 2c4ac51e54eb..591ab07c64d8 100644
> > --- a/block/blk-mq.c
> > +++ b/block/blk-mq.c
> > @@ -427,6 +427,15 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
> >   }
> >   EXPORT_SYMBOL(blk_mq_alloc_request);
> > +static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
> 
> I don't see why it needs to be inline. I think generally we leave it to the
> compiler to decide.

Usually, if a function is called from more than two places, it will not
be inlined automatically. blk_mq_first_mapped_cpu() is called in the
fast path, so we need it inlined.
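
For example, if hctx->cpumask is { 2, 3 } and only CPUs 0-1 are online,
the helper behaves like this:

	cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
	/* no online CPU in the mask, so cpu == nr_cpu_ids */
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(hctx->cpumask);	/* returns CPU 2 */

so it may hand back an offline CPU, which is what
blk_mq_alloc_request_hctx() relies on for drivers without managed irqs.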

> 
> > +{
> > +	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
> > +
> > +	if (cpu >= nr_cpu_ids)
> > +		cpu = cpumask_first(hctx->cpumask);
> > +	return cpu;
> > +}
> > +
> >   struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
> >   	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
> >   {
> > @@ -468,7 +477,10 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
> >   	data.hctx = q->queue_hw_ctx[hctx_idx];
> >   	if (!blk_mq_hw_queue_mapped(data.hctx))
> >   		goto out_queue_exit;
> > -	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
> > +
> > +	WARN_ON_ONCE(blk_mq_hctx_use_managed_irq(data.hctx));
> > +
> > +	cpu = blk_mq_first_mapped_cpu(data.hctx);
> >   	data.ctx = __blk_mq_get_ctx(q, cpu);
> >   	if (!q->elevator)
> > @@ -1501,15 +1513,6 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
> >   	hctx_unlock(hctx, srcu_idx);
> >   }
> > -static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
> > -{
> > -	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
> > -
> > -	if (cpu >= nr_cpu_ids)
> > -		cpu = cpumask_first(hctx->cpumask);
> > -	return cpu;
> > -}
> > -
> >   /*
> >    * It'd be great if the workqueue API had a way to pass
> >    * in a mask and had some smarts for more clever placement.
> > @@ -2556,6 +2559,10 @@ static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
> >   	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
> >   			struct blk_mq_hw_ctx, cpuhp_online);
> > +	/* hctx needn't be deactivated in case managed irq isn't used */
> > +	if (!blk_mq_hctx_use_managed_irq(hctx))
> > +		return 0;
> 
> In a previous version (v2) I think that we just didn't register the cpu
> hotplug handlers, so I'm not sure why that changed.

Sorry, I forgot to mention the change: inside blk_mq_realloc_hw_ctxs()
we may grab a cached hctx without initializing it again, so a change in
the managed-irq info may not be visible in blk_mq_init_hctx().

Given blk_mq_hctx_notify_offline() isn't in the fast path, I'd rather
do the check in blk_mq_hctx_notify_offline(), which is more reliable
than running the check in blk_mq_init_hctx().
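
A simplified sketch of the two placements (bodies elided; the cpuhp
registration shown is the one blk-mq already does unconditionally):

	/* v2-style: decide at init time. blk_mq_realloc_hw_ctxs() may
	 * hand back a cached hctx without running this again, so a stale
	 * use_managed_irq value can stick.
	 */
	static int blk_mq_init_hctx(...)
	{
		if (blk_mq_hctx_use_managed_irq(hctx))
			cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE,
							 &hctx->cpuhp_online);
		...
	}

	/* this patch: always register, and decide in the callback,
	 * which always sees the current queue map.
	 */
	static int blk_mq_hctx_notify_offline(unsigned int cpu,
					      struct hlist_node *node)
	{
		if (!blk_mq_hctx_use_managed_irq(hctx))
			return 0;
		...
	}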

> 
> > +
> >   	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
> >   	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
> >   		return 0;
> > diff --git a/block/blk-mq.h b/block/blk-mq.h
> > index d08779f77a26..7333b659d8f5 100644
> > --- a/block/blk-mq.h
> > +++ b/block/blk-mq.h
> > @@ -119,6 +119,14 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
> >   	return ctx->hctxs[type];
> >   }
> > +static inline bool blk_mq_hctx_use_managed_irq(struct blk_mq_hw_ctx *hctx)
> > +{
> > +	if (hctx->type == HCTX_TYPE_POLL)
> > +		return false;
> > +
> > +	return hctx->queue->tag_set->map[hctx->type].use_managed_irq;
> > +}
> 
> This function only seems to be used in blk-mq.c, so I'm not sure why
> you put it in the .h file.

Oops, the helper must have been used in an earlier version. I will send
a new version soon.


Thanks,
Ming

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2c4ac51e54eb..591ab07c64d8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -427,6 +427,15 @@  struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 }
 EXPORT_SYMBOL(blk_mq_alloc_request);
 
+static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
+{
+	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		cpu = cpumask_first(hctx->cpumask);
+	return cpu;
+}
+
 struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
 {
@@ -468,7 +477,10 @@  struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	data.hctx = q->queue_hw_ctx[hctx_idx];
 	if (!blk_mq_hw_queue_mapped(data.hctx))
 		goto out_queue_exit;
-	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
+
+	WARN_ON_ONCE(blk_mq_hctx_use_managed_irq(data.hctx));
+
+	cpu = blk_mq_first_mapped_cpu(data.hctx);
 	data.ctx = __blk_mq_get_ctx(q, cpu);
 
 	if (!q->elevator)
@@ -1501,15 +1513,6 @@  static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 	hctx_unlock(hctx, srcu_idx);
 }
 
-static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
-{
-	int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask);
-
-	if (cpu >= nr_cpu_ids)
-		cpu = cpumask_first(hctx->cpumask);
-	return cpu;
-}
-
 /*
  * It'd be great if the workqueue API had a way to pass
  * in a mask and had some smarts for more clever placement.
@@ -2556,6 +2559,10 @@  static int blk_mq_hctx_notify_offline(unsigned int cpu, struct hlist_node *node)
 	struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node,
 			struct blk_mq_hw_ctx, cpuhp_online);
 
+	/* hctx needn't be deactivated in case managed irq isn't used */
+	if (!blk_mq_hctx_use_managed_irq(hctx))
+		return 0;
+
 	if (!cpumask_test_cpu(cpu, hctx->cpumask) ||
 	    !blk_mq_last_cpu_in_hctx(cpu, hctx))
 		return 0;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index d08779f77a26..7333b659d8f5 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -119,6 +119,14 @@  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
 	return ctx->hctxs[type];
 }
 
+static inline bool blk_mq_hctx_use_managed_irq(struct blk_mq_hw_ctx *hctx)
+{
+	if (hctx->type == HCTX_TYPE_POLL)
+		return false;
+
+	return hctx->queue->tag_set->map[hctx->type].use_managed_irq;
+}
+
 /*
  * sysfs helpers
  */