
[V2,5/5] blk-mq: Wait for hctx inflight requests on CPU unplug

Message ID 20190527150207.11372-6-ming.lei@redhat.com (mailing list archive)
State Not Applicable
Series blk-mq: Wait for hctx inflight requests on CPU unplug

Commit Message

Ming Lei May 27, 2019, 3:02 p.m. UTC
Managed interrupts cannot migrate affinity when their CPUs are offline.
If the CPU is allowed to shut down before they're returned, commands
dispatched to managed queues won't be able to complete through their
irq handlers.

Wait in the cpu hotplug handler until all inflight requests on the tags
are completed or time out. Wait only once for each tags structure, so we
can save time in the case of shared tags.

Based on the following patch from Keith, but using a simple delay-spin
wait instead.

https://lore.kernel.org/linux-block/20190405215920.27085-1-keith.busch@intel.com/

Some SCSI devices may have a single blk_mq hw queue and multiple private
completion queues; in that case, wait until all requests on the private
completion queue are completed.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq-tag.c |  2 +-
 block/blk-mq-tag.h |  5 +++
 block/blk-mq.c     | 94 ++++++++++++++++++++++++++++++++++++++++++----
 3 files changed, 93 insertions(+), 8 deletions(-)

Comments

John Garry May 28, 2019, 4:50 p.m. UTC | #1
On 27/05/2019 16:02, Ming Lei wrote:
> Managed interrupts can not migrate affinity when their CPUs are offline.
> If the CPU is allowed to shutdown before they're returned, commands
> dispatched to managed queues won't be able to complete through their
> irq handlers.
>
> Wait in cpu hotplug handler until all inflight requests on the tags
> are completed or timeout. Wait once for each tags, so we can save time
> in case of shared tags.
>
> Based on the following patch from Keith, and use simple delay-spin
> instead.
>
> https://lore.kernel.org/linux-block/20190405215920.27085-1-keith.busch@intel.com/
>
> Some SCSI devices may have single blk_mq hw queue and multiple private
> completion queues, and wait until all requests on the private completion
> queue are completed.

Hi Ming,

I'm a bit concerned that this approach won't work due to ordering: it
seems that the IRQ would be shut down prior to the CPU dead notification
for the last CPU in the mask (where we attempt to drain the queue
associated with the IRQ, which would require the IRQ to still be enabled).

I hope that you can tell me that I'm wrong...

Thanks,
John

>
> Signed-off-by: Ming Lei <ming.lei@redhat.com>
> ---
>  block/blk-mq-tag.c |  2 +-
>  block/blk-mq-tag.h |  5 +++
>  block/blk-mq.c     | 94 ++++++++++++++++++++++++++++++++++++++++++----
>  3 files changed, 93 insertions(+), 8 deletions(-)
>
> diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
> index 7513c8eaabee..b24334f99c5d 100644
> --- a/block/blk-mq-tag.c
> +++ b/block/blk-mq-tag.c
> @@ -332,7 +332,7 @@ static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
>   *		true to continue iterating tags, false to stop.
>   * @priv:	Will be passed as second argument to @fn.
>   */
> -static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
> +void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
>  		busy_tag_iter_fn *fn, void *priv)
>  {
>  	if (tags->nr_reserved_tags)
> diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
> index 61deab0b5a5a..9ce7606a87f0 100644
> --- a/block/blk-mq-tag.h
> +++ b/block/blk-mq-tag.h
> @@ -19,6 +19,9 @@ struct blk_mq_tags {
>  	struct request **rqs;
>  	struct request **static_rqs;
>  	struct list_head page_list;
> +
> +#define BLK_MQ_TAGS_DRAINED           0
> +	unsigned long flags;
>  };
>
>
> @@ -35,6 +38,8 @@ extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
>  extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
>  void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
>  		void *priv);
> +void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
> +		busy_tag_iter_fn *fn, void *priv);
>
>  static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
>  						 struct blk_mq_hw_ctx *hctx)
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 32b8ad3d341b..ab1fbfd48374 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -2215,6 +2215,65 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
>  	return -ENOMEM;
>  }
>
> +static int blk_mq_hctx_notify_prepare(unsigned int cpu, struct hlist_node *node)
> +{
> +	struct blk_mq_hw_ctx	*hctx =
> +		hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
> +
> +	if (hctx->tags)
> +		clear_bit(BLK_MQ_TAGS_DRAINED, &hctx->tags->flags);
> +
> +	return 0;
> +}
> +
> +struct blk_mq_inflight_rq_data {
> +	unsigned cnt;
> +	const struct cpumask *cpumask;
> +};
> +
> +static bool blk_mq_count_inflight_rq(struct request *rq, void *data,
> +				     bool reserved)
> +{
> +	struct blk_mq_inflight_rq_data *count = data;
> +
> +	if ((blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) &&
> +			cpumask_test_cpu(blk_mq_rq_cpu(rq), count->cpumask))
> +		count->cnt++;
> +
> +	return true;
> +}
> +
> +unsigned blk_mq_tags_inflight_rqs(struct blk_mq_tags *tags,
> +		const struct cpumask *completion_cpus)
> +{
> +	struct blk_mq_inflight_rq_data data = {
> +		.cnt = 0,
> +		.cpumask = completion_cpus,
> +	};
> +
> +	blk_mq_all_tag_busy_iter(tags, blk_mq_count_inflight_rq, &data);
> +
> +	return data.cnt;
> +}
> +
> +static void blk_mq_drain_inflight_rqs(struct blk_mq_tags *tags,
> +		const struct cpumask *completion_cpus)
> +{
> +	if (!tags)
> +		return;
> +
> +	/* Can't apply the optimization in case of private completion queues */
> +	if (completion_cpus == cpu_all_mask &&
> +			test_and_set_bit(BLK_MQ_TAGS_DRAINED, &tags->flags))
> +		return;
> +
> +	while (1) {
> +		if (!blk_mq_tags_inflight_rqs(tags, completion_cpus))
> +			break;
> +		msleep(5);
> +	}
> +}
> +
>  /*
>   * 'cpu' is going away. splice any existing rq_list entries from this
>   * software queue to the hw queue dispatch list, and ensure that it
> @@ -2226,6 +2285,8 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
>  	struct blk_mq_ctx *ctx;
>  	LIST_HEAD(tmp);
>  	enum hctx_type type;
> +	struct request_queue *q;
> +	const struct cpumask *cpumask = NULL, *completion_cpus;
>
>  	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
>  	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
> @@ -2238,14 +2299,32 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
>  	}
>  	spin_unlock(&ctx->lock);
>
> -	if (list_empty(&tmp))
> -		return 0;
> +	if (!list_empty(&tmp)) {
> +		spin_lock(&hctx->lock);
> +		list_splice_tail_init(&tmp, &hctx->dispatch);
> +		spin_unlock(&hctx->lock);
>
> -	spin_lock(&hctx->lock);
> -	list_splice_tail_init(&tmp, &hctx->dispatch);
> -	spin_unlock(&hctx->lock);
> +		blk_mq_run_hw_queue(hctx, true);
> +	}
> +
> +	/*
> +	 * Interrupt for the current completion queue will be shutdown, so
> +	 * wait until all requests on this queue are completed.
> +	 */
> +	q = hctx->queue;
> +	if (q->mq_ops->complete_queue_affinity)
> +		cpumask = q->mq_ops->complete_queue_affinity(hctx, cpu);
> +
> +	if (!cpumask) {
> +		cpumask = hctx->cpumask;
> +		completion_cpus = cpu_all_mask;
> +	} else {
> +		completion_cpus = cpumask;
> +	}
> +
> +	if (cpumask_first_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
> +		blk_mq_drain_inflight_rqs(hctx->tags, completion_cpus);
>
> -	blk_mq_run_hw_queue(hctx, true);
>  	return 0;
>  }
>
> @@ -3541,7 +3620,8 @@ EXPORT_SYMBOL(blk_mq_rq_cpu);
>
>  static int __init blk_mq_init(void)
>  {
> -	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
> +	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead",
> +				blk_mq_hctx_notify_prepare,
>  				blk_mq_hctx_notify_dead);
>  	return 0;
>  }
>
Ming Lei May 29, 2019, 2:28 a.m. UTC | #2
On Tue, May 28, 2019 at 05:50:40PM +0100, John Garry wrote:
> On 27/05/2019 16:02, Ming Lei wrote:
> > Managed interrupts can not migrate affinity when their CPUs are offline.
> > If the CPU is allowed to shutdown before they're returned, commands
> > dispatched to managed queues won't be able to complete through their
> > irq handlers.
> > 
> > Wait in cpu hotplug handler until all inflight requests on the tags
> > are completed or timeout. Wait once for each tags, so we can save time
> > in case of shared tags.
> > 
> > Based on the following patch from Keith, and use simple delay-spin
> > instead.
> > 
> > https://lore.kernel.org/linux-block/20190405215920.27085-1-keith.busch@intel.com/
> > 
> > Some SCSI devices may have single blk_mq hw queue and multiple private
> > completion queues, and wait until all requests on the private completion
> > queue are completed.
> 
> Hi Ming,
> 
> I'm a bit concerned that this approach won't work due to ordering: it seems
> that the IRQ would be shutdown prior to the CPU dead notification for the

Managed IRQ shutdown is run in irq_migrate_all_off_this_cpu(), which is
called in the callback of takedown_cpu(). And the CPU dead notification
is always sent after that CPU becomes offline, see cpuhp_invoke_callback().

> last CPU in the mask (where we attempt to drain the queue associated with
> the IRQ, which would require the IRQ to be still enabled).
> 
> I hope that you can tell me that I'm wrong...

Or add a one-line printk in both irq_migrate_all_off_this_cpu() and
blk_mq_hctx_notify_dead(), and you will see whether you are wrong.
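
A minimal sketch of that kind of instrumentation (illustrative only, not
part of this series; hunk context abbreviated):

--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ void irq_migrate_all_off_this_cpu(void)
 {
+	/* debug only: when are managed IRQs shut down on this CPU? */
+	pr_info("%s: cpu%d\n", __func__, smp_processor_id());

--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 {
+	/* debug only: when does the DEAD notification for 'cpu' arrive? */
+	pr_info("%s: cpu%u\n", __func__, cpu);

Comparing the two messages in dmesg shows the ordering directly.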


Thanks, 
Ming
Ming Lei May 29, 2019, 2:42 a.m. UTC | #3
On Wed, May 29, 2019 at 10:28:52AM +0800, Ming Lei wrote:
> On Tue, May 28, 2019 at 05:50:40PM +0100, John Garry wrote:
> > On 27/05/2019 16:02, Ming Lei wrote:
> > > Managed interrupts can not migrate affinity when their CPUs are offline.
> > > If the CPU is allowed to shutdown before they're returned, commands
> > > dispatched to managed queues won't be able to complete through their
> > > irq handlers.
> > > 
> > > Wait in cpu hotplug handler until all inflight requests on the tags
> > > are completed or timeout. Wait once for each tags, so we can save time
> > > in case of shared tags.
> > > 
> > > Based on the following patch from Keith, and use simple delay-spin
> > > instead.
> > > 
> > > https://lore.kernel.org/linux-block/20190405215920.27085-1-keith.busch@intel.com/
> > > 
> > > Some SCSI devices may have single blk_mq hw queue and multiple private
> > > completion queues, and wait until all requests on the private completion
> > > queue are completed.
> > 
> > Hi Ming,
> > 
> > I'm a bit concerned that this approach won't work due to ordering: it seems
> > that the IRQ would be shutdown prior to the CPU dead notification for the
> 
> Managed IRQ shutdown is run in irq_migrate_all_off_this_cpu(), which is
> called in the callback of takedown_cpu(). And the CPU dead notification
> is always sent after that CPU becomes offline, see cpuhp_invoke_callback().

Hmm, it looks like we are both saying the same thing.

Yeah, it is too late to drain requests in the cpu hotplug DEAD handler;
maybe we can try to move the managed IRQ shutdown to after the dead
notification is sent.

I need to think of it further.

Thanks,
Ming
John Garry May 29, 2019, 9:42 a.m. UTC | #4
On 29/05/2019 03:42, Ming Lei wrote:
> On Wed, May 29, 2019 at 10:28:52AM +0800, Ming Lei wrote:
>> On Tue, May 28, 2019 at 05:50:40PM +0100, John Garry wrote:
>>> On 27/05/2019 16:02, Ming Lei wrote:
>>>> Managed interrupts can not migrate affinity when their CPUs are offline.
>>>> If the CPU is allowed to shutdown before they're returned, commands
>>>> dispatched to managed queues won't be able to complete through their
>>>> irq handlers.
>>>>
>>>> Wait in cpu hotplug handler until all inflight requests on the tags
>>>> are completed or timeout. Wait once for each tags, so we can save time
>>>> in case of shared tags.
>>>>
>>>> Based on the following patch from Keith, and use simple delay-spin
>>>> instead.
>>>>
>>>> https://lore.kernel.org/linux-block/20190405215920.27085-1-keith.busch@intel.com/
>>>>
>>>> Some SCSI devices may have single blk_mq hw queue and multiple private
>>>> completion queues, and wait until all requests on the private completion
>>>> queue are completed.
>>>
>>> Hi Ming,
>>>
>>> I'm a bit concerned that this approach won't work due to ordering: it seems
>>> that the IRQ would be shutdown prior to the CPU dead notification for the
>>
>> Managed IRQ shutdown is run in irq_migrate_all_off_this_cpu(), which is
>> called in the callback of takedown_cpu(). And the CPU dead notification
>> is always sent after that CPU becomes offline, see cpuhp_invoke_callback().
>
> Hammm, looks we both say same thing.
>
> Yeah, it is too late to drain requests in the cpu hotplug DEAD handler,
> maybe we can try to move managed IRQ shutdown after sending the dead
> notification.
>

Even if the IRQ is shut down later, all the CPUs would still be dead, so
none would be available to receive the interrupt or do the work of
draining the queue.

> I need to think of it further.

It would seem that we just need to be informed of CPU offlining earlier, 
and plug the drain in there.

>

Cheers,
John

> Thanks,
> Ming
>
> .
>
Ming Lei May 29, 2019, 10:10 a.m. UTC | #5
On Wed, May 29, 2019 at 10:42:00AM +0100, John Garry wrote:
> On 29/05/2019 03:42, Ming Lei wrote:
> > On Wed, May 29, 2019 at 10:28:52AM +0800, Ming Lei wrote:
> > > On Tue, May 28, 2019 at 05:50:40PM +0100, John Garry wrote:
> > > > On 27/05/2019 16:02, Ming Lei wrote:
> > > > > Managed interrupts can not migrate affinity when their CPUs are offline.
> > > > > If the CPU is allowed to shutdown before they're returned, commands
> > > > > dispatched to managed queues won't be able to complete through their
> > > > > irq handlers.
> > > > > 
> > > > > Wait in cpu hotplug handler until all inflight requests on the tags
> > > > > are completed or timeout. Wait once for each tags, so we can save time
> > > > > in case of shared tags.
> > > > > 
> > > > > Based on the following patch from Keith, and use simple delay-spin
> > > > > instead.
> > > > > 
> > > > > https://lore.kernel.org/linux-block/20190405215920.27085-1-keith.busch@intel.com/
> > > > > 
> > > > > Some SCSI devices may have single blk_mq hw queue and multiple private
> > > > > completion queues, and wait until all requests on the private completion
> > > > > queue are completed.
> > > > 
> > > > Hi Ming,
> > > > 
> > > > I'm a bit concerned that this approach won't work due to ordering: it seems
> > > > that the IRQ would be shutdown prior to the CPU dead notification for the
> > > 
> > > Managed IRQ shutdown is run in irq_migrate_all_off_this_cpu(), which is
> > > called in the callback of takedown_cpu(). And the CPU dead notification
> > > is always sent after that CPU becomes offline, see cpuhp_invoke_callback().
> > 
> > Hammm, looks we both say same thing.
> > 
> > Yeah, it is too late to drain requests in the cpu hotplug DEAD handler,
> > maybe we can try to move managed IRQ shutdown after sending the dead
> > notification.
> > 
> 
> Even if the IRQ is shutdown later, all CPUs would still be dead, so none
> available to receive the interrupt or do the work for draining the queue.
> 
> > I need to think of it further.
> 
> It would seem that we just need to be informed of CPU offlining earlier, and
> plug the drain in there.

Yes, it looks like blk-mq has to be notified before unplugging the CPU to
handle this issue.

And we should be careful to handle the multiple reply queue case, given
that the queue shouldn't be stopped or quiesced while other reply queues
are still active.

The new CPUHP state for blk-mq should be invoked after the to-be-offline
CPU is quiesced and before it becomes offline.
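
A rough sketch of how such a state could be hooked up (the state name
CPUHP_AP_BLK_MQ_ONLINE and the blk_mq_hctx_notify_offline() callback are
hypothetical here, not part of this series; the enum value would have to
be added to the AP section of include/linux/cpuhotplug.h so that its
teardown runs while the CPU and its managed IRQ are still alive):

static int __init blk_mq_init(void)
{
	/*
	 * Hypothetical: the teardown of this AP-section state runs on the
	 * to-be-offline CPU before it goes down, i.e. before the managed
	 * IRQ is shut down, which is where the draining could happen.
	 */
	cpuhp_setup_state_multi(CPUHP_AP_BLK_MQ_ONLINE, "block/mq:online",
				NULL, blk_mq_hctx_notify_offline);
	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
				blk_mq_hctx_notify_dead);
	return 0;
}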

Thanks,
Ming
Ming Lei May 29, 2019, 3:33 p.m. UTC | #6
On Wed, May 29, 2019 at 6:11 PM Ming Lei <ming.lei@redhat.com> wrote:
>
> On Wed, May 29, 2019 at 10:42:00AM +0100, John Garry wrote:
> > On 29/05/2019 03:42, Ming Lei wrote:
> > > On Wed, May 29, 2019 at 10:28:52AM +0800, Ming Lei wrote:
> > > > On Tue, May 28, 2019 at 05:50:40PM +0100, John Garry wrote:
> > > > > On 27/05/2019 16:02, Ming Lei wrote:
> > > > > > Managed interrupts can not migrate affinity when their CPUs are offline.
> > > > > > If the CPU is allowed to shutdown before they're returned, commands
> > > > > > dispatched to managed queues won't be able to complete through their
> > > > > > irq handlers.
> > > > > >
> > > > > > Wait in cpu hotplug handler until all inflight requests on the tags
> > > > > > are completed or timeout. Wait once for each tags, so we can save time
> > > > > > in case of shared tags.
> > > > > >
> > > > > > Based on the following patch from Keith, and use simple delay-spin
> > > > > > instead.
> > > > > >
> > > > > > https://lore.kernel.org/linux-block/20190405215920.27085-1-keith.busch@intel.com/
> > > > > >
> > > > > > Some SCSI devices may have single blk_mq hw queue and multiple private
> > > > > > completion queues, and wait until all requests on the private completion
> > > > > > queue are completed.
> > > > >
> > > > > Hi Ming,
> > > > >
> > > > > I'm a bit concerned that this approach won't work due to ordering: it seems
> > > > > that the IRQ would be shutdown prior to the CPU dead notification for the
> > > >
> > > > Managed IRQ shutdown is run in irq_migrate_all_off_this_cpu(), which is
> > > > called in the callback of takedown_cpu(). And the CPU dead notification
> > > > is always sent after that CPU becomes offline, see cpuhp_invoke_callback().
> > >
> > > Hammm, looks we both say same thing.
> > >
> > > Yeah, it is too late to drain requests in the cpu hotplug DEAD handler,
> > > maybe we can try to move managed IRQ shutdown after sending the dead
> > > notification.
> > >
> >
> > Even if the IRQ is shutdown later, all CPUs would still be dead, so none
> > available to receive the interrupt or do the work for draining the queue.
> >
> > > I need to think of it further.
> >
> > It would seem that we just need to be informed of CPU offlining earlier, and
> > plug the drain in there.
>
> Yes, looks blk-mq has to be notified before unplugging CPU for this
> issue.
>
> And we should be careful to handle the multiple reply queue case, given the queue
> shouldn't be stopped or quieseced because other reply queues are still active.
>
> The new CPUHP state for blk-mq should be invoked after the to-be-offline
> CPU is quiesced and before it becomes offline.

Hi John,

Thinking of this issue further, so far, one doable solution is to
expose reply queues
as blk-mq hw queues, as done by the following patchset:

https://lore.kernel.org/linux-block/20180205152035.15016-1-ming.lei@redhat.com/

In which global host-wide tags are shared for all blk-mq hw queues.

Also, we can remove all the reply_map stuff in drivers, and then solve
the problem of draining in-flight requests during CPU unplug in a
generic way.
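
For illustration, with reply queues exposed as hw queues and host-wide
shared tags, the driver can recover the (hw queue, tag) pair straight
from the request, so the reply_map goes away. A minimal sketch, where
example_send_to_hw() is a made-up placeholder for the driver's
submission path:

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int example_queuecommand(struct Scsi_Host *shost,
				struct scsi_cmnd *cmd)
{
	u32 unique = blk_mq_unique_tag(cmd->request);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
	u16 tag = blk_mq_unique_tag_to_tag(unique);

	/*
	 * 'hwq' selects the completion/reply queue directly and 'tag' is
	 * unique across the host because all hw queues share one tag
	 * space, so no driver-private reply_map or tag allocator is
	 * needed.
	 */
	return example_send_to_hw(shost, cmd, hwq, tag);
}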

Last time, it was reported that the patchset causes a performance
regression, which is actually caused by duplicated io accounting in
blk_mq_queue_tag_busy_iter() and should be easy to fix.

What do you think of this approach?

Thanks,
Ming Lei
John Garry May 29, 2019, 4:10 p.m. UTC | #7
>>
>> And we should be careful to handle the multiple reply queue case, given the queue
>> shouldn't be stopped or quieseced because other reply queues are still active.
>>
>> The new CPUHP state for blk-mq should be invoked after the to-be-offline
>> CPU is quiesced and before it becomes offline.
>
> Hi John,
>

Hi Ming,

> Thinking of this issue further, so far, one doable solution is to
> expose reply queues
> as blk-mq hw queues, as done by the following patchset:
>
> https://lore.kernel.org/linux-block/20180205152035.15016-1-ming.lei@redhat.com/

I thought that this patchset had fundamental issues in terms of working
for all types of hosts. FYI, I did the backport of the latest
hisi_sas_v3 to v4.15 with this patchset (as you may have noticed in my
git send mistake), but we have not gotten to test it yet.

On a related topic, we did test exposing reply queues as blk-mq hw 
queues and generating the host-wide tag internally in the LLDD with 
sbitmap, and unfortunately we were experiencing a significant 
performance hit, like 2300K -> 1800K IOPs for 4K read.

We need to test this further. I don't understand why we get such a big hit.

>
> In which global host-wide tags are shared for all blk-mq hw queues.
>
> Also we can remove all the reply_map stuff in drivers, then solve the problem of
> draining in-flight requests during unplugging CPU in a generic approach.

So you're saying that removing this reply queue stuff can make the 
solution to the problem more generic, but do you have an idea of the 
overall solution?

>
> Last time, it was reported that the patchset causes performance regression,
> which is actually caused by duplicated io accounting in
> blk_mq_queue_tag_busy_iter(),
> which should be fixed easily.
>
> What do you think of this approach?

It would still be good to have a forward port of this patchset for 
testing, if we're serious about it. Or at least this bug you mention fixed.

thanks again,
John

>
> Thanks,
> Ming Lei
>
> .
>
Ming Lei May 30, 2019, 2:28 a.m. UTC | #8
On Wed, May 29, 2019 at 05:10:38PM +0100, John Garry wrote:
> 
> > > 
> > > And we should be careful to handle the multiple reply queue case, given the queue
> > > shouldn't be stopped or quieseced because other reply queues are still active.
> > > 
> > > The new CPUHP state for blk-mq should be invoked after the to-be-offline
> > > CPU is quiesced and before it becomes offline.
> > 
> > Hi John,
> > 
> 
> Hi Ming,
> 
> > Thinking of this issue further, so far, one doable solution is to
> > expose reply queues
> > as blk-mq hw queues, as done by the following patchset:
> > 
> > https://lore.kernel.org/linux-block/20180205152035.15016-1-ming.lei@redhat.com/
> 
> I thought that this patchset had fundamental issues, in terms of working for
> all types of hosts. FYI, I did the backport of latest hisi_sas_v3 to v4.15

Could you explain a bit about the fundamental issues for all types of
host?

It is just for hosts with multiple reply queues, such as hisi_sas v3,
megaraid_sas, mpt3sas and hpsa.

> with this patchset (as you may have noticed in my git send mistake), but we
> have not got to test it yet.
> 
> On a related topic, we did test exposing reply queues as blk-mq hw queues
> and generating the host-wide tag internally in the LLDD with sbitmap, and
> unfortunately we were experiencing a significant performance hit, like 2300K
> -> 1800K IOPs for 4K read.
> 
> We need to test this further. I don't understand why we get such a big hit.

The performance regression shouldn't have been introduced in theory; it
is because blk_mq_queue_tag_busy_iter() iterates over the same
duplicated tags multiple times, which can be fixed easily.

> 
> > 
> > In which global host-wide tags are shared for all blk-mq hw queues.
> > 
> > Also we can remove all the reply_map stuff in drivers, then solve the problem of
> > draining in-flight requests during unplugging CPU in a generic approach.
> 
> So you're saying that removing this reply queue stuff can make the solution
> to the problem more generic, but do you have an idea of the overall
> solution?

1) convert reply queue into blk-mq hw queue first

2) then all drivers are in same position wrt. handling requests vs.
unplugging CPU (shutdown managed IRQ)

The current handling in blk_mq_hctx_notify_dead() is actually wrong: at
that time, all CPUs mapped to the hctx are dead, yet
blk_mq_run_hw_queue() still dispatches requests to the driver's hw
queue; the DEAD CPUs mapped to this hctx are invisible to the driver,
and the interrupts for these requests on the hctx are finally lost.

Frankly speaking, the above 2nd problem is still hard to solve.

1) take_cpu_down() shuts down managed IRQs first, then runs the teardown
callbacks for states in [CPUHP_AP_ONLINE, CPUHP_AP_OFFLINE) on the
to-be-offline CPU

2) However, all runnable tasks are removed from the CPU in the teardown
callback for CPUHP_AP_SCHED_STARTING, which is run after managed IRQs
are shut down. That said, it is hard to avoid new requests being queued
to an hctx whose CPUs are all DEAD.

3) we don't support freezing the queue for a specific hctx yet, and that
way may not be accepted because of the extra cost in the fast path

4) once a request is allocated, it should be submitted to the driver no
matter whether CPU hotplug happens or not. Or should we free it and
re-allocate a new request on a proper sw/hw queue?

> 
> > 
> > Last time, it was reported that the patchset causes performance regression,
> > which is actually caused by duplicated io accounting in
> > blk_mq_queue_tag_busy_iter(),
> > which should be fixed easily.
> > 
> > What do you think of this approach?
> 
> It would still be good to have a forward port of this patchset for testing,
> if we're serious about it. Or at least this bug you mention fixed.

I plan to make this patchset workable on 5.2-rc for your test first.


Thanks,
Ming
Ming Lei May 30, 2019, 4:11 a.m. UTC | #9
On Thu, May 30, 2019 at 10:28 AM Ming Lei <ming.lei@redhat.com> wrote:
>
> On Wed, May 29, 2019 at 05:10:38PM +0100, John Garry wrote:
> >
> > > >
> > > > And we should be careful to handle the multiple reply queue case, given the queue
> > > > shouldn't be stopped or quieseced because other reply queues are still active.
> > > >
> > > > The new CPUHP state for blk-mq should be invoked after the to-be-offline
> > > > CPU is quiesced and before it becomes offline.
> > >
> > > Hi John,
> > >
> >
> > Hi Ming,
> >
> > > Thinking of this issue further, so far, one doable solution is to
> > > expose reply queues
> > > as blk-mq hw queues, as done by the following patchset:
> > >
> > > https://lore.kernel.org/linux-block/20180205152035.15016-1-ming.lei@redhat.com/
> >
> > I thought that this patchset had fundamental issues, in terms of working for
> > all types of hosts. FYI, I did the backport of latest hisi_sas_v3 to v4.15
>
> Could you explain it a bit about the fundamental issues for all types of
> host?
>
> It is just for hosts with multiple reply queues, such as hisi_sas v3,
> megaraid_sas, mpt3sas and hpsa.
>
> > with this patchset (as you may have noticed in my git send mistake), but we
> > have not got to test it yet.
> >
> > On a related topic, we did test exposing reply queues as blk-mq hw queues
> > and generating the host-wide tag internally in the LLDD with sbitmap, and
> > unfortunately we were experiencing a significant performance hit, like 2300K
> > -> 1800K IOPs for 4K read.
> >
> > We need to test this further. I don't understand why we get such a big hit.
>
> The performance regression shouldn't have been introduced in theory, and it is
> because blk_mq_queue_tag_busy_iter() iterates over the same duplicated tags multiple
> times, which can be fixed easily.
>
> >
> > >
> > > In which global host-wide tags are shared for all blk-mq hw queues.
> > >
> > > Also we can remove all the reply_map stuff in drivers, then solve the problem of
> > > draining in-flight requests during unplugging CPU in a generic approach.
> >
> > So you're saying that removing this reply queue stuff can make the solution
> > to the problem more generic, but do you have an idea of the overall
> > solution?
>
> 1) convert reply queue into blk-mq hw queue first
>
> 2) then all drivers are in same position wrt. handling requests vs.
> unplugging CPU (shutdown managed IRQ)
>
> The current handling in blk_mq_hctx_notify_dead() is actually wrong,
> at that time, all CPUs on the hctx are dead, blk_mq_run_hw_queue()
> still dispatches requests on driver's hw queue, and driver is invisible
> to DEAD CPUs mapped to this hctx, and finally interrupt for these
> requests on the hctx are lost.
>
> Frankly speaking, the above 2nd problem is still hard to solve.
>
> 1) take_cpu_down() shutdown managed IRQ first, then run teardown callback
> for states in [CPUHP_AP_ONLINE, CPUHP_AP_OFFLINE) on the to-be-offline
> CPU
>
> 2) However, all runnable tasks are removed from the CPU in the teardown
> callback for CPUHP_AP_SCHED_STARTING, which is run after managed IRQs
> are shutdown. That said it is hard to avoid new request queued to
> the hctx with all DEAD CPUs.
>
> 3) we don't support to freeze queue for specific hctx yet, or that way
> may not be accepted because of extra cost in fast path
>
> 4) once request is allocated, it should be submitted to driver no matter
> if CPU hotplug happens or not. Or free it and re-allocate new request
> on proper sw/hw queue?

That looks doable: we may steal the bios from the old in-queue request,
re-submit them via generic_make_request(), and finally free the old
request, but RQF_DONTPREP has to be addressed via a new callback.

So the following is the overall solution for waiting for requests vs.
CPU hotplug, which is done in two stages:

1) in the teardown callback of a new CPUHP state, CPUHP_BLK_MQ_PREP,
which is run before CPUHP_AP_ONLINE_IDLE, at which time the CPU & managed
IRQ are still alive:

- stop the hctx
- wait until all in-flight requests from this hctx are completed

2) in the teardown callback of CPUHP_BLK_MQ_DEAD, which is run after the
CPU is dead:

- dequeue requests queued in the sw queue or scheduler queue of this hctx
- steal the bios from the dequeued requests, and re-submit them via
  generic_make_request()
- free the dequeued requests; driver resources need to be released via a
  new callback for RQF_DONTPREP (looks like only SCSI needs it)
- restart this hctx
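
A skeleton of what those two stages could look like (a sketch only: the
prep-stage callback, the last-online-CPU check and example_requeue_bios()
are illustrative, not something this series implements):

static int blk_mq_hctx_notify_prep(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx =
		hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);

	/* only act when 'cpu' is the last online CPU mapped to this hctx */
	if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu ||
	    cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
		return 0;

	/* stage 1: the CPU and its managed IRQ are still alive */
	blk_mq_stop_hw_queue(hctx);
	blk_mq_drain_inflight_rqs(hctx->tags, cpu_all_mask);
	return 0;
}

static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
{
	struct blk_mq_hw_ctx *hctx =
		hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);

	/*
	 * stage 2: the CPU is dead.  example_requeue_bios() stands in for
	 * dequeueing requests from the sw/scheduler queues of this hctx,
	 * stealing their bios, re-submitting them via
	 * generic_make_request(), and freeing the old requests (with a new
	 * callback to release driver resources for RQF_DONTPREP requests).
	 */
	example_requeue_bios(hctx);
	blk_mq_start_hw_queue(hctx);
	return 0;
}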


Thanks,
Ming Lei
John Garry May 30, 2019, 9:31 a.m. UTC | #10
Hi Ming,

>>
>>> Thinking of this issue further, so far, one doable solution is to
>>> expose reply queues
>>> as blk-mq hw queues, as done by the following patchset:
>>>
>>> https://lore.kernel.org/linux-block/20180205152035.15016-1-ming.lei@redhat.com/
>>
>> I thought that this patchset had fundamental issues, in terms of working for
>> all types of hosts. FYI, I did the backport of latest hisi_sas_v3 to v4.15
>
> Could you explain it a bit about the fundamental issues for all types of
> host?
>

*As I understand*, splitting the tagset has issues with dual-mode HBAs - 
as in supporting NVMe and SCSI, as some HBAs do.

> It is just for hosts with multiple reply queues, such as hisi_sas v3,
> megaraid_sas, mpt3sas and hpsa.
>
>> with this patchset (as you may have noticed in my git send mistake), but we
>> have not got to test it yet.
>>
>> On a related topic, we did test exposing reply queues as blk-mq hw queues
>> and generating the host-wide tag internally in the LLDD with sbitmap, and
>> unfortunately we were experiencing a significant performance hit, like 2300K
>> -> 1800K IOPs for 4K read.
>>
>> We need to test this further. I don't understand why we get such a big hit.
>
> The performance regression shouldn't have been introduced in theory, and it is
> because blk_mq_queue_tag_busy_iter() iterates over the same duplicated tags multiple
> times, which can be fixed easily.
>

We are testing further, and I will tentatively say that we're getting 
better results (than previously) after fixing something in the LLDD. TBC.

>>
>>>
>>> In which global host-wide tags are shared for all blk-mq hw queues.
>>>
>>> Also we can remove all the reply_map stuff in drivers, then solve the problem of
>>> draining in-flight requests during unplugging CPU in a generic approach.
>>
>> So you're saying that removing this reply queue stuff can make the solution
>> to the problem more generic, but do you have an idea of the overall
>> solution?
>
> 1) convert reply queue into blk-mq hw queue first
>
> 2) then all drivers are in same position wrt. handling requests vs.
> unplugging CPU (shutdown managed IRQ)
>
> The current handling in blk_mq_hctx_notify_dead() is actually wrong,

Yeah, the comment reads that it's going away, but it's actually gone.

> at that time, all CPUs on the hctx are dead, blk_mq_run_hw_queue()
> still dispatches requests on driver's hw queue, and driver is invisible
> to DEAD CPUs mapped to this hctx, and finally interrupt for these
> requests on the hctx are lost.
>
> Frankly speaking, the above 2nd problem is still hard to solve.
>
> 1) take_cpu_down() shutdown managed IRQ first, then run teardown callback
> for states in [CPUHP_AP_ONLINE, CPUHP_AP_OFFLINE) on the to-be-offline
> CPU
>
> 2) However, all runnable tasks are removed from the CPU in the teardown
> callback for CPUHP_AP_SCHED_STARTING, which is run after managed IRQs
> are shutdown. That said it is hard to avoid new request queued to
> the hctx with all DEAD CPUs.
>
> 3) we don't support to freeze queue for specific hctx yet, or that way
> may not be accepted because of extra cost in fast path
>
> 4) once request is allocated, it should be submitted to driver no matter
> if CPU hotplug happens or not. Or free it and re-allocate new request
> on proper sw/hw queue?
>
>>
>>>
>>> Last time, it was reported that the patchset causes performance regression,
>>> which is actually caused by duplicated io accounting in
>>> blk_mq_queue_tag_busy_iter(),
>>> which should be fixed easily.
>>>
>>> What do you think of this approach?
>>
>> It would still be good to have a forward port of this patchset for testing,
>> if we're serious about it. Or at least this bug you mention fixed.
>
> I plan to make this patchset workable on 5.2-rc for your test first.
>

OK, thanks. I assume that we're still open to not adding support for
global tags in blk-mq, but rather having the LLDD generate the unique
tag with sbitmap.

Cheers,
John

>
> Thanks,
> Ming
>
> .
>
Ming Lei May 30, 2019, 9:45 a.m. UTC | #11
On Thu, May 30, 2019 at 10:31:34AM +0100, John Garry wrote:
> Hi Ming,
> 
> > > 
> > > > Thinking of this issue further, so far, one doable solution is to
> > > > expose reply queues
> > > > as blk-mq hw queues, as done by the following patchset:
> > > > 
> > > > https://lore.kernel.org/linux-block/20180205152035.15016-1-ming.lei@redhat.com/
> > > 
> > > I thought that this patchset had fundamental issues, in terms of working for
> > > all types of hosts. FYI, I did the backport of latest hisi_sas_v3 to v4.15
> > 
> > Could you explain it a bit about the fundamental issues for all types of
> > host?
> > 
> 
> *As I understand*, splitting the tagset has issues with dual-mode HBAs - as
> in supporting NVMe and SCSI, as some HBAs do.

The patchset I mentioned doesn't split the tagset. The patch just
converts SCSI's reply queues into blk_mq hw queues, and all hw queues
share the host-wide tags. You can get a unique tag too.

This way isn't very different from the current single hw queue (tags),
which is why I think the performance shouldn't be bad compared with the
current single hw queue. Meanwhile, drivers can be simplified.

> 
> > It is just for hosts with multiple reply queues, such as hisi_sas v3,
> > megaraid_sas, mpt3sas and hpsa.
> > 
> > > with this patchset (as you may have noticed in my git send mistake), but we
> > > have not got to test it yet.
> > > 
> > > On a related topic, we did test exposing reply queues as blk-mq hw queues
> > > and generating the host-wide tag internally in the LLDD with sbitmap, and
> > > unfortunately we were experiencing a significant performance hit, like 2300K
> > > -> 1800K IOPs for 4K read.
> > > 
> > > We need to test this further. I don't understand why we get such a big hit.
> > 
> > The performance regression shouldn't have been introduced in theory, and it is
> > because blk_mq_queue_tag_busy_iter() iterates over the same duplicated tags multiple
> > times, which can be fixed easily.
> > 
> 
> We are testing further, and I will tentatively say that we're getting better
> results (than previously) after fixing something in the LLDD. TBC.
> 
> > > 
> > > > 
> > > > In which global host-wide tags are shared for all blk-mq hw queues.
> > > > 
> > > > Also we can remove all the reply_map stuff in drivers, then solve the problem of
> > > > draining in-flight requests during unplugging CPU in a generic approach.
> > > 
> > > So you're saying that removing this reply queue stuff can make the solution
> > > to the problem more generic, but do you have an idea of the overall
> > > solution?
> > 
> > 1) convert reply queue into blk-mq hw queue first
> > 
> > 2) then all drivers are in same position wrt. handling requests vs.
> > unplugging CPU (shutdown managed IRQ)
> > 
> > The current handling in blk_mq_hctx_notify_dead() is actually wrong,
> 
> Yeah, the comment reads that it's going away, but it's actually gone.
> 
> > at that time, all CPUs on the hctx are dead, blk_mq_run_hw_queue()
> > still dispatches requests on driver's hw queue, and driver is invisible
> > to DEAD CPUs mapped to this hctx, and finally interrupt for these
> > requests on the hctx are lost.
> > 
> > Frankly speaking, the above 2nd problem is still hard to solve.
> > 
> > 1) take_cpu_down() shutdown managed IRQ first, then run teardown callback
> > for states in [CPUHP_AP_ONLINE, CPUHP_AP_OFFLINE) on the to-be-offline
> > CPU
> > 
> > 2) However, all runnable tasks are removed from the CPU in the teardown
> > callback for CPUHP_AP_SCHED_STARTING, which is run after managed IRQs
> > are shutdown. That said it is hard to avoid new request queued to
> > the hctx with all DEAD CPUs.
> > 
> > 3) we don't support to freeze queue for specific hctx yet, or that way
> > may not be accepted because of extra cost in fast path
> > 
> > 4) once request is allocated, it should be submitted to driver no matter
> > if CPU hotplug happens or not. Or free it and re-allocate new request
> > on proper sw/hw queue?
> > 
> > > 
> > > > 
> > > > Last time, it was reported that the patchset causes performance regression,
> > > > which is actually caused by duplicated io accounting in
> > > > blk_mq_queue_tag_busy_iter(),
> > > > which should be fixed easily.
> > > > 
> > > > What do you think of this approach?
> > > 
> > > It would still be good to have a forward port of this patchset for testing,
> > > if we're serious about it. Or at least this bug you mention fixed.
> > 
> > I plan to make this patchset workable on 5.2-rc for your test first.
> > 
> 
> ok, thanks. I assume that we're still open to not adding support for global
> tags in blk-mq, but rather the LLDD generating the unique tag with sbitmap.

Actually it is global tags.

Thanks,
Ming

Patch

diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 7513c8eaabee..b24334f99c5d 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -332,7 +332,7 @@  static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
  *		true to continue iterating tags, false to stop.
  * @priv:	Will be passed as second argument to @fn.
  */
-static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
 		busy_tag_iter_fn *fn, void *priv)
 {
 	if (tags->nr_reserved_tags)
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 61deab0b5a5a..9ce7606a87f0 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -19,6 +19,9 @@  struct blk_mq_tags {
 	struct request **rqs;
 	struct request **static_rqs;
 	struct list_head page_list;
+
+#define BLK_MQ_TAGS_DRAINED           0
+	unsigned long flags;
 };
 
 
@@ -35,6 +38,8 @@  extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv);
+void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
+		busy_tag_iter_fn *fn, void *priv);
 
 static inline struct sbq_wait_state *bt_wait_ptr(struct sbitmap_queue *bt,
 						 struct blk_mq_hw_ctx *hctx)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32b8ad3d341b..ab1fbfd48374 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2215,6 +2215,65 @@  int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 	return -ENOMEM;
 }
 
+static int blk_mq_hctx_notify_prepare(unsigned int cpu, struct hlist_node *node)
+{
+	struct blk_mq_hw_ctx	*hctx =
+		hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
+
+	if (hctx->tags)
+		clear_bit(BLK_MQ_TAGS_DRAINED, &hctx->tags->flags);
+
+	return 0;
+}
+
+struct blk_mq_inflight_rq_data {
+	unsigned cnt;
+	const struct cpumask *cpumask;
+};
+
+static bool blk_mq_count_inflight_rq(struct request *rq, void *data,
+				     bool reserved)
+{
+	struct blk_mq_inflight_rq_data *count = data;
+
+	if ((blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) &&
+			cpumask_test_cpu(blk_mq_rq_cpu(rq), count->cpumask))
+		count->cnt++;
+
+	return true;
+}
+
+unsigned blk_mq_tags_inflight_rqs(struct blk_mq_tags *tags,
+		const struct cpumask *completion_cpus)
+{
+	struct blk_mq_inflight_rq_data data = {
+		.cnt = 0,
+		.cpumask = completion_cpus,
+	};
+
+	blk_mq_all_tag_busy_iter(tags, blk_mq_count_inflight_rq, &data);
+
+	return data.cnt;
+}
+
+static void blk_mq_drain_inflight_rqs(struct blk_mq_tags *tags,
+		const struct cpumask *completion_cpus)
+{
+	if (!tags)
+		return;
+
+	/* Can't apply the optimization in case of private completion queues */
+	if (completion_cpus == cpu_all_mask &&
+			test_and_set_bit(BLK_MQ_TAGS_DRAINED, &tags->flags))
+		return;
+
+	while (1) {
+		if (!blk_mq_tags_inflight_rqs(tags, completion_cpus))
+			break;
+		msleep(5);
+	}
+}
+
 /*
  * 'cpu' is going away. splice any existing rq_list entries from this
  * software queue to the hw queue dispatch list, and ensure that it
@@ -2226,6 +2285,8 @@  static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	struct blk_mq_ctx *ctx;
 	LIST_HEAD(tmp);
 	enum hctx_type type;
+	struct request_queue *q;
+	const struct cpumask *cpumask = NULL, *completion_cpus;
 
 	hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
 	ctx = __blk_mq_get_ctx(hctx->queue, cpu);
@@ -2238,14 +2299,32 @@  static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	}
 	spin_unlock(&ctx->lock);
 
-	if (list_empty(&tmp))
-		return 0;
+	if (!list_empty(&tmp)) {
+		spin_lock(&hctx->lock);
+		list_splice_tail_init(&tmp, &hctx->dispatch);
+		spin_unlock(&hctx->lock);
 
-	spin_lock(&hctx->lock);
-	list_splice_tail_init(&tmp, &hctx->dispatch);
-	spin_unlock(&hctx->lock);
+		blk_mq_run_hw_queue(hctx, true);
+	}
+
+	/*
+	 * Interrupt for the current completion queue will be shutdown, so
+	 * wait until all requests on this queue are completed.
+	 */
+	q = hctx->queue;
+	if (q->mq_ops->complete_queue_affinity)
+		cpumask = q->mq_ops->complete_queue_affinity(hctx, cpu);
+
+	if (!cpumask) {
+		cpumask = hctx->cpumask;
+		completion_cpus = cpu_all_mask;
+	} else {
+		completion_cpus = cpumask;
+	}
+
+	if (cpumask_first_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
+		blk_mq_drain_inflight_rqs(hctx->tags, completion_cpus);
 
-	blk_mq_run_hw_queue(hctx, true);
 	return 0;
 }
 
@@ -3541,7 +3620,8 @@  EXPORT_SYMBOL(blk_mq_rq_cpu);
 
 static int __init blk_mq_init(void)
 {
-	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
+	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead",
+				blk_mq_hctx_notify_prepare,
 				blk_mq_hctx_notify_dead);
 	return 0;
 }