
[3/3] blk-mq: Use llist_head for blk_cpu_done

Message ID 20201204191356.2516405-4-bigeasy@linutronix.de (mailing list archive)
State New, archived
Series blk-mq: Don't complete in IRQ, use llist_head

Commit Message

Sebastian Andrzej Siewior Dec. 4, 2020, 7:13 p.m. UTC
With llist_head it is possible to avoid the locking (the irq-off region)
when items are added. This makes it possible to add items on a remote
CPU.
llist_add() returns true if the list was previously empty. This can be
used to invoke the SMP function call / raise the softirq only if the
first item was added (otherwise it is already pending).
This simplifies the code a little and reduces the IRQ-off regions. With
this change it is possible to reduce the SMP function call to a simple
__raise_softirq_irqoff().
blk_mq_complete_request_remote() needs a preempt-disable section if the
request needs to complete on the local CPU. Some callers (USB-storage)
invoke this from preemptible context and the request needs to be
enqueued on the same CPU as the softirq is raised.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 block/blk-mq.c         | 77 ++++++++++++++----------------------------
 include/linux/blkdev.h |  2 +-
 2 files changed, 27 insertions(+), 52 deletions(-)
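
The idea in one condensed, illustrative snippet (not a separate hunk of the
patch): only the caller that adds the first entry to the per-CPU llist
signals; later callers know the softirq (or IPI) is already pending.

	if (llist_add(&rq->ipi_list, this_cpu_ptr(&blk_cpu_done)))
		raise_softirq(BLOCK_SOFTIRQ);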

Comments

Christoph Hellwig Dec. 8, 2020, 1:20 p.m. UTC | #1
On Fri, Dec 04, 2020 at 08:13:56PM +0100, Sebastian Andrzej Siewior wrote:
> With llist_head it is possible to avoid the locking (the irq-off region)
> when items are added. This makes it possible to add items on a remote
> CPU.
> llist_add() returns true if the list was previously empty. This can be
> used to invoke the SMP function call / raise the softirq only if the
> first item was added (otherwise it is already pending).
> This simplifies the code a little and reduces the IRQ-off regions. With
> this change it is possible to reduce the SMP function call to a simple
> __raise_softirq_irqoff().
> blk_mq_complete_request_remote() needs a preempt-disable section if the
> request needs to complete on the local CPU. Some callers (USB-storage)
> invoke this from preemptible context and the request needs to be
> enqueued on the same CPU as the softirq is raised.
> 
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> ---
>  block/blk-mq.c         | 77 ++++++++++++++----------------------------
>  include/linux/blkdev.h |  2 +-
>  2 files changed, 27 insertions(+), 52 deletions(-)
> 
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 3c0e94913d874..b5138327952a4 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -41,7 +41,7 @@
>  #include "blk-mq-sched.h"
>  #include "blk-rq-qos.h"
>  
> +static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
>  
>  static void blk_mq_poll_stats_start(struct request_queue *q);
>  static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
> @@ -567,68 +567,32 @@ void blk_mq_end_request(struct request *rq, blk_status_t error)
>  }
>  EXPORT_SYMBOL(blk_mq_end_request);
>  
> +static void blk_complete_reqs(struct llist_head *cpu_list)
>  {
> +	struct llist_node *entry;
> +	struct request *rq, *rq_next;
>  
> +	entry = llist_del_all(cpu_list);
> +	entry = llist_reverse_order(entry);

I find the variable naming and split of the assignments a little
strange.  What about:

static void blk_complete_reqs(struct llist_head *list)
{
	struct llist_node *first = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

?

> +	llist_for_each_entry_safe(rq, rq_next, entry, ipi_list)
>  		rq->q->mq_ops->complete(rq);
>  }

Aren't some sanitizers going to be unhappy if we never delete the
request from the list?
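
Folding that naming suggestion into the loop body from the patch, the whole
function would read roughly as follows (a sketch, not code posted in the
thread):

static void blk_complete_reqs(struct llist_head *list)
{
	/* Grab everything queued so far and restore submission order. */
	struct llist_node *first = llist_reverse_order(llist_del_all(list));
	struct request *rq, *next;

	llist_for_each_entry_safe(rq, next, first, ipi_list)
		rq->q->mq_ops->complete(rq);
}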

>  bool blk_mq_complete_request_remote(struct request *rq)
>  {
> +	struct llist_head *cpu_list;
>  	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
>  
>  	/*
> @@ -669,12 +634,22 @@ bool blk_mq_complete_request_remote(struct request *rq)
>  		return false;
>  
>  	if (blk_mq_complete_need_ipi(rq)) {
> +		unsigned int cpu;
> +
> +		cpu = rq->mq_ctx->cpu;
> +		cpu_list = &per_cpu(blk_cpu_done, cpu);
> +		if (llist_add(&rq->ipi_list, cpu_list)) {
> +			INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
> +			smp_call_function_single_async(cpu, &rq->csd);
> +		}

I think the above code section inside the conditional should go into a
little helper instead of being open coded here in the fast path routine.
I also don't really see the point of the cpu and cpu_list local variables.

>  	} else {
>  		if (rq->q->nr_hw_queues > 1)
>  			return false;
> +		preempt_disable();
> +		cpu_list = this_cpu_ptr(&blk_cpu_done);
> +		if (llist_add(&rq->ipi_list, cpu_list))
> +			raise_softirq(BLOCK_SOFTIRQ);
> +		preempt_enable();

I think the section after the return false here also would benefit from
a little helper with a descriptive name.

Otherwise this looks good to me.
Christoph Hellwig Dec. 8, 2020, 1:28 p.m. UTC | #2
Just to clarify what I mean, I think the flow in
blk_mq_complete_request_remote should turn into something like:


	...

	if (rq->cmd_flags & REQ_HIPRI)
		return false;

	if (blk_mq_complete_need_ipi(rq)) {
		blk_mq_complete_send_ipi(rq);
		return true;
	}

	if (rq->q->nr_hw_queues == 1) {
		blk_mq_raise_softirq(rq);
		return true;
	}

	return false;
}
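
A sketch of what the two helpers named in that flow might look like, assuming
they simply wrap the llist_add()/IPI and softirq logic already in the patch
(the helper bodies are not part of the posted series):

static void blk_mq_complete_send_ipi(struct request *rq)
{
	struct llist_head *list;
	unsigned int cpu = rq->mq_ctx->cpu;

	list = &per_cpu(blk_cpu_done, cpu);
	if (llist_add(&rq->ipi_list, list)) {
		/* First entry on this CPU's list: kick the remote CPU once. */
		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
		smp_call_function_single_async(cpu, &rq->csd);
	}
}

static void blk_mq_raise_softirq(struct request *rq)
{
	struct llist_head *list;

	/* Stay on this CPU so the softirq is raised where the request sits. */
	preempt_disable();
	list = this_cpu_ptr(&blk_cpu_done);
	if (llist_add(&rq->ipi_list, list))
		raise_softirq(BLOCK_SOFTIRQ);
	preempt_enable();
}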

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3c0e94913d874..b5138327952a4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -41,7 +41,7 @@ 
 #include "blk-mq-sched.h"
 #include "blk-rq-qos.h"
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_poll_stats_start(struct request_queue *q);
 static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -567,68 +567,32 @@  void blk_mq_end_request(struct request *rq, blk_status_t error)
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
-/*
- * Softirq action handler - move entries to local list and loop over them
- * while passing them to the queue registered handler.
- */
-static __latent_entropy void blk_done_softirq(struct softirq_action *h)
+static void blk_complete_reqs(struct llist_head *cpu_list)
 {
-	struct list_head *cpu_list, local_list;
+	struct llist_node *entry;
+	struct request *rq, *rq_next;
 
-	local_irq_disable();
-	cpu_list = this_cpu_ptr(&blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
+	entry = llist_del_all(cpu_list);
+	entry = llist_reverse_order(entry);
 
-	while (!list_empty(&local_list)) {
-		struct request *rq;
-
-		rq = list_entry(local_list.next, struct request, ipi_list);
-		list_del_init(&rq->ipi_list);
+	llist_for_each_entry_safe(rq, rq_next, entry, ipi_list)
 		rq->q->mq_ops->complete(rq);
-	}
 }
 
-static void blk_mq_trigger_softirq(struct request *rq)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
-	struct list_head *list;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	list = this_cpu_ptr(&blk_cpu_done);
-	list_add_tail(&rq->ipi_list, list);
-
-	/*
-	 * If the list only contains our just added request, signal a raise of
-	 * the softirq.  If there are already entries there, someone already
-	 * raised the irq but it hasn't run yet.
-	 */
-	if (list->next == &rq->ipi_list)
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_restore(flags);
+	blk_complete_reqs(this_cpu_ptr(&blk_cpu_done));
 }
 
 static int blk_softirq_cpu_dead(unsigned int cpu)
 {
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	local_irq_disable();
-	list_splice_init(&per_cpu(blk_cpu_done, cpu),
-			 this_cpu_ptr(&blk_cpu_done));
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
-	local_irq_enable();
-
+	blk_complete_reqs(&per_cpu(blk_cpu_done, cpu));
 	return 0;
 }
 
-
 static void __blk_mq_complete_request_remote(void *data)
 {
-	struct request *rq = data;
-
-	blk_mq_trigger_softirq(rq);
+	__raise_softirq_irqoff(BLOCK_SOFTIRQ);
 }
 
 static inline bool blk_mq_complete_need_ipi(struct request *rq)
@@ -659,6 +623,7 @@  static inline bool blk_mq_complete_need_ipi(struct request *rq)
 
 bool blk_mq_complete_request_remote(struct request *rq)
 {
+	struct llist_head *cpu_list;
 	WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
 
 	/*
@@ -669,12 +634,22 @@  bool blk_mq_complete_request_remote(struct request *rq)
 		return false;
 
 	if (blk_mq_complete_need_ipi(rq)) {
-		INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
-		smp_call_function_single_async(rq->mq_ctx->cpu, &rq->csd);
+		unsigned int cpu;
+
+		cpu = rq->mq_ctx->cpu;
+		cpu_list = &per_cpu(blk_cpu_done, cpu);
+		if (llist_add(&rq->ipi_list, cpu_list)) {
+			INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq);
+			smp_call_function_single_async(cpu, &rq->csd);
+		}
 	} else {
 		if (rq->q->nr_hw_queues > 1)
 			return false;
-		blk_mq_trigger_softirq(rq);
+		preempt_disable();
+		cpu_list = this_cpu_ptr(&blk_cpu_done);
+		if (llist_add(&rq->ipi_list, cpu_list))
+			raise_softirq(BLOCK_SOFTIRQ);
+		preempt_enable();
 	}
 
 	return true;
@@ -3905,7 +3880,7 @@  static int __init blk_mq_init(void)
 	int i;
 
 	for_each_possible_cpu(i)
-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+		init_llist_head(&per_cpu(blk_cpu_done, i));
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
 
 	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 17cedf0dc83db..7b05390766eec 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -156,7 +156,7 @@  struct request {
 	 */
 	union {
 		struct hlist_node hash;	/* merge hash */
-		struct list_head ipi_list;
+		struct llist_node ipi_list;
 	};
 
 	/*