
[V6,6/8] blk-mq: re-submit IO in case that hctx is inactive

Message ID 20200407092901.314228-7-ming.lei@redhat.com (mailing list archive)
State New, archived
Series blk-mq: improvement CPU hotplug

Commit Message

Ming Lei April 7, 2020, 9:28 a.m. UTC
When all CPUs mapped to one hctx are offline and this hctx has become
inactive, we must not run this hw queue to complete requests any more.

So steal the bios from each request, re-submit them to the request queue,
and finally free the request in blk_mq_hctx_notify_dead(). Passthrough
requests carry no bios, so they are re-issued by cloning them onto a newly
allocated request instead.
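
For fs requests this amounts to "detach the bios, push them back through the
make_request path, then end the original request"; a minimal sketch of that
shape (it mirrors blk_mq_resubmit_fs_io() in the patch below, the function
name here is only illustrative):

static void resubmit_fs_request(struct request *rq)
{
	struct bio_list list;
	struct bio *bio;

	bio_list_init(&list);
	blk_steal_bios(&list, rq);	/* detach bios from the dying request */

	while ((bio = bio_list_pop(&list)))
		generic_make_request(bio);	/* re-enter the submission path */

	blk_mq_cleanup_rq(rq);		/* let the driver drop per-rq resources */
	blk_mq_end_request(rq, 0);	/* complete and free the original request */
}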

Cc: John Garry <john.garry@huawei.com>
Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq.c | 131 +++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 121 insertions(+), 10 deletions(-)

Patch

diff --git a/block/blk-mq.c b/block/blk-mq.c
index aac86cd99f02..6749f39fdd11 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2319,10 +2319,98 @@  static int blk_mq_hctx_notify_online(unsigned int cpu, struct hlist_node *node)
 	return 0;
 }
 
+static void blk_mq_resubmit_end_io(struct request *rq, blk_status_t error)
+{
+	struct request *orig_rq = rq->end_io_data;
+
+	blk_mq_cleanup_rq(orig_rq);
+	blk_mq_end_request(orig_rq, error);
+
+	blk_put_request(rq);
+}
+
+static void blk_mq_resubmit_passthrough_io(struct request *rq)
+{
+	struct request *nrq;
+	unsigned int flags = 0, cmd_flags = 0;
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+	struct blk_mq_tags *tags = rq->q->elevator ? hctx->sched_tags :
+		hctx->tags;
+	bool reserved = blk_mq_tag_is_reserved(tags, rq->internal_tag);
+
+	if (rq->rq_flags & RQF_PREEMPT)
+		flags |= BLK_MQ_REQ_PREEMPT;
+	if (reserved)
+		flags |= BLK_MQ_REQ_RESERVED;
+
+	/* avoid allocation failure & IO merge */
+	cmd_flags = (rq->cmd_flags & ~REQ_NOWAIT) | REQ_NOMERGE;
+
+	nrq = blk_get_request(rq->q, cmd_flags, flags);
+	if (IS_ERR(nrq))
+		return;
+
+	nrq->__sector = blk_rq_pos(rq);
+	nrq->__data_len = blk_rq_bytes(rq);
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		nrq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+		nrq->special_vec = rq->special_vec;
+	}
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	nrq->nr_integrity_segments = rq->nr_integrity_segments;
+#endif
+	nrq->nr_phys_segments = rq->nr_phys_segments;
+	nrq->ioprio = rq->ioprio;
+	nrq->extra_len = rq->extra_len;
+	nrq->rq_disk = rq->rq_disk;
+	nrq->part = rq->part;
+	nrq->write_hint = rq->write_hint;
+	nrq->timeout = rq->timeout;
+
+	memcpy(blk_mq_rq_to_pdu(nrq), blk_mq_rq_to_pdu(rq),
+			rq->q->tag_set->cmd_size);
+
+	nrq->end_io = blk_mq_resubmit_end_io;
+	nrq->end_io_data = rq;
+	nrq->bio = rq->bio;
+	nrq->biotail = rq->biotail;
+
+	blk_account_io_start(nrq, true);
+	blk_mq_sched_insert_request(nrq, true, true, true);
+}
+
+static void blk_mq_resubmit_fs_io(struct request *rq)
+{
+	struct bio_list list;
+	struct bio *bio;
+
+	bio_list_init(&list);
+	blk_steal_bios(&list, rq);
+
+	while (true) {
+		bio = bio_list_pop(&list);
+		if (!bio)
+			break;
+
+		generic_make_request(bio);
+	}
+
+	blk_mq_cleanup_rq(rq);
+	blk_mq_end_request(rq, 0);
+}
+
+static void blk_mq_resubmit_io(struct request *rq)
+{
+	if (rq->end_io || blk_rq_is_passthrough(rq))
+		blk_mq_resubmit_passthrough_io(rq);
+	else
+		blk_mq_resubmit_fs_io(rq);
+}
+
 /*
- * 'cpu' is going away. splice any existing rq_list entries from this
- * software queue to the hw queue dispatch list, and ensure that it
- * gets run.
+ * 'cpu' has gone away. If this hctx is inactive, we can't dispatch requests
+ * to the hctx any more, so steal the bios from its requests, re-submit them
+ * to the request queue, and finally free these requests.
  */
 static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 {
@@ -2342,16 +2430,39 @@  static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
 	}
 	spin_unlock(&ctx->lock);
 
-	clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
+	if (!test_bit(BLK_MQ_S_INACTIVE, &hctx->state)) {
+		if (!list_empty(&tmp)) {
+			spin_lock(&hctx->lock);
+			list_splice_tail_init(&tmp, &hctx->dispatch);
+			spin_unlock(&hctx->lock);
+			blk_mq_run_hw_queue(hctx, true);
+		}
+	} else {
+		LIST_HEAD(flush_in);
+		LIST_HEAD(flush_out);
+		struct request *rq, *nxt;
 
-	if (list_empty(&tmp))
-		return 0;
+		/* requests in dispatch list have to be re-submitted too */
+		spin_lock(&hctx->lock);
+		list_splice_tail_init(&hctx->dispatch, &tmp);
+		spin_unlock(&hctx->lock);
 
-	spin_lock(&hctx->lock);
-	list_splice_tail_init(&tmp, &hctx->dispatch);
-	spin_unlock(&hctx->lock);
+		/* blk_end_flush_machinery will cover flush request */
+		list_for_each_entry_safe(rq, nxt, &tmp, queuelist) {
+			if (rq->rq_flags & RQF_FLUSH_SEQ)
+				list_move(&rq->queuelist, &flush_in);
+		}
+		blk_end_flush_machinery(hctx, &flush_in, &flush_out);
+		list_splice_tail(&flush_out, &tmp);
+
+		while (!list_empty(&tmp)) {
+			rq = list_first_entry(&tmp, struct request, queuelist);
+			list_del_init(&rq->queuelist);
+			blk_mq_resubmit_io(rq);
+		}
+		clear_bit(BLK_MQ_S_INACTIVE, &hctx->state);
+	}
 
-	blk_mq_run_hw_queue(hctx, true);
 	return 0;
 }