@@ -2294,6 +2294,7 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
enum hctx_type type;
bool hctx_dead;
struct request *rq;
+ struct elevator_queue *e;
hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
@@ -2304,12 +2305,31 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
hctx_dead = cpumask_first_and(hctx->cpumask, cpu_online_mask) >=
nr_cpu_ids;
- spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_lists[type])) {
- list_splice_init(&ctx->rq_lists[type], &tmp);
- blk_mq_hctx_clear_pending(hctx, ctx);
+ e = hctx->queue->elevator;
+ if (!e) {
+ spin_lock(&ctx->lock);
+ if (!list_empty(&ctx->rq_lists[type])) {
+ list_splice_init(&ctx->rq_lists[type], &tmp);
+ blk_mq_hctx_clear_pending(hctx, ctx);
+ }
+ spin_unlock(&ctx->lock);
+ } else if (hctx_dead) {
+ LIST_HEAD(sched_tmp);
+
+ while ((rq = e->type->ops.dispatch_request(hctx))) {
+ if (rq->mq_hctx != hctx)
+ list_add(&rq->queuelist, &sched_tmp);
+ else
+ list_add(&rq->queuelist, &tmp);
+ }
+
+ while (!list_empty(&sched_tmp)) {
+ rq = list_entry(sched_tmp.next, struct request,
+ queuelist);
+ list_del_init(&rq->queuelist);
+ blk_mq_sched_insert_request(rq, true, true, true);
+ }
}
- spin_unlock(&ctx->lock);
if (list_empty(&tmp))
return 0;
If hctx becomes dead, all in-queue IO requests aimed at this hctx have to
be re-submitted, so requests queued in the scheduler queue have to be
covered as well.

Cc: Bart Van Assche <bvanassche@acm.org>
Cc: Hannes Reinecke <hare@suse.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Keith Busch <keith.busch@intel.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-mq.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
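For readers who want to see the re-submission idea in isolation, here is a
small standalone sketch (ordinary userspace C, not kernel code; fake_request,
sched_dispatch, sched_insert and drain_dead_hctx are hypothetical names
invented for illustration). It models the pattern the hunk above implements,
assuming a simple singly linked scheduler queue: drain every queued request,
hand requests that map to a still-live hw queue back to the scheduler, and
collect requests of the dead hctx for direct dispatch.

/*
 * Standalone illustration only: hypothetical types and helpers that model
 * the drain-and-resubmit pattern from the hunk above, outside the kernel.
 */
#include <stdio.h>

struct fake_request {
        int id;
        int hctx_id;                    /* hw queue this request maps to */
        struct fake_request *next;
};

/* pop one request from a hypothetical scheduler queue, NULL when empty */
static struct fake_request *sched_dispatch(struct fake_request **queue)
{
        struct fake_request *rq = *queue;

        if (rq)
                *queue = rq->next;
        return rq;
}

/* hand a request back to the scheduler (front insert keeps it simple) */
static void sched_insert(struct fake_request **queue, struct fake_request *rq)
{
        rq->next = *queue;
        *queue = rq;
}

/*
 * Rough analogue of the patched logic: drain everything, requeue requests
 * that belong to other hw queues, collect requests of the dead hctx in @tmp.
 */
static void drain_dead_hctx(struct fake_request **sched_queue, int dead_hctx,
                            struct fake_request **tmp)
{
        struct fake_request *rq, *foreign = NULL;

        while ((rq = sched_dispatch(sched_queue))) {
                if (rq->hctx_id != dead_hctx) {
                        rq->next = foreign;     /* like sched_tmp */
                        foreign = rq;
                } else {
                        rq->next = *tmp;        /* like tmp */
                        *tmp = rq;
                }
        }

        while ((rq = foreign)) {
                foreign = rq->next;
                sched_insert(sched_queue, rq);
        }
}

int main(void)
{
        struct fake_request rqs[4] = {
                { .id = 0, .hctx_id = 0 }, { .id = 1, .hctx_id = 1 },
                { .id = 2, .hctx_id = 0 }, { .id = 3, .hctx_id = 1 },
        };
        struct fake_request *queue = NULL, *tmp = NULL, *rq;
        int i;

        for (i = 0; i < 4; i++)
                sched_insert(&queue, &rqs[i]);

        drain_dead_hctx(&queue, /* dead hctx */ 0, &tmp);

        for (rq = tmp; rq; rq = rq->next)
                printf("re-submit rq %d from dead hctx\n", rq->id);
        for (rq = queue; rq; rq = rq->next)
                printf("rq %d stays with live hctx %d\n", rq->id, rq->hctx_id);
        return 0;
}

The split mirrors the sched_tmp/tmp lists in the patch: requests that map to
other (live) hw queues are re-inserted through blk_mq_sched_insert_request(),
while requests of the dead hctx are collected in tmp for the re-submission
handling that follows later in blk_mq_hctx_notify_dead().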