@@ -43,6 +43,8 @@
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);

+static void blk_mq_hctx_deactivate(struct blk_mq_hw_ctx *hctx);
+
static int blk_mq_poll_stats_bkt(const struct request *rq)
{
int ddir, sectors, bucket;
@@ -1400,28 +1402,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
int srcu_idx;

/*
- * We should be running this queue from one of the CPUs that
- * are mapped to it.
- *
- * There are at least two related races now between setting
- * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
- * __blk_mq_run_hw_queue():
- *
- * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
- * but later it becomes online, then this warning is harmless
- * at all
- *
- * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
- * but later it becomes offline, then the warning can't be
- * triggered, and we depend on blk-mq timeout handler to
- * handle dispatched requests to this hctx
+ * BLK_MQ_S_INACTIVE may not cover a requeue corner case: a
+ * request can be requeued after CPU unplug has already been
+ * handled, so check whether the hctx is actually inactive. If
+ * so, deactivate it and re-submit all requests in the queue.
*/
if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
- cpu_online(hctx->next_cpu)) {
- printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
- raw_smp_processor_id(),
- cpumask_empty(hctx->cpumask) ? "inactive": "active");
- dump_stack();
+ cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) >=
+ nr_cpu_ids) {
+ blk_mq_hctx_deactivate(hctx);
+ return;
}

/*
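
For readers outside the kernel tree, the check added in the hunk above can be
restated as: find the first CPU that is both in hctx->cpumask and online; if
no such CPU exists, cpumask_next_and() returns a value >= nr_cpu_ids and the
hctx is treated as inactive, so it gets deactivated and its requests are
re-submitted. The standalone C sketch below only illustrates that bitmask
logic; NR_CPUS, first_common_cpu() and the sample masks are illustrative
stand-ins and are not part of the patch or of the kernel API.

#include <stdio.h>

#define NR_CPUS 8   /* stand-in for the kernel's nr_cpu_ids */

/*
 * Return the first CPU set in both masks, or NR_CPUS if there is none;
 * this mirrors what cpumask_next_and(-1, hctx->cpumask, cpu_online_mask)
 * computes in the check above.
 */
static int first_common_cpu(unsigned int hctx_mask, unsigned int online_mask)
{
	unsigned int both = hctx_mask & online_mask;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (both & (1u << cpu))
			return cpu;
	return NR_CPUS;   /* no online CPU serves this hctx */
}

int main(void)
{
	unsigned int hctx_mask = 0x0c;    /* hctx mapped to CPUs 2 and 3 */
	unsigned int online_mask = 0x03;  /* only CPUs 0 and 1 still online */

	if (first_common_cpu(hctx_mask, online_mask) >= NR_CPUS)
		printf("hctx inactive: deactivate and re-submit its requests\n");
	else
		printf("hctx still served by an online CPU\n");
	return 0;
}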