--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1418,7 +1418,7 @@ void blk_execute_rq_nowait(struct reques
}
blk_mq_insert_request(rq, at_head ? BLK_MQ_INSERT_AT_HEAD : 0);
- blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
+ blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
@@ -2322,8 +2322,6 @@ void blk_mq_run_hw_queue(struct blk_mq_h
*/
WARN_ON_ONCE(!async && in_interrupt());
- might_sleep_if(!async && hctx->flags & BLK_MQ_F_BLOCKING);
-
need_run = blk_mq_hw_queue_need_run(hctx);
if (!need_run) {
unsigned long flags;
@@ -2342,7 +2340,8 @@ void blk_mq_run_hw_queue(struct blk_mq_h
return;
}
- if (async || !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
+ if (async || (hctx->flags & BLK_MQ_F_BLOCKING) ||
+ !cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
blk_mq_delay_run_hw_queue(hctx, 0);
return;
}
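
Taken together with the caller hunks above and below, the effect of this change is to make the blocking decision in exactly one place: call sites stop deriving the async argument from hctx->flags & BLK_MQ_F_BLOCKING (or from REQ_NOWAIT, as in a later hunk of this patch) and simply pass false, while blk_mq_run_hw_queue() punts to the workqueue whenever the run is explicitly async, the hctx is marked BLK_MQ_F_BLOCKING, or the current CPU is not in the hctx's cpumask. This is also why the might_sleep_if() annotation above goes away: a synchronous run can no longer reach a blocking ->queue_rq. A minimal user-space model of the new decision (the names mirror the kernel's, but the struct and flag value are simplified stand-ins, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define BLK_MQ_F_BLOCKING (1U << 5)	/* illustrative value only */

struct hctx_model {
	unsigned int flags;
	bool cpu_in_mask;	/* stands in for cpumask_test_cpu() */
};

/* Mirrors the rewritten condition in blk_mq_run_hw_queue(). */
static bool punt_to_workqueue(const struct hctx_model *hctx, bool async)
{
	return async ||
	       (hctx->flags & BLK_MQ_F_BLOCKING) ||
	       !hctx->cpu_in_mask;
}

int main(void)
{
	struct hctx_model blocking = { .flags = BLK_MQ_F_BLOCKING, .cpu_in_mask = true };
	struct hctx_model plain    = { .flags = 0, .cpu_in_mask = true };

	/* Callers now pass async=false; blocking hctxs still go async. */
	printf("blocking hctx: punt=%d\n", punt_to_workqueue(&blocking, false));	/* 1 */
	printf("plain hctx:    punt=%d\n", punt_to_workqueue(&plain, false));	/* 0 */
	return 0;
}
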
@@ -2477,7 +2476,7 @@ void blk_mq_start_hw_queue(struct blk_mq
{
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- blk_mq_run_hw_queue(hctx, hctx->flags & BLK_MQ_F_BLOCKING);
+ blk_mq_run_hw_queue(hctx, false);
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -2513,8 +2512,7 @@ void blk_mq_start_stopped_hw_queues(stru
unsigned long i;
queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_start_stopped_hw_queue(hctx, async ||
- (hctx->flags & BLK_MQ_F_BLOCKING));
+ blk_mq_start_stopped_hw_queue(hctx, async);
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
@@ -2572,8 +2570,6 @@ static void blk_mq_insert_requests(struc
list_for_each_entry(rq, list, queuelist) {
BUG_ON(rq->mq_ctx != ctx);
trace_block_rq_insert(rq);
- if (rq->cmd_flags & REQ_NOWAIT)
- run_queue_async = true;
}
spin_lock(&ctx->lock);
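
Dropping the REQ_NOWAIT scan follows from the same centralization: the per-request check forced an async run so that a nowait submitter could not end up sleeping in a blocking ->queue_rq. Since blk_mq_run_hw_queue() now punts BLK_MQ_F_BLOCKING hctxs to the workqueue on its own, walking the plug list for the flag no longer buys anything.
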
@@ -2739,7 +2735,7 @@ static void blk_mq_try_issue_directly(st
if ((rq->rq_flags & RQF_USE_SCHED) || !blk_mq_get_budget_and_tag(rq)) {
blk_mq_insert_request(rq, 0);
- blk_mq_run_hw_queue(hctx, rq->cmd_flags & REQ_NOWAIT);
+ blk_mq_run_hw_queue(hctx, false);
return;
}
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -429,8 +429,7 @@ static void scsi_single_lun_run(struct s
* but in most cases, we will be first. Ideally, each LU on the
* target would get some limited time or requests on the target.
*/
- blk_mq_run_hw_queues(current_sdev->request_queue,
- shost->queuecommand_may_block);
+ blk_mq_run_hw_queues(current_sdev->request_queue, false);
spin_lock_irqsave(shost->host_lock, flags);
if (!starget->starget_sdev_user)
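
The SCSI hunk relies on the blocking property already being recorded on the queue: hosts that may sleep in ->queuecommand set shost->queuecommand_may_block, which SCSI's tag-set setup translates into BLK_MQ_F_BLOCKING, roughly (paraphrased from drivers/scsi/scsi_lib.c; not part of this patch):

	if (shost->queuecommand_may_block)
		tag_set->flags |= BLK_MQ_F_BLOCKING;

so blk_mq_run_hw_queues(..., false) still ends up running these queues from the workqueue, and passing the flag explicitly here was redundant.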