@@ -134,10 +134,7 @@ static void blk_flush_restore_request(struct request *rq)
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
if (rq->q->mq_ops) {
- struct request_queue *q = rq->q;
-
- blk_mq_add_to_requeue_list(rq, add_front);
- blk_mq_kick_requeue_list(q);
+ blk_mq_add_to_requeue_list(rq, add_front, true);
return false;
} else {
if (add_front)
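
With this change the flush code relies on blk_mq_add_to_requeue_list() itself kicking the requeue list when its new third argument is true, instead of calling blk_mq_kick_requeue_list() by hand. A minimal sketch of a caller using the new argument (the helper name below is hypothetical, not part of this patch):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical helper: put @rq back on its queue's requeue list, at the
 * head or the tail depending on @at_head, and let blk-mq schedule the
 * requeue work right away via the new third argument.
 */
static void example_requeue_now(struct request *rq, bool at_head)
{
        blk_mq_add_to_requeue_list(rq, at_head, true);
}
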
@@ -494,12 +494,12 @@ static void __blk_mq_requeue_request(struct request *rq)
}
}
-void blk_mq_requeue_request(struct request *rq)
+void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
{
__blk_mq_requeue_request(rq);
BUG_ON(blk_queued_rq(rq));
- blk_mq_add_to_requeue_list(rq, true);
+ blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
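
In other words, blk_mq_requeue_request(rq, true) now behaves like the old blk_mq_requeue_request(rq) followed by blk_mq_kick_requeue_list(rq->q), while passing false leaves the kick entirely to the caller. A minimal sketch of the two call styles (the helper names are hypothetical):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Requeue @rq and schedule the requeue work immediately. */
static void example_requeue_and_kick(struct request *rq)
{
        blk_mq_requeue_request(rq, true);
}

/* Requeue @rq but defer the kick, e.g. to share it with later requeues. */
static void example_requeue_no_kick(struct request *rq)
{
        blk_mq_requeue_request(rq, false);
        /* ... requeue more requests here if needed ... */
        blk_mq_kick_requeue_list(rq->q);
}
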
@@ -533,7 +533,8 @@ static void blk_mq_requeue_work(struct work_struct *work)
blk_mq_run_hw_queues(q, false);
}
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ bool kick_requeue_list)
{
struct request_queue *q = rq->q;
unsigned long flags;
@@ -552,6 +553,9 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
list_add_tail(&rq->queuelist, &q->requeue_list);
}
spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+ if (kick_requeue_list)
+ blk_mq_kick_requeue_list(q);
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
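
Making the kick optional here is what enables batching: a caller that moves several requests onto the requeue list can skip the per-request kick and wake the requeue work once at the end. A sketch of that pattern under those assumptions (the helper and the pending list are hypothetical):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/list.h>

/*
 * Hypothetical helper: move every request on @pending back to @q's
 * requeue list, then kick the requeue work a single time.
 */
static void example_requeue_batch(struct request_queue *q,
                                  struct list_head *pending)
{
        struct request *rq, *next;

        list_for_each_entry_safe(rq, next, pending, queuelist) {
                list_del_init(&rq->queuelist);
                blk_mq_add_to_requeue_list(rq, false, false);   /* no kick */
        }
        blk_mq_kick_requeue_list(q);                            /* one kick */
}
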
@@ -2043,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Requeue pending requests (flush or discard) */
list_del_init(&req->queuelist);
BUG_ON(req->nr_phys_segments > segs);
- blk_mq_requeue_request(req);
+ blk_mq_requeue_request(req, false);
}
blk_mq_start_stopped_hw_queues(info->rq, true);
blk_mq_kick_requeue_list(info->rq);
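
The xen-blkfront hunk shows the intended split in a recovery path: requests are requeued without kicking inside the loop, and the single kick only happens after the stopped hardware queues have been restarted. A condensed sketch of that tail step (the helper name is hypothetical; blk_mq_start_stopped_hw_queues() is the existing blk-mq call used just above):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical recovery tail: once every pending request has been put back
 * with blk_mq_requeue_request(req, false), restart the stopped hardware
 * queues and then kick the requeue list a single time.
 */
static void example_finish_recovery(struct request_queue *q)
{
        blk_mq_start_stopped_hw_queues(q, true);
        blk_mq_kick_requeue_list(q);
}
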
@@ -347,7 +347,7 @@ EXPORT_SYMBOL(dm_mq_kick_requeue_list);
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
- blk_mq_requeue_request(rq);
+ blk_mq_requeue_request(rq, false);
__dm_mq_kick_requeue_list(rq->q, msecs);
}
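
dm-rq passes false for a different reason: judging by its msecs argument, __dm_mq_kick_requeue_list() defers the kick rather than issuing it immediately. Using the blk_mq_delay_kick_requeue_list() prototype that appears in the blk-mq.h hunk below, the same delayed pattern can be sketched as follows (the helper name is hypothetical):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper: requeue @rq but only re-run the queue after @msecs. */
static void example_delay_requeue(struct request *rq, unsigned long msecs)
{
        blk_mq_requeue_request(rq, false);            /* no immediate kick */
        blk_mq_delay_kick_requeue_list(rq->q, msecs); /* kick after a delay */
}
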
@@ -203,7 +203,7 @@ void nvme_requeue_req(struct request *req)
{
unsigned long flags;
- blk_mq_requeue_request(req);
+ blk_mq_requeue_request(req, false);
spin_lock_irqsave(req->q->queue_lock, flags);
if (!blk_queue_stopped(req->q))
blk_mq_kick_requeue_list(req->q);
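
nvme also passes false because its kick is conditional: the requeue work is only scheduled while the queue is not stopped, checked under queue_lock as the hunk shows. A self-contained sketch of that shape (the function name is hypothetical; the locking mirrors the hunk above):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/spinlock.h>

/*
 * Hypothetical: requeue @req, but only wake the requeue work if the queue
 * has not been stopped in the meantime.
 */
static void example_requeue_unless_stopped(struct request *req)
{
        unsigned long flags;

        blk_mq_requeue_request(req, false);
        spin_lock_irqsave(req->q->queue_lock, flags);
        if (!blk_queue_stopped(req->q))
                blk_mq_kick_requeue_list(req->q);
        spin_unlock_irqrestore(req->q->queue_lock, flags);
}
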
@@ -86,10 +86,8 @@ scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
- struct request_queue *q = cmd->request->q;
- blk_mq_requeue_request(cmd->request);
- blk_mq_kick_requeue_list(q);
+ blk_mq_requeue_request(cmd->request, true);
put_device(&sdev->sdev_gendev);
}
@@ -218,8 +218,9 @@ void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);
-void blk_mq_requeue_request(struct request *rq);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+ bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);