@@ -192,7 +192,7 @@ static void blk_flush_complete_seq(struct request *rq,
spin_lock(&hctx->requeue_lock);
list_add_tail(&rq->queuelist, &hctx->flush_list);
spin_unlock(&hctx->requeue_lock);
- blk_mq_kick_requeue_list(q);
+ blk_mq_run_hw_queues(q, true);
break;

case REQ_FSEQ_DONE:
@@ -354,7 +354,7 @@ static void blk_kick_flush(struct blk_mq_hw_ctx *hctx,
list_add_tail(&flush_rq->queuelist, &hctx->flush_list);
spin_unlock(&hctx->requeue_lock);

- blk_mq_kick_requeue_list(q);
+ blk_mq_run_hw_queues(q, true);
}

static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
@@ -114,7 +114,7 @@ static ssize_t queue_state_write(void *data, const char __user *buf,
} else if (strcmp(op, "start") == 0) {
blk_mq_start_stopped_hw_queues(q, true);
} else if (strcmp(op, "kick") == 0) {
- blk_mq_kick_requeue_list(q);
+ blk_mq_run_hw_queues(q, true);
} else {
pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
@@ -1436,7 +1436,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
spin_unlock_irqrestore(&hctx->requeue_lock, flags);

if (kick_requeue_list)
- blk_mq_kick_requeue_list(q);
+ blk_mq_run_hw_queues(q, true);
}
EXPORT_SYMBOL(blk_mq_requeue_request);

@@ -1473,19 +1473,6 @@ static void blk_mq_process_requeue_list(struct blk_mq_hw_ctx *hctx)
}
}

-void blk_mq_kick_requeue_list(struct request_queue *q)
-{
- blk_mq_run_hw_queues(q, true);
-}
-EXPORT_SYMBOL(blk_mq_kick_requeue_list);
-
-void blk_mq_delay_kick_requeue_list(struct request_queue *q,
- unsigned long msecs)
-{
- blk_mq_delay_run_hw_queues(q, msecs);
-}
-EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
-
static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
/*
@@ -902,7 +902,7 @@ static inline void __ublk_rq_task_work(struct request *req,
*/
if (unlikely(!mapped_bytes)) {
blk_mq_requeue_request(req, false);
- blk_mq_delay_kick_requeue_list(req->q,
+ blk_mq_delay_run_hw_queues(req->q,
UBLK_REQUEUE_DELAY_MS);
return;
}
@@ -1297,7 +1297,7 @@ static void ublk_unquiesce_dev(struct ublk_device *ub)

blk_mq_unquiesce_queue(ub->ub_disk->queue);
/* We may have requeued some rqs in ublk_quiesce_queue() */
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
+ blk_mq_run_hw_queues(ub->ub_disk->queue, true);
}

static void ublk_stop_dev(struct ublk_device *ub)
@@ -2341,7 +2341,7 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
blk_mq_unquiesce_queue(ub->ub_disk->queue);
pr_devel("%s: queue unquiesced, dev id %d.\n",
__func__, header->dev_id);
- blk_mq_kick_requeue_list(ub->ub_disk->queue);
+ blk_mq_run_hw_queues(ub->ub_disk->queue, true);
ub->dev_info.state = UBLK_S_DEV_LIVE;
schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
ret = 0;
@@ -2040,7 +2040,6 @@ static int blkif_recover(struct blkfront_info *info)
blk_mq_requeue_request(req, false);
}
blk_mq_start_stopped_hw_queues(info->rq, true);
- blk_mq_kick_requeue_list(info->rq);

while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
/* Traverse the list of pending bios and re-queue them */
@@ -64,7 +64,7 @@ int dm_request_based(struct mapped_device *md)
void dm_start_queue(struct request_queue *q)
{
blk_mq_unquiesce_queue(q);
- blk_mq_kick_requeue_list(q);
+ blk_mq_run_hw_queues(q, true);
}

void dm_stop_queue(struct request_queue *q)
@@ -170,14 +170,14 @@ static void dm_end_request(struct request *clone, blk_status_t error)

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
- blk_mq_kick_requeue_list(md->queue);
+ blk_mq_run_hw_queues(md->queue, true);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
blk_mq_requeue_request(rq, false);
- blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ blk_mq_delay_run_hw_queues(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
@@ -303,7 +303,7 @@ static void nvme_retry_req(struct request *req)

nvme_req(req)->retries++;
blk_mq_requeue_request(req, false);
- blk_mq_delay_kick_requeue_list(req->q, delay);
+ blk_mq_delay_run_hw_queues(req->q, delay);
}

static void nvme_log_error(struct request *req)
@@ -243,7 +243,7 @@ static void scm_request_requeue(struct scm_request *scmrq)

atomic_dec(&bdev->queued_reqs);
scm_request_done(scmrq);
- blk_mq_kick_requeue_list(bdev->rq);
+ blk_mq_run_hw_queues(bdev->rq, true);
}

static void scm_request_finish(struct scm_request *scmrq)
@@ -124,7 +124,7 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)

if (msecs) {
blk_mq_requeue_request(rq, false);
- blk_mq_delay_kick_requeue_list(rq->q, msecs);
+ blk_mq_delay_run_hw_queues(rq->q, msecs);
} else
blk_mq_requeue_request(rq, true);
}
@@ -871,8 +871,6 @@ static inline bool blk_mq_add_to_batch(struct request *req,
}

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_kick_requeue_list(struct request_queue *q);
-void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
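
For any remaining out-of-tree callers, the conversion is the same one applied above. The following sketch is a hypothetical driver requeue path, for illustration only (the function name and its msecs parameter are invented, not part of this series); it relies only on blk_mq_requeue_request(), blk_mq_run_hw_queues() and blk_mq_delay_run_hw_queues() as used here:

#include <linux/blk-mq.h>

/* Illustrative sketch, not part of this series. */
static void example_requeue_cmd(struct request *rq, unsigned long msecs)
{
	/* Park the request on the requeue list without running the queue. */
	blk_mq_requeue_request(rq, false);

	if (msecs)
		/* was: blk_mq_delay_kick_requeue_list(rq->q, msecs); */
		blk_mq_delay_run_hw_queues(rq->q, msecs);
	else
		/* was: blk_mq_kick_requeue_list(rq->q); */
		blk_mq_run_hw_queues(rq->q, true);
}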