Let the I/O scheduler control which requests are dispatched.

Send requeued requests and requests that could not be dispatched back to
the I/O scheduler instead of inserting them directly into the hctx
dispatch list. Since requests with RQF_DONTPREP set no longer bypass the
scheduler, add RQF_DONTPREP to RQF_NOMERGE_FLAGS so that such requests
are still never merged.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-mq.c         | 22 ++++++++++------------
 include/linux/blk-mq.h |  5 +++--
 2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1426,15 +1426,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
rq->rq_flags &= ~RQF_SOFTBARRIER;
list_del_init(&rq->queuelist);
- /*
- * If RQF_DONTPREP, rq has contained some driver specific
- * data, so insert it to hctx dispatch list to avoid any
- * merge.
- */
- if (rq->rq_flags & RQF_DONTPREP)
- blk_mq_request_bypass_insert(rq, false, false);
- else
- blk_mq_sched_insert_request(rq, true, false, false);
+ blk_mq_sched_insert_request(rq, /*at_head=*/true, false, false);
}
while (!list_empty(&rq_list)) {
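Note: the hunk above makes blk_mq_requeue_work() send every requeued
request back through the I/O scheduler instead of bypassing it for
RQF_DONTPREP requests; merge avoidance for those requests is handled by
the RQF_NOMERGE_FLAGS hunk further down. For reference, the function
called here is declared in block/blk-mq-sched.h, in kernels of this
vintage, roughly as:

    void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                     bool run_queue, bool async);

so the call above inserts the request at the head of the scheduler queue
without immediately running the queue.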
@@ -2065,9 +2057,15 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
if (nr_budgets)
blk_mq_release_budgets(q, list);
- spin_lock(&hctx->lock);
- list_splice_tail_init(list, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ if (!q->elevator) {
+ spin_lock(&hctx->lock);
+ list_splice_tail_init(list, &hctx->dispatch);
+ spin_unlock(&hctx->lock);
+ } else {
+ q->elevator->type->ops.insert_requests(
+ hctx, list,
+ /*at_head=*/true);
+ }
/*
* Order adding requests to hctx->dispatch and checking
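Note: with this hunk, requests that blk_mq_dispatch_rq_list() failed to
dispatch are handed back to the elevator at the head of its queue;
hctx->dispatch is only used when no elevator is attached. The callback
invoked here is the elevator_mq_ops member that, in the kernels this
patch applies to (block/elevator.h), is declared roughly as:

    struct elevator_mq_ops {
            /* ... */
            void (*insert_requests)(struct blk_mq_hw_ctx *hctx,
                                    struct list_head *list, bool at_head);
            /* ... */
    };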
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -64,8 +64,9 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_RESV ((__force req_flags_t)(1 << 23))
/* flags that prevent us from merging requests: */
-#define RQF_NOMERGE_FLAGS \
- (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)
+#define RQF_NOMERGE_FLAGS \
+ (RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_DONTPREP | \
+ RQF_SPECIAL_PAYLOAD)
enum mq_rq_state {
MQ_RQ_IDLE = 0,
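Note: adding RQF_DONTPREP to RQF_NOMERGE_FLAGS preserves the no-merge
guarantee that the removed blk_mq_request_bypass_insert() call used to
provide, because merge candidates are filtered through rq_mergeable().
Sketched from include/linux/blk-mq.h, with the unrelated checks elided:

    static inline bool rq_mergeable(struct request *rq)
    {
            /* ... request-type and operation checks elided ... */

            if (rq->rq_flags & RQF_NOMERGE_FLAGS)
                    return false;

            /* ... */
            return true;
    }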