@@ -130,7 +130,7 @@ static int blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
* in blk_mq_dispatch_rq_list().
*/
list_add(&rq->queuelist, &rq_list);
- } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true));
+ } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true, 1));
return ret;
}
@@ -198,7 +198,7 @@ static int blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
/* round robin for fair dispatch */
ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
- } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true));
+ } while (blk_mq_dispatch_rq_list(rq->mq_hctx, &rq_list, true, 1));
WRITE_ONCE(hctx->dispatch_from, ctx);
return ret;
@@ -238,7 +238,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
*/
if (!list_empty(&rq_list)) {
blk_mq_sched_mark_restart_hctx(hctx);
- if (blk_mq_dispatch_rq_list(hctx, &rq_list, false)) {
+ if (blk_mq_dispatch_rq_list(hctx, &rq_list, false, 0)) {
if (has_sched_dispatch)
ret = blk_mq_do_dispatch_sched(hctx);
else
@@ -251,7 +251,7 @@ static int __blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
ret = blk_mq_do_dispatch_ctx(hctx);
} else {
blk_mq_flush_busy_ctxs(hctx, &rq_list);
- blk_mq_dispatch_rq_list(hctx, &rq_list, false);
+ blk_mq_dispatch_rq_list(hctx, &rq_list, false, 0);
}
return ret;
@@ -1212,7 +1212,8 @@ static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq,
*/
if (!blk_mq_mark_tag_wait(hctx, rq)) {
/* budget is always obtained before getting tag */
- blk_mq_put_dispatch_budget(rq->q);
+ if (ask_budget)
+ blk_mq_put_dispatch_budget(rq->q);
return PREP_DISPATCH_NO_TAG;
}
}
@@ -1233,12 +1234,17 @@ static blk_status_t blk_mq_dispatch_rq(struct request *rq, bool is_last)
static void blk_mq_handle_partial_dispatch(struct blk_mq_hw_ctx *hctx,
struct list_head *list, enum prep_dispatch prep,
- blk_status_t ret, bool queued)
+ blk_status_t ret, bool queued, unsigned budgets)
{
struct request_queue *q = hctx->queue;
bool needs_restart;
bool no_tag = false;
bool no_budget_avail = false;
+ unsigned i = 0;
+
+	/* release budgets that were obtained but not consumed by dispatch */
+ while (i++ < budgets)
+ blk_mq_put_dispatch_budget(hctx->queue);
/*
* For non-shared tags, the RESTART check
@@ -1298,9 +1304,11 @@ static void blk_mq_handle_partial_dispatch(struct blk_mq_hw_ctx *hctx,
/*
* Returns true if we did some work AND can potentially do more.
+ *
+ * @budgets is only valid when @got_budget is true.
*/
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
- bool got_budget)
+ bool got_budget, unsigned int budgets)
{
enum prep_dispatch prep;
struct request *rq;
@@ -1360,7 +1368,9 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
* that is where we will continue on next queue run.
*/
if (!list_empty(list)) {
- blk_mq_handle_partial_dispatch(hctx, list, prep, ret, !!queued);
+ blk_mq_handle_partial_dispatch(hctx, list, prep, ret,
+ !!queued,
+ got_budget ? budgets - queued : 0);
blk_mq_update_dispatch_busy(hctx, true);
return false;
} else
@@ -40,7 +40,8 @@ struct blk_mq_ctx {
void blk_mq_exit_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *, bool);
+bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *,
+ bool, unsigned int);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
bool kick_requeue_list);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
Pass the obtained budget count to blk_mq_dispatch_rq_list(), in preparation for supporting fully batched submission. With the budget count available, it becomes straightforward to release any unused budgets when .queue_rq fails partway through the list. Cc: Sagi Grimberg <sagi@grimberg.me> Cc: Baolin Wang <baolin.wang7@gmail.com> Cc: Christoph Hellwig <hch@infradead.org> Signed-off-by: Ming Lei <ming.lei@redhat.com> --- block/blk-mq-sched.c | 8 ++++---- block/blk-mq.c | 18 ++++++++++++++---- block/blk-mq.h | 3 ++- 3 files changed, 20 insertions(+), 9 deletions(-)