@@ -99,12 +99,24 @@ static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
static inline unsigned blk_mq_sched_queue_depth(struct request_queue *q)
{
/*
- * Default to double of smaller one between hw queue_depth and 128,
+ * q->queue_depth is closer to the scheduler queue, so use it
+ * as the hint for computing the scheduler queue depth when it
+ * is valid
+ */
+ unsigned q_depth = q->queue_depth ?: q->tag_set->queue_depth;
+
+ /*
+ * Default to twice the smaller of the queue depth and 128,
* since we don't split into sync/async like the old code did.
* Additionally, this is a per-hw queue depth.
*/
- return 2 * min_t(unsigned int, q->tag_set->queue_depth,
- BLKDEV_MAX_RQ);
+ q_depth = 2 * min_t(unsigned int, q_depth, BLKDEV_MAX_RQ);
+
+ /*
+ * When the driver's queue depth is too small, bump the
+ * scheduler queue depth up to 32 so that devices with small
+ * queues can still benefit from I/O merging.
+ */
+ return max_t(unsigned, q_depth, 32);
}
#endif
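
As a quick sanity check of the sizing above, the following standalone
sketch mirrors the helper's arithmetic with plain C stand-ins for the
kernel's min_t/max_t and the ?: extension; the sample depths are made
up for illustration:

#include <stdio.h>

#define BLKDEV_MAX_RQ	128	/* same cap as the kernel constant */

static unsigned int sched_queue_depth(unsigned int q_queue_depth,
				      unsigned int tag_set_depth)
{
	/* prefer q->queue_depth when the driver has set it */
	unsigned int d = q_queue_depth ? q_queue_depth : tag_set_depth;

	/* double the smaller of the depth and BLKDEV_MAX_RQ... */
	d = 2 * (d < BLKDEV_MAX_RQ ? d : BLKDEV_MAX_RQ);

	/* ...but never go below 32, so tiny queues still merge */
	return d > 32 ? d : 32;
}

int main(void)
{
	printf("%u\n", sched_queue_depth(0, 256));  /* 256: falls back to tag_set depth, capped at 128, doubled */
	printf("%u\n", sched_queue_depth(62, 256)); /* 124: 2 * 62 */
	printf("%u\n", sched_queue_depth(1, 64));   /* 32: floored so merging still works */
	return 0;
}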
@@ -2593,7 +2593,9 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
}
EXPORT_SYMBOL(blk_mq_free_tag_set);
-int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+static int __blk_mq_update_nr_requests(struct request_queue *q,
+ bool sched_only,
+ unsigned int nr)
{
struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
@@ -2612,7 +2614,7 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
* If we're using an MQ scheduler, just update the scheduler
* queue depth. This is similar to what the old code would do.
*/
- if (!hctx->sched_tags) {
+ if (!sched_only && !hctx->sched_tags) {
ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
min(nr, set->queue_depth),
false);
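
The net effect of the sched_only flag on the per-hctx loop is easier to
see condensed. The sketch below is a reconstruction from the hunks here
and the surrounding function, not a verbatim quote; note that the
caller passes sched_only=true only when q->elevator (and hence
sched_tags) is present:

	if (!sched_only && !hctx->sched_tags)
		/* no elevator: resize the driver tags, clamped to the set */
		ret = blk_mq_tag_update_depth(hctx, &hctx->tags,
					      min(nr, set->queue_depth),
					      false);
	else
		/* elevator attached, or sched_only: resize scheduler tags */
		ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags,
					      nr, true);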
@@ -2632,6 +2634,27 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return ret;
}
+int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+{
+ return __blk_mq_update_nr_requests(q, false, nr);
+}
+
+/*
+ * When a driver updates q->queue_depth, this API is called so
+ * that the new queue depth can be used as a hint for adjusting
+ * the scheduler queue depth.
+ */
+int blk_mq_update_sched_queue_depth(struct request_queue *q)
+{
+ unsigned nr;
+
+ if (!q->mq_ops || !q->elevator)
+ return 0;
+
+ nr = blk_mq_sched_queue_depth(q);
+ return __blk_mq_update_nr_requests(q, true, nr);
+}
+
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int nr_hw_queues)
{
@@ -36,6 +36,7 @@ bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
bool wait);
struct request *blk_mq_dispatch_rq_from_ctxs(struct blk_mq_hw_ctx *hctx);
+int blk_mq_update_sched_queue_depth(struct request_queue *q);
/*
* Internal helpers for allocating/freeing the request map
@@ -877,6 +877,8 @@ void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
q->queue_depth = depth;
wbt_set_queue_depth(q->rq_wb, depth);
+
+ WARN_ON(blk_mq_update_sched_queue_depth(q));
}
EXPORT_SYMBOL(blk_set_queue_depth);
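
With the hook in place, any driver that renegotiates its depth at
runtime picks up the scheduler-tag resize automatically. A minimal
driver-side sketch, with the callback name invented for illustration
(only blk_set_queue_depth() is part of the real API):

static int mydrv_change_queue_depth(struct request_queue *q,
				    unsigned int depth)
{
	/*
	 * Records depth in q->queue_depth, updates writeback
	 * throttling, and now also resizes the scheduler tags via
	 * blk_mq_update_sched_queue_depth().
	 */
	blk_set_queue_depth(q, depth);
	return 0;
}

Note that blk_mq_update_sched_queue_depth() returns 0 early for queues
without mq_ops or an elevator, so the WARN_ON() above only fires when
resizing the scheduler tags actually fails.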