@@ -533,19 +533,20 @@ static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
* Limit depths of async I/O and sync writes so as to counter both
* problems.
*/
-static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+static int bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct bfq_data *bfqd = data->q->elevator->elevator_data;
struct sbitmap_queue *bt;
+ int old_depth;

if (op_is_sync(op) && !op_is_write(op))
- return;
+ return 0;

if (data->flags & BLK_MQ_REQ_RESERVED) {
if (unlikely(!tags->nr_reserved_tags)) {
WARN_ON_ONCE(1);
- return;
+ return 0;
}
bt = &tags->breserved_tags;
} else
@@ -554,12 +555,18 @@ static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
if (unlikely(bfqd->sb_shift != bt->sb.shift))
bfq_update_depths(bfqd, bt);

+ old_depth = data->shallow_depth;
data->shallow_depth =
bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];

bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
__func__, bfqd->wr_busy_queues, op_is_sync(op),
data->shallow_depth);
+
+ if (old_depth != data->shallow_depth)
+ return data->shallow_depth;
+
+ return 0;
}

static struct bfq_queue *
@@ -433,17 +433,23 @@ static void rq_clear_domain_token(struct kyber_queue_data *kqd,
}
}

-static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
+static int kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
+ struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
+
+ if (op_is_sync(op))
+ return 0;
+
/*
* We use the scheduler tags as per-hardware queue queueing tokens.
* Async requests can be limited at this stage.
*/
- if (!op_is_sync(op)) {
- struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
-
+ if (data->shallow_depth != kqd->async_depth) {
data->shallow_depth = kqd->async_depth;
+ return data->shallow_depth;
}
+
+ return 0;
}

static void kyber_prepare_request(struct request *rq, struct bio *bio)
@@ -105,7 +105,7 @@ struct elevator_mq_ops {
int (*request_merge)(struct request_queue *q, struct request **, struct bio *);
void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
void (*requests_merged)(struct request_queue *, struct request *, struct request *);
- void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
+ int (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
void (*prepare_request)(struct request *, struct bio *bio);
void (*finish_request)(struct request *);
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
If the scheduler changes the shallow depth, then return the new depth.
No functional changes in this patch.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 block/bfq-iosched.c      | 13 ++++++++++---
 block/kyber-iosched.c    | 14 ++++++++++----
 include/linux/elevator.h |  2 +-
 3 files changed, 21 insertions(+), 8 deletions(-)
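For readers tracing the new contract, below is a minimal, self-contained
sketch (plain userspace C, deliberately not kernel code) of the behavior
this patch gives ->limit_depth(): the hook returns the new shallow depth
only when it actually changed it, and 0 otherwise, mirroring the kyber
logic above. All names here (alloc_data, sched_state, demo_limit_depth,
demo_alloc) are hypothetical stand-ins for illustration only; since this
patch is "no functional changes", no in-kernel caller consumes the
return value yet.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for blk_mq_alloc_data and per-scheduler state;
 * illustration-only types, not the kernel structures. */
struct alloc_data {
	unsigned int shallow_depth;	/* 0 = no limit applied yet */
};

struct sched_state {
	unsigned int async_depth;	/* tag depth allowed for async I/O */
};

static bool op_is_sync_demo(unsigned int op)
{
	return op & 1;			/* toy encoding: bit 0 = sync */
}

/*
 * Models the patched contract: limit async requests to async_depth and
 * return the new shallow depth only if it changed, 0 otherwise.
 */
static int demo_limit_depth(struct sched_state *st, unsigned int op,
			    struct alloc_data *data)
{
	if (op_is_sync_demo(op))
		return 0;		/* sync I/O is never limited */

	if (data->shallow_depth != st->async_depth) {
		data->shallow_depth = st->async_depth;
		return data->shallow_depth;
	}

	return 0;			/* depth already in place, no change */
}

/*
 * A hypothetical caller: with the int return, it can tell "depth was
 * just changed" apart from "depth was already correct" without
 * re-reading and comparing data->shallow_depth itself.
 */
static void demo_alloc(struct sched_state *st, unsigned int op,
		       struct alloc_data *data)
{
	int new_depth = demo_limit_depth(st, op, data);

	if (new_depth)
		printf("depth changed to %d\n", new_depth);
	else
		printf("depth unchanged (%u)\n", data->shallow_depth);
}

int main(void)
{
	struct sched_state st = { .async_depth = 8 };
	struct alloc_data data = { .shallow_depth = 0 };

	demo_alloc(&st, 0, &data);	/* async: depth changes to 8 */
	demo_alloc(&st, 0, &data);	/* async again: already 8, returns 0 */
	demo_alloc(&st, 1, &data);	/* sync: never limited, returns 0 */
	return 0;
}

Returning the depth itself rather than a plain "changed" flag means a
future caller can act on the new value directly, without having to
re-read and compare data->shallow_depth on its own.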