@@ -340,8 +340,6 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	bool clear_ctx_on_error = false;
 	u64 alloc_time_ns = 0;
 
-	blk_queue_enter_live(q);
-
 	/* alloc_time includes depth and tag waits */
 	if (blk_queue_rq_alloc_time(q))
 		alloc_time_ns = ktime_get_ns();
@@ -377,7 +375,6 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	if (tag == BLK_MQ_TAG_FAIL) {
 		if (clear_ctx_on_error)
 			data->ctx = NULL;
-		blk_queue_exit(q);
 		return NULL;
 	}
 
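For readers following the reference counting: both helpers are thin wrappers around the queue's percpu usage counter. A simplified sketch of the pre-existing helpers, roughly as they read in block/blk.h and block/blk-core.c at the time of this patch (not part of the change):

	/* Sketch of the existing helpers, not part of this patch. */
	static inline void blk_queue_enter_live(struct request_queue *q)
	{
		/*
		 * Grab a q_usage_counter reference without checking for a
		 * frozen queue; only safe when the caller already
		 * guarantees the queue is live, e.g. because it holds a
		 * reference of its own.
		 */
		percpu_ref_get(&q->q_usage_counter);
	}

	void blk_queue_exit(struct request_queue *q)
	{
		percpu_ref_put(&q->q_usage_counter);
	}

With the two hunks above, blk_mq_get_request no longer takes or drops that reference itself; every caller must now enter before the call and, on failure, exit afterwards, which is exactly what the remaining hunks add.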
@@ -407,11 +404,14 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
 	if (ret)
 		return ERR_PTR(ret);
 
+	blk_queue_enter_live(q);
 	rq = blk_mq_get_request(q, NULL, &alloc_data);
 	blk_queue_exit(q);
 
-	if (!rq)
+	if (!rq) {
+		blk_queue_exit(q);
 		return ERR_PTR(-EWOULDBLOCK);
+	}
 
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
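The double blk_queue_exit() on the failure path is deliberate: at this point the function holds two usage references, one from the earlier blk_queue_enter() and one from the new blk_queue_enter_live(). On success the request itself owns the live reference (dropped when the request is freed), so only the blk_queue_enter() reference is put back; on failure there is no request to own it, so both must be dropped. A sketch of how the function reads after the patch, with the surrounding lines assumed from the v5.7-era source:

	ret = blk_queue_enter(q, flags);	/* ref #1: guards allocation */
	if (ret)
		return ERR_PTR(ret);

	blk_queue_enter_live(q);		/* ref #2: for the request */
	rq = blk_mq_get_request(q, NULL, &alloc_data);
	blk_queue_exit(q);			/* drop ref #1 either way */

	if (!rq) {
		blk_queue_exit(q);		/* no rq to own ref #2 */
		return ERR_PTR(-EWOULDBLOCK);
	}
	/* ref #2 travels with rq until the request is freed */

The blk_mq_alloc_request_hctx hunk below follows the same two-reference pattern.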
@@ -456,11 +456,14 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
 	alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
 
+	blk_queue_enter_live(q);
 	rq = blk_mq_get_request(q, NULL, &alloc_data);
 	blk_queue_exit(q);
 
-	if (!rq)
+	if (!rq) {
+		blk_queue_exit(q);
 		return ERR_PTR(-EWOULDBLOCK);
+	}
 
 	return rq;
 }
@@ -2038,8 +2041,10 @@ blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq_qos_throttle(q, bio);
 
 	data.cmd_flags = bio->bi_opf;
+	blk_queue_enter_live(q);
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
+		blk_queue_exit(q);
 		rq_qos_cleanup(q, bio);
 		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
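blk_mq_make_request is slightly different: there is no unconditional blk_queue_exit() here because the submission path (generic_make_request in this era) already holds a queue usage reference across ->make_request_fn and drops it itself. The blk_queue_enter_live() reference taken here belongs to the request on success; only the allocation-failure path has to put it back by hand, hence the single new blk_queue_exit(). A sketch of the resulting failure path, with the surrounding lines assumed from v5.7:

	blk_queue_enter_live(q);	/* ref owned by the request-to-be */
	rq = blk_mq_get_request(q, bio, &data);
	if (unlikely(!rq)) {
		blk_queue_exit(q);	/* allocation failed: put it back */
		rq_qos_cleanup(q, bio);
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);
		return BLK_QC_T_NONE;	/* caller still drops its own ref */
	}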
Move the blk_queue_enter_live calls into the callers, where they can
successively be cleaned up.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-mq.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)