@@ -519,21 +519,12 @@ static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
- struct blk_mq_tag_set *set = q->tag_set;
- int ret;
+ hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
+ q->nr_requests);

- hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
- set->reserved_tags, set->flags);
if (!hctx->sched_tags)
return -ENOMEM;
-
- ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
- if (ret) {
- blk_mq_free_rq_map(hctx->sched_tags, set->flags);
- hctx->sched_tags = NULL;
- }
-
- return ret;
+ return 0;
}

/* called in queue's release handler, tagset has gone away */
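The scheduler path above now has a single failure mode: blk_mq_alloc_map_and_rqs() either hands back fully populated tags or NULL, with the partial-failure unwind hidden inside the helper. A minimal userspace model of that pattern follows; the struct and function names are toy stand-ins, not kernel API:

    #include <stdlib.h>

    /* Toy stand-ins for the tag map and its statically-mapped requests. */
    struct tags {
        unsigned int depth;
        void **rqs;                 /* one request slot per tag */
    };

    void free_map_and_rqs(struct tags *t)
    {
        if (t) {
            free(t->rqs);
            free(t);
        }
    }

    /*
     * Combined constructor: build the map, then the requests, and unwind
     * internally on failure so callers only ever test for NULL.
     */
    struct tags *alloc_map_and_rqs(unsigned int depth)
    {
        struct tags *t = calloc(1, sizeof(*t));

        if (!t)
            return NULL;
        t->rqs = calloc(depth, sizeof(*t->rqs));
        if (!t->rqs) {
            free(t);
            return NULL;
        }
        t->depth = depth;
        return t;
    }

    /* Caller shape after the patch: one call, one NULL check. */
    int sched_alloc(struct tags **sched_tags, unsigned int nr_requests)
    {
        *sched_tags = alloc_map_and_rqs(nr_requests);
        if (!*sched_tags)
            return -1;              /* -ENOMEM in the kernel */
        return 0;
    }
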
@@ -592,7 +592,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > tags->nr_tags) {
struct blk_mq_tag_set *set = hctx->queue->tag_set;
struct blk_mq_tags *new;
- bool ret;

if (!can_grow)
return -EINVAL;
@@ -604,15 +603,9 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
if (tdepth > MAX_SCHED_RQ)
return -EINVAL;

- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
- tags->nr_reserved_tags, set->flags);
+ new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
if (!new)
return -ENOMEM;
- ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
- if (ret) {
- blk_mq_free_rq_map(new, set->flags);
- return -ENOMEM;
- }

blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
blk_mq_free_rq_map(*tagsptr, set->flags);
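The depth-update path gets the same shape: build a complete replacement map at the new depth first, and tear the old one down only after the allocation has fully succeeded, so a failed grow leaves the existing tags usable. A sketch of that swap, reusing the toy struct tags and helpers from the model above:

    struct tags;
    struct tags *alloc_map_and_rqs(unsigned int depth);
    void free_map_and_rqs(struct tags *t);

    /* Grow *tagsptr to tdepth; on failure the old map stays in place. */
    int update_depth(struct tags **tagsptr, unsigned int tdepth)
    {
        struct tags *new = alloc_map_and_rqs(tdepth);

        if (!new)
            return -1;              /* -ENOMEM in the kernel */

        free_map_and_rqs(*tagsptr);
        *tagsptr = new;
        return 0;
    }
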
@@ -2383,11 +2383,11 @@ void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags)
blk_mq_free_tags(tags, flags);
}

-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
- unsigned int hctx_idx,
- unsigned int nr_tags,
- unsigned int reserved_tags,
- unsigned int flags)
+static struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
+ unsigned int hctx_idx,
+ unsigned int nr_tags,
+ unsigned int reserved_tags,
+ unsigned int flags)
{
struct blk_mq_tags *tags;
int node;
@@ -2435,8 +2435,9 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
return 0;
}

-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
- unsigned int hctx_idx, unsigned int depth)
+static int blk_mq_alloc_rqs(struct blk_mq_tag_set *set,
+ struct blk_mq_tags *tags,
+ unsigned int hctx_idx, unsigned int depth)
{
unsigned int i, j, entries_per_page, max_order = 4;
size_t rq_size, left;
@@ -2847,25 +2848,34 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
}
}

-static bool __blk_mq_alloc_map_and_request(struct blk_mq_tag_set *set,
- int hctx_idx)
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+ unsigned int hctx_idx,
+ unsigned int depth)
{
- unsigned int flags = set->flags;
- int ret = 0;
+ struct blk_mq_tags *tags;
+ int ret;

- set->tags[hctx_idx] = blk_mq_alloc_rq_map(set, hctx_idx,
- set->queue_depth, set->reserved_tags, flags);
- if (!set->tags[hctx_idx])
- return false;
+ tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags,
+ set->flags);
+ if (!tags)
+ return NULL;

- ret = blk_mq_alloc_rqs(set, set->tags[hctx_idx], hctx_idx,
- set->queue_depth);
- if (!ret)
- return true;
+ ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
+ if (ret) {
+ blk_mq_free_rq_map(tags, set->flags);
+ return NULL;
+ }

- blk_mq_free_rq_map(set->tags[hctx_idx], flags);
- set->tags[hctx_idx] = NULL;
- return false;
+ return tags;
+}
+
+static bool __blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+ int hctx_idx)
+{
+ set->tags[hctx_idx] = blk_mq_alloc_map_and_rqs(set, hctx_idx,
+ set->queue_depth);
+
+ return set->tags[hctx_idx];
}

static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
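Two things are worth noting in the hunk above: the exported helper returns the tags pointer rather than filling a slot, and the thin per-set wrapper stores that pointer into set->tags[] and then returns it from a bool function, relying on C's implicit pointer-to-bool conversion (NULL becomes false). A toy equivalent, with an illustrative fixed-size table:

    #include <stdbool.h>

    struct tags;
    struct tags *alloc_map_and_rqs(unsigned int depth);

    #define NR_HW_QUEUES 4          /* arbitrary size for the sketch */
    static struct tags *tag_table[NR_HW_QUEUES];

    /* Store first, then report success via pointer-to-bool conversion. */
    bool alloc_into_table(unsigned int idx, unsigned int depth)
    {
        tag_table[idx] = alloc_map_and_rqs(depth);

        return tag_table[idx];      /* NULL converts to false */
    }
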
@@ -2910,7 +2920,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
hctx_idx = set->map[j].mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
- !__blk_mq_alloc_map_and_request(set, hctx_idx)) {
+ !__blk_mq_alloc_map_and_rqs(set, hctx_idx)) {
/*
* If tags initialization fail for some hctx,
* that hctx won't be brought online. In this
@@ -3343,7 +3353,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
int i;

for (i = 0; i < set->nr_hw_queues; i++) {
- if (!__blk_mq_alloc_map_and_request(set, i))
+ if (!__blk_mq_alloc_map_and_rqs(set, i))
goto out_unwind;
cond_resched();
}
@@ -3362,7 +3372,7 @@ static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
* may reduce the depth asked for, if memory is tight. set->queue_depth
* will be updated to reflect the allocated depth.
*/
-static int blk_mq_alloc_map_and_requests(struct blk_mq_tag_set *set)
+static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
{
unsigned int depth;
int err;
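The renamed blk_mq_alloc_set_map_and_rqs() keeps the fallback the comment describes: when memory is tight it retries at smaller depths and reports the depth it actually achieved through set->queue_depth. A toy version of that retry-halving loop; the floor constant is arbitrary here, whereas the kernel derives its minimum from the reserved tag count:

    struct tags;
    struct tags *alloc_map_and_rqs(unsigned int depth);

    #define DEPTH_FLOOR 4           /* arbitrary for the sketch */

    /* Halve the requested depth on failure; *depthp ends at the depth used. */
    int alloc_with_fallback(struct tags **out, unsigned int *depthp)
    {
        while (*depthp >= DEPTH_FLOOR) {
            *out = alloc_map_and_rqs(*depthp);
            if (*out)
                return 0;
            *depthp >>= 1;          /* memory tight: ask for less */
        }

        *out = NULL;
        return -1;                  /* -ENOMEM in the kernel */
    }
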
@@ -3528,7 +3538,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
if (ret)
goto out_free_mq_map;

- ret = blk_mq_alloc_map_and_requests(set);
+ ret = blk_mq_alloc_set_map_and_rqs(set);
if (ret)
goto out_free_mq_map;

@@ -55,13 +55,8 @@ void blk_mq_put_rq_ref(struct request *rq);
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);
-struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
- unsigned int hctx_idx,
- unsigned int nr_tags,
- unsigned int reserved_tags,
- unsigned int flags);
-int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
- unsigned int hctx_idx, unsigned int depth);
+struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
+ unsigned int hctx_idx, unsigned int depth);

/*
* Internal helpers for request insertion into sw queues
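With the two fine-grained allocators gone static, the header now exposes a single combined constructor alongside the unchanged two-step teardown (free the requests, then the map). The resulting surface, sketched with stand-in types but the function names taken from the patch:

    struct blk_mq_tag_set;          /* stand-in declarations for the sketch */
    struct blk_mq_tags;

    struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
                                                 unsigned int hctx_idx,
                                                 unsigned int depth);
    void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
                         unsigned int hctx_idx);
    void blk_mq_free_rq_map(struct blk_mq_tags *tags, unsigned int flags);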