@@ -3291,13 +3291,14 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 		set->nr_hw_queues = 1;
 		set->nr_maps = 1;
 		set->queue_depth = min(64U, set->queue_depth);
+	} else {
+		/*
+		 * There is no use for more h/w queues than cpus
+		 * if we just have a single map
+		 */
+		if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
+			set->nr_hw_queues = nr_cpu_ids;
 	}
-	/*
-	 * There is no use for more h/w queues than cpus if we just have
-	 * a single map
-	 */
-	if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
-		set->nr_hw_queues = nr_cpu_ids;
 
 	if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0)
 		return -ENOMEM;
@@ -3309,7 +3310,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
 				GFP_KERNEL, set->numa_node);
 		if (!set->map[i].mq_map)
 			goto out_free_mq_map;
-		set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
+		set->map[i].nr_queues = set->nr_hw_queues;
 	}
 
 	ret = blk_mq_update_queue_map(set);
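
For review convenience, here is a sketch of how the touched region of
blk_mq_alloc_tag_set() would read with both hunks applied. It is
reconstructed from the diff above; the opening is_kdump_kernel() check
sits just outside the hunk context and is inferred from the removed
ternary and the commit message below:

	if (is_kdump_kernel()) {
		set->nr_hw_queues = 1;
		set->nr_maps = 1;
		set->queue_depth = min(64U, set->queue_depth);
	} else {
		/*
		 * There is no use for more h/w queues than cpus
		 * if we just have a single map
		 */
		if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
			set->nr_hw_queues = nr_cpu_ids;
	}

	if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0)
		return -ENOMEM;

Read this way, the kdump path and the CPU-count clamp become mutually
exclusive, so blk_mq_realloc_tag_set_tags() sizes the tag set exactly
once, with the final queue count.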
Do not update nr_hw_queues again after setting it to 1 for a kdump
kernel. This avoids allocating a tag set of size nr_cpu_ids but then
using just one tag set.

Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
Hi,

I stumbled across this and it didn't make sense to me that we might
allocate more tag sets than we potentially use. But maybe I am not
seeing the obvious thing. Only compile tested.

Thanks,
Daniel

 block/blk-mq.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
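
P.S. To illustrate the intent, below is a minimal userspace sketch of
the post-patch decision, not the kernel source: settle_nr_hw_queues()
is a hypothetical helper, and the nr_cpu_ids value is an illustrative
stand-in for the kernel symbol.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the kernel's nr_cpu_ids; the value is illustrative. */
static const unsigned int nr_cpu_ids = 8;

/* Mirrors the post-patch branch structure in blk_mq_alloc_tag_set(). */
static unsigned int settle_nr_hw_queues(bool is_kdump, unsigned int nr_maps,
					unsigned int nr_hw_queues)
{
	if (is_kdump) {
		/* kdump: a single queue, never adjusted again */
		return 1;
	}
	/* no use for more h/w queues than cpus with a single map */
	if (nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
		nr_hw_queues = nr_cpu_ids;
	return nr_hw_queues;
}

int main(void)
{
	/* kdump kernel: the tag set is sized for exactly one queue */
	printf("kdump:  %u\n", settle_nr_hw_queues(true, 1, 32));
	/* normal kernel, single map: clamped to the CPU count */
	printf("normal: %u\n", settle_nr_hw_queues(false, 1, 32));
	return 0;
}

Build and run with something like "cc sketch.c && ./a.out"; the kdump
case should print 1, while the normal single-map case prints the CPU
count (8 in this sketch).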