diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4962,7 +4962,7 @@ EXPORT_SYMBOL_GPL(nvme_remove_admin_tag_set);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int nr_maps,
- unsigned int cmd_size)
+ unsigned int cmd_size, unsigned int nr_hw_queues)
{
int ret;

memset(set, 0, sizeof(*set));
@@ -4983,9 +4983,10 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
set->flags |= BLK_MQ_F_BLOCKING;
set->cmd_size = cmd_size,
set->driver_data = ctrl;
- set->nr_hw_queues = ctrl->queue_count - 1;
+ set->nr_hw_queues = nr_hw_queues - 1;
set->timeout = NVME_IO_TIMEOUT;
set->nr_maps = nr_maps;
+
ret = blk_mq_alloc_tag_set(set);
if (ret)
return ret;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2918,7 +2918,8 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
&nvme_fc_mq_ops, 1,
struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz));
+ ctrl->lport->ops->fcprqst_priv_sz),
+ ctrl->ctrl.queue_count);
if (ret)
return ret;

diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -748,7 +748,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int nr_maps,
- unsigned int cmd_size);
+ unsigned int cmd_size, unsigned int nr_hw_queues);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3033,7 +3033,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)

if (dev->online_queues > 1) {
nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
- nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
+ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod),
+ dev->ctrl.queue_count);
nvme_dbbuf_set(dev);
}

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -795,7 +795,7 @@ static int nvme_rdma_alloc_tag_set(struct nvme_ctrl *ctrl)
return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set,
&nvme_rdma_mq_ops,
ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
- cmd_size);
+ cmd_size, ctrl->queue_count);
}

static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1893,7 +1893,8 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
&nvme_tcp_mq_ops,
ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
- sizeof(struct nvme_tcp_request));
+ sizeof(struct nvme_tcp_request),
+ ctrl->queue_count);
if (ret)
goto out_free_io_queues;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -496,7 +496,8 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
&nvme_loop_mq_ops, 1,
sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist),
+ ctrl->ctrl.queue_count);
if (ret)
goto out_destroy_queues;

Allow nvme_alloc_io_tag_set() to take nr_hw_queues as a parameter, and
update all callers accordingly. This is in preparation for introducing
queues which do not have to be registered with the block layer.

Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
---
 drivers/nvme/host/core.c   | 5 +++--
 drivers/nvme/host/fc.c     | 3 ++-
 drivers/nvme/host/nvme.h   | 2 +-
 drivers/nvme/host/pci.c    | 3 ++-
 drivers/nvme/host/rdma.c   | 2 +-
 drivers/nvme/host/tcp.c    | 3 ++-
 drivers/nvme/target/loop.c | 3 ++-
 7 files changed, 13 insertions(+), 8 deletions(-)
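For reference, here is a minimal sketch of the new calling convention.
The "foo" transport below is purely hypothetical; nvme_foo_ctrl,
nvme_foo_mq_ops, and nvme_foo_request are placeholder names, not
symbols from this patch or the kernel tree:

/*
 * Sketch of a caller after this change. The new last argument is the
 * controller's total queue count (I/O queues plus the admin queue);
 * nvme_alloc_io_tag_set() subtracts one internally when it sets
 * set->nr_hw_queues, preserving the old ctrl->queue_count - 1
 * behavior for all existing callers.
 */
static int nvme_foo_alloc_tag_set(struct nvme_foo_ctrl *ctrl)
{
	return nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
			&nvme_foo_mq_ops, 2,
			sizeof(struct nvme_foo_request),
			ctrl->ctrl.queue_count);
}

A transport that wants some queues to stay invisible to the block
layer could presumably pass a smaller value here and manage the extra
queues itself, which matches the stated motivation for this change.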