@@ -38,6 +38,7 @@ struct nvmet_rdma_cmd {
struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
struct nvme_command *nvme_cmd;
struct nvmet_rdma_queue *queue;
+ struct ib_srq *srq;
};
enum {
@@ -461,8 +462,8 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
cmd->sge[0].addr, cmd->sge[0].length,
DMA_FROM_DEVICE);
- if (ndev->srq)
- ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ if (cmd->srq)
+ ret = ib_post_srq_recv(cmd->srq, &cmd->wr, NULL);
else
ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
@@ -882,6 +883,7 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
ndev->srq_size = srq_size;
for (i = 0; i < srq_size; i++) {
+ ndev->srq_cmds[i].srq = srq;
ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
if (ret)
goto out_free_cmds;
This is a preparation patch for the SRQ per completion vector feature. Signed-off-by: Max Gurtovoy <maxg@mellanox.com> --- drivers/nvme/target/rdma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-)