@@ -803,7 +803,6 @@ static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
{
enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
- struct ib_device *dev = ch->qp->pd->device;
struct scatterlist *prev = NULL;
u32 max_sge;
unsigned prev_nents;
@@ -818,8 +817,7 @@ static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
return -ENOMEM;
}
- max_sge = dir == DMA_TO_DEVICE ? dev->attrs.max_sge :
- dev->attrs.max_sge_rd;
+ max_sge = dir == DMA_TO_DEVICE ? ch->max_send_sge : ch->max_recv_sge;
for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
@@ -1607,6 +1605,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
struct ib_qp_init_attr *qp_init;
struct srpt_port *sport = ch->sport;
struct srpt_device *sdev = sport->sdev;
+ const struct ib_device_attr *attrs = &sdev->device->attrs;
u32 srp_sq_size = sport->port_attrib.srp_sq_size;
int ret;
@@ -1644,7 +1643,7 @@ retry:
*/
qp_init->cap.max_send_wr = srp_sq_size / 2;
qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
- qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+ qp_init->cap.max_send_sge = min(attrs->max_sge, SRPT_MAX_SG_PER_WQE);
qp_init->port_num = ch->sport->port;
ch->qp = ib_create_qp(sdev->pd, qp_init);
@@ -1663,8 +1662,14 @@ retry:
atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
- pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
- __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
+ /*
+ * qp_init->cap.max_recv_sge is not relevant when an SRQ is attached,
+ * so use max_send_sge for the receive side as well.
+ */
+ ch->max_send_sge = qp_init->cap.max_send_sge;
+ ch->max_recv_sge = qp_init->cap.max_send_sge;
+ pr_debug("%s: max_cqe= %d max_sge= %d w %d r sq_size = %d cm_id= %p\n",
+ __func__, ch->cq->cqe, ch->max_send_sge, ch->max_recv_sge,
qp_init->cap.max_send_wr, ch->cm_id);
ret = srpt_init_ch_qp(ch, ch->qp);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -106,7 +106,7 @@ enum {
SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
SRPT_DEF_SG_TABLESIZE = 128,
- SRPT_DEF_SG_PER_WQE = 16,
+ SRPT_MAX_SG_PER_WQE = 16,
MIN_SRPT_SQ_SIZE = 16,
DEF_SRPT_SQ_SIZE = 4096,
@@ -235,6 +235,8 @@ enum rdma_ch_state {
* @cq: IB completion queue for this channel.
* @rq_size: IB receive queue size.
* @rsp_size IB response message size in bytes.
+ * @max_send_sge: Maximum SG elements per WR for send requests posted on @qp.
+ * @max_recv_sge: Maximum SG elements per WR for recv requests posted on @qp.
* @sq_wr_avail: number of work requests available in the send queue.
* @sport: pointer to the information of the HCA port used by this
* channel.
@@ -265,6 +267,8 @@ struct srpt_rdma_ch {
struct kref kref;
int rq_size;
u32 rsp_size;
+ int max_send_sge;
+ int max_recv_sge;
atomic_t sq_wr_avail;
struct srpt_port *sport;
u8 i_port_id[16];
Limit the number of SG elements per work request to what the HCA and the
queue pair support.

Fixes: 34693573fde0 ("IB/srpt: Reduce QP buffer size")
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: <stable@vger.kernel.org> #v4.7+
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: Nicholas Bellinger <nab@linux-iscsi.org>
Cc: Parav Pandit <pandit.parav@gmail.com>
Cc: Laurence Oberman <loberman@redhat.com>
---
 drivers/infiniband/ulp/srpt/ib_srpt.c | 17 +++++++++++------
 drivers/infiniband/ulp/srpt/ib_srpt.h |  6 +++++-
 2 files changed, 16 insertions(+), 7 deletions(-)
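
For reference, the following is a minimal stand-alone C sketch of the clamping
idea the patch implements; it is not driver code. The example_ch struct, the
min_int() helper and the hca_max_sge value of 30 are made up for illustration;
in the driver the limit comes from the HCA's device attributes and the result
is stored in struct srpt_rdma_ch.

/*
 * Stand-alone illustration only -- not part of the patch.
 */
#include <stdio.h>

#define SRPT_MAX_SG_PER_WQE 16	/* upper bound chosen by the driver */

struct example_ch {
	int max_send_sge;
	int max_recv_sge;
};

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	struct example_ch ch;
	int hca_max_sge = 30;	/* hypothetical HCA max_sge attribute */

	/* Clamp what gets requested in qp_init->cap.max_send_sge. */
	ch.max_send_sge = min_int(hca_max_sge, SRPT_MAX_SG_PER_WQE);

	/*
	 * With an SRQ the QP has no meaningful recv SGE limit of its own,
	 * so the send-side limit is reused for the receive side.
	 */
	ch.max_recv_sge = ch.max_send_sge;

	printf("max_send_sge=%d max_recv_sge=%d\n",
	       ch.max_send_sge, ch.max_recv_sge);
	return 0;
}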