diff mbox series

[for-next,11/13] RDMA/rxe: Extend rxe_req.c to support xrc qps

Message ID 20220917031211.21272-1-rpearsonhpe@gmail.com (mailing list archive)
State Superseded
Headers show
Series Implement the xrc transport | expand

Commit Message

Bob Pearson Sept. 17, 2022, 3:12 a.m. UTC
Extend code in rxe_req.c to support xrc qp types. Rename the local
ibwr variable to wr for consistency, set the XRCETH srq number when
the packet carries an XRCETH header, generalize the atomic opcode
test so it matches compare-and-swap for any transport, and treat
IB_QPT_XRC_INI the same as IB_QPT_RC in update_wqe_state() and in
the outstanding-PSN window check in rxe_requester().

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_req.c | 38 +++++++++++++++++------------
 1 file changed, 22 insertions(+), 16 deletions(-)
diff mbox series

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index d2a9abfed596..e7bb969f97f3 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -229,7 +229,7 @@  static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 {
 	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
 	struct sk_buff		*skb;
-	struct rxe_send_wr	*ibwr = &wqe->wr;
+	struct rxe_send_wr	*wr = &wqe->wr;
 	int			pad = (-payload) & 0x3;
 	int			paylen;
 	int			solicited;
@@ -246,13 +246,13 @@  static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 		return NULL;
 
 	/* init bth */
-	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
+	solicited = (wr->send_flags & IB_SEND_SOLICITED) &&
 			(pkt->mask & RXE_LAST_MASK) &&
 			((pkt->mask & (RXE_SEND_MASK)) ||
 			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
 			(RXE_WRITE_MASK | RXE_IMMDT_MASK));
 
-	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
+	qp_num = (pkt->mask & RXE_DETH_MASK) ? wr->wr.ud.remote_qpn :
 					 qp->attr.dest_qp_num;
 
 	ack_req = ((pkt->mask & RXE_LAST_MASK) ||
@@ -264,34 +264,37 @@  static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 		 ack_req, pkt->psn);
 
 	/* init optional headers */
+	if (pkt->mask & RXE_XRCETH_MASK)
+		xrceth_set_srqn(pkt, wr->srq_num);
+
 	if (pkt->mask & RXE_RETH_MASK) {
-		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
+		reth_set_rkey(pkt, wr->wr.rdma.rkey);
 		reth_set_va(pkt, wqe->iova);
 		reth_set_len(pkt, wqe->dma.resid);
 	}
 
 	if (pkt->mask & RXE_IMMDT_MASK)
-		immdt_set_imm(pkt, ibwr->ex.imm_data);
+		immdt_set_imm(pkt, wr->ex.imm_data);
 
 	if (pkt->mask & RXE_IETH_MASK)
-		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);
+		ieth_set_rkey(pkt, wr->ex.invalidate_rkey);
 
 	if (pkt->mask & RXE_ATMETH_MASK) {
 		atmeth_set_va(pkt, wqe->iova);
-		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
-			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
-			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
+		if ((opcode & IB_OPCODE_CMD) == IB_OPCODE_COMPARE_SWAP) {
+			atmeth_set_swap_add(pkt, wr->wr.atomic.swap);
+			atmeth_set_comp(pkt, wr->wr.atomic.compare_add);
 		} else {
-			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
+			atmeth_set_swap_add(pkt, wr->wr.atomic.compare_add);
 		}
-		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
+		atmeth_set_rkey(pkt, wr->wr.atomic.rkey);
 	}
 
 	if (pkt->mask & RXE_DETH_MASK) {
 		if (qp->ibqp.qp_num == 1)
 			deth_set_qkey(pkt, GSI_QKEY);
 		else
-			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
+			deth_set_qkey(pkt, wr->wr.ud.remote_qkey);
 		deth_set_sqp(pkt, qp->ibqp.qp_num);
 	}
 
@@ -338,8 +341,10 @@  static void update_wqe_state(struct rxe_qp *qp,
 		struct rxe_pkt_info *pkt)
 {
 	if (pkt->mask & RXE_LAST_MASK) {
-		if (qp_type(qp) == IB_QPT_RC)
+		if (qp_type(qp) == IB_QPT_RC ||
+		    qp_type(qp) == IB_QPT_XRC_INI)
 			wqe->state = wqe_state_pending;
+		/* other qp types handled in rxe_xmit_packet() */
 	} else {
 		wqe->state = wqe_state_processing;
 	}
@@ -532,9 +537,10 @@  int rxe_requester(void *arg)
 			goto done;
 	}
 
-	if (unlikely(qp_type(qp) == IB_QPT_RC &&
-		psn_compare(qp->req.psn, (qp->comp.psn +
-				RXE_MAX_UNACKED_PSNS)) > 0)) {
+	if (unlikely((qp_type(qp) == IB_QPT_RC ||
+		      qp_type(qp) == IB_QPT_XRC_INI) &&
+		     psn_compare(qp->req.psn, (qp->comp.psn +
+					RXE_MAX_UNACKED_PSNS)) > 0)) {
 		qp->req.wait_psn = 1;
 		goto exit;
 	}