[for-next,v3,2/8] RDMA/rxe: Isolate code to fill request roce headers

Message ID: 20230727192831.65495-3-rpearsonhpe@gmail.com
State: Changes Requested
Delegated to: Jason Gunthorpe
Series: RDMA/rxe: Misc cleanups

Commit Message

Bob Pearson July 27, 2023, 7:28 p.m. UTC
Isolate the code that fills in the RoCE headers of a request packet into
a new subroutine named rxe_init_roce_hdrs().

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_req.c | 108 +++++++++++++++-------------
 1 file changed, 57 insertions(+), 51 deletions(-)
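
At a glance, the patch reduces init_req_packet() to length/skb setup plus a
single call into the new helper. A condensed sketch of the resulting shape
(summarized from the diff below, not a literal copy; error handling and the
helper body appear in full in the patch):

	/* Condensed from the diff below. All RoCE header setup (BTH, RETH,
	 * FETH, IMMDT, IETH, ATMETH, DETH) now lives in rxe_init_roce_hdrs().
	 */
	static struct sk_buff *init_req_packet(struct rxe_qp *qp,
					       struct rxe_av *av,
					       struct rxe_send_wqe *wqe,
					       int opcode, u32 payload,
					       struct rxe_pkt_info *pkt)
	{
		struct sk_buff *skb;

		/* compute pkt->pad and pkt->paylen, then allocate the skb */
		skb = rxe_init_packet(to_rdev(qp->ibqp.device), av,
				      pkt->paylen, pkt);
		if (unlikely(!skb))
			return NULL;

		rxe_init_roce_hdrs(qp, wqe, pkt);	/* fill all RoCE headers */

		return skb;
	}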
Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 31858761ca1e..6e9c8da001a4 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -411,86 +411,92 @@  static inline int get_mtu(struct rxe_qp *qp)
 	return rxe->port.mtu_cap;
 }
 
-static struct sk_buff *init_req_packet(struct rxe_qp *qp,
-				       struct rxe_av *av,
-				       struct rxe_send_wqe *wqe,
-				       int opcode, u32 payload,
-				       struct rxe_pkt_info *pkt)
+static void rxe_init_roce_hdrs(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
+			       struct rxe_pkt_info *pkt)
 {
-	struct rxe_dev		*rxe = to_rdev(qp->ibqp.device);
-	struct sk_buff		*skb;
-	struct rxe_send_wr	*ibwr = &wqe->wr;
-	int			solicited;
-	u32			qp_num;
-	int			ack_req;
-
-	/* length from start of bth to end of icrc */
-	pkt->pad = (-payload) & 0x3;
-	pkt->paylen = rxe_opcode[opcode].length + payload +
-			pkt->pad + RXE_ICRC_SIZE;
-
-	/* init skb */
-	skb = rxe_init_packet(rxe, av, pkt->paylen, pkt);
-	if (unlikely(!skb))
-		return NULL;
+	struct rxe_send_wr *wr = &wqe->wr;
+	int is_send;
+	int is_write_imm;
+	int is_end;
+	int solicited;
+	u32 dst_qpn;
+	u32 qkey;
+	int ack_req;
 
 	/* init bth */
-	solicited = (ibwr->send_flags & IB_SEND_SOLICITED) &&
-			(pkt->mask & RXE_END_MASK) &&
-			((pkt->mask & (RXE_SEND_MASK)) ||
-			(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
-			(RXE_WRITE_MASK | RXE_IMMDT_MASK));
-
-	qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
-					 qp->attr.dest_qp_num;
-
-	ack_req = ((pkt->mask & RXE_END_MASK) ||
-		(qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
+	is_send = pkt->mask & RXE_SEND_MASK;
+	is_write_imm = (pkt->mask & RXE_WRITE_MASK) &&
+		       (pkt->mask & RXE_IMMDT_MASK);
+	is_end = pkt->mask & RXE_END_MASK;
+	solicited = (wr->send_flags & IB_SEND_SOLICITED) && is_end &&
+		    (is_send || is_write_imm);
+	dst_qpn = (pkt->mask & RXE_DETH_MASK) ? wr->wr.ud.remote_qpn :
+					       qp->attr.dest_qp_num;
+	ack_req = is_end || (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK);
 	if (ack_req)
 		qp->req.noack_pkts = 0;
 
 	bth_init(pkt, pkt->opcode, solicited, 0, pkt->pad,
-		 IB_DEFAULT_PKEY_FULL, qp_num,
-		 ack_req, pkt->psn);
+		 IB_DEFAULT_PKEY_FULL, dst_qpn, ack_req, pkt->psn);
 
-	/* init optional headers */
+	/* init extended headers */
 	if (pkt->mask & RXE_RETH_MASK) {
 		if (pkt->mask & RXE_FETH_MASK)
-			reth_set_rkey(pkt, ibwr->wr.flush.rkey);
+			reth_set_rkey(pkt, wr->wr.flush.rkey);
 		else
-			reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
+			reth_set_rkey(pkt, wr->wr.rdma.rkey);
 		reth_set_va(pkt, wqe->iova);
 		reth_set_len(pkt, wqe->dma.resid);
 	}
 
-	/* Fill Flush Extension Transport Header */
 	if (pkt->mask & RXE_FETH_MASK)
-		feth_init(pkt, ibwr->wr.flush.type, ibwr->wr.flush.level);
+		feth_init(pkt, wr->wr.flush.type, wr->wr.flush.level);
 
 	if (pkt->mask & RXE_IMMDT_MASK)
-		immdt_set_imm(pkt, ibwr->ex.imm_data);
+		immdt_set_imm(pkt, wr->ex.imm_data);
 
 	if (pkt->mask & RXE_IETH_MASK)
-		ieth_set_rkey(pkt, ibwr->ex.invalidate_rkey);
+		ieth_set_rkey(pkt, wr->ex.invalidate_rkey);
 
 	if (pkt->mask & RXE_ATMETH_MASK) {
 		atmeth_set_va(pkt, wqe->iova);
-		if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
-			atmeth_set_swap_add(pkt, ibwr->wr.atomic.swap);
-			atmeth_set_comp(pkt, ibwr->wr.atomic.compare_add);
+		if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+			atmeth_set_swap_add(pkt, wr->wr.atomic.swap);
+			atmeth_set_comp(pkt, wr->wr.atomic.compare_add);
 		} else {
-			atmeth_set_swap_add(pkt, ibwr->wr.atomic.compare_add);
+			atmeth_set_swap_add(pkt, wr->wr.atomic.compare_add);
 		}
-		atmeth_set_rkey(pkt, ibwr->wr.atomic.rkey);
+		atmeth_set_rkey(pkt, wr->wr.atomic.rkey);
 	}
 
 	if (pkt->mask & RXE_DETH_MASK) {
-		if (qp->ibqp.qp_num == 1)
-			deth_set_qkey(pkt, GSI_QKEY);
-		else
-			deth_set_qkey(pkt, ibwr->wr.ud.remote_qkey);
-		deth_set_sqp(pkt, qp->ibqp.qp_num);
+		qkey = (qp->ibqp.qp_num == 1) ? GSI_QKEY :
+						wr->wr.ud.remote_qkey;
+		deth_set_qkey(pkt, qkey);
+		deth_set_sqp(pkt, qp_num(qp));
 	}
+}
+
+static struct sk_buff *init_req_packet(struct rxe_qp *qp,
+				       struct rxe_av *av,
+				       struct rxe_send_wqe *wqe,
+				       int opcode, u32 payload,
+				       struct rxe_pkt_info *pkt)
+{
+	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+	struct sk_buff *skb;
+
+	/* length from start of bth to end of icrc */
+	pkt->pad = (-payload) & 0x3;
+	pkt->paylen = rxe_opcode[opcode].length + payload +
+			pkt->pad + RXE_ICRC_SIZE;
+
+	/* init skb */
+	skb = rxe_init_packet(rxe, av, pkt->paylen, pkt);
+	if (unlikely(!skb))
+		return NULL;
+
+	rxe_init_roce_hdrs(qp, wqe, pkt);
 
 	return skb;
 }
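
One detail the patch moves verbatim is worth spelling out: pkt->pad =
(-payload) & 0x3 rounds the payload up to the next 4-byte boundary, matching
the BTH pad count semantics (0-3 pad bytes to reach 32-bit alignment before
the ICRC). A minimal standalone check of that arithmetic (illustrative only,
not rxe code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		for (uint32_t payload = 0; payload < 16; payload++) {
			/* unary minus on an unsigned value wraps modulo 2^32,
			 * so (-payload) & 0x3 yields the 0-3 bytes needed to
			 * pad payload to a multiple of 4
			 */
			uint32_t pad = (-payload) & 0x3;

			assert(pad < 4);
			assert((payload + pad) % 4 == 0);
		}
		return 0;
	}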