
[for-next,v3,06/10] RDMA/rxe: Extend rxe_init_req_packet() for frags

Message ID: 20230727200128.65947-7-rpearsonhpe@gmail.com
State: Changes Requested
Delegated to: Jason Gunthorpe
Series: RDMA/rxe: Implement support for nonlinear packets

Commit Message

Bob Pearson July 27, 2023, 8:01 p.m. UTC
Add code to rxe_init_req_packet() to allocate space for the
pad and ICRC if the skb is fragmented. Extend rxe_init_payload()
to take a frag parameter and factor the pad/ICRC handling out
into a new helper, rxe_prepare_pad_icrc(), which attaches the
zeroed trailer as an skb fragment when the skb is fragmented.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_loc.h    |  5 ++
 drivers/infiniband/sw/rxe/rxe_mr.c     |  5 +-
 drivers/infiniband/sw/rxe/rxe_opcode.c |  2 +
 drivers/infiniband/sw/rxe/rxe_req.c    | 83 ++++++++++++++++++++++----
 4 files changed, 84 insertions(+), 11 deletions(-)
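
For context (not part of the patch): the trailer the new helper must
reserve is the BTH pad plus the ICRC. A minimal sketch of the
arithmetic, assuming the usual rxe conventions that pkt->pad is
computed as (-payload) & 0x3 and that RXE_ICRC_SIZE is 4:

/* Illustrative only: length of the pad + ICRC trailer for a given
 * RoCE payload. The pad rounds the payload up to a 4-byte multiple,
 * matching how rxe derives pkt->pad.
 */
static unsigned int trailer_len(unsigned int payload)
{
	unsigned int pad = (-payload) & 0x3;

	return pad + RXE_ICRC_SIZE;	/* e.g. payload = 18 -> 2 + 4 = 6 */
}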

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 96b1fb79610a..40624de62288 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -177,7 +177,12 @@  void rxe_srq_cleanup(struct rxe_pool_elem *elem);
 void rxe_dealloc(struct ib_device *ib_dev);
 
 int rxe_completer(struct rxe_qp *qp);
+
+/* rxe_req.c */
+int rxe_prepare_pad_icrc(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+			 int payload, bool frag);
 int rxe_requester(struct rxe_qp *qp);
+
 int rxe_responder(struct rxe_qp *qp);
 
 /* rxe_icrc.c */
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 0ac71238599a..5178775f2d4e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -263,7 +263,10 @@  int rxe_add_frag(struct sk_buff *skb, struct rxe_mr *mr, struct page *page,
 	skb_frag_t *frag = &skb_shinfo(skb)->frags[nr_frags];
 
 	if (nr_frags >= MAX_SKB_FRAGS) {
-		rxe_dbg_mr(mr, "ran out of frags");
+		if (mr)
+			rxe_dbg_mr(mr, "ran out of frags");
+		else
+			rxe_dbg("ran out of frags");
 		return -EINVAL;
 	}
 
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index f358b732a751..a72e5fd4f571 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -399,6 +399,8 @@  struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 			[RXE_BTH]	= 0,
 			[RXE_FETH]	= RXE_BTH_BYTES,
 			[RXE_RETH]	= RXE_BTH_BYTES + RXE_FETH_BYTES,
+			[RXE_PAYLOAD]	= RXE_BTH_BYTES + RXE_FETH_BYTES +
+					  RXE_RETH_BYTES,
 		}
 	},
 	[IB_OPCODE_RC_ATOMIC_WRITE]                        = {
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 491360fef346..cf34d1a58f85 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -316,26 +316,83 @@  static void rxe_init_roce_hdrs(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 
 static int rxe_init_payload(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 			    struct rxe_pkt_info *pkt, u32 payload,
-			    struct sk_buff *skb)
+			    struct sk_buff *skb, bool frag)
 {
+	int len = skb_tailroom(skb);
+	int tot_len = payload + pkt->pad + RXE_ICRC_SIZE;
+	int access = 0;
 	int skb_offset = 0;
+	int op;
+	void *addr;
 	void *data;
 	int err = 0;
 
 	if (wqe->wr.send_flags & IB_SEND_INLINE) {
+		if (WARN_ON(frag)) {
+			rxe_err_qp(qp, "inline data for fragmented skb not supported");
+			return -EINVAL;
+		}
+		if (len < tot_len) {
+			rxe_err_qp(qp, "skb too small");
+			return -EINVAL;
+		}
 		data = &wqe->dma.inline_data[wqe->dma.sge_offset];
 		memcpy(payload_addr(pkt), data, payload);
 		wqe->dma.resid -= payload;
 		wqe->dma.sge_offset += payload;
 	} else {
-		err = rxe_copy_dma_data(skb, qp->pd, 0, &wqe->dma,
-					payload_addr(pkt), skb_offset,
-					payload, RXE_COPY_FROM_MR);
+		op = frag ? RXE_FRAG_FROM_MR : RXE_COPY_FROM_MR;
+		addr = frag ? NULL : payload_addr(pkt);
+		err = rxe_copy_dma_data(skb, qp->pd, access, &wqe->dma,
+					addr, skb_offset, payload, op);
 	}
 
 	return err;
 }
 
+/**
+ * rxe_prepare_pad_icrc() - Alloc space if fragmented and init pad and icrc
+ * @pkt: packet info
+ * @skb: packet buffer
+ * @payload: roce payload
+ * @frag: true if skb is fragmented
+ *
+ * Returns: 0 on success, or a negative error code
+ */
+int rxe_prepare_pad_icrc(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+			 int payload, bool frag)
+{
+	unsigned int length = RXE_ICRC_SIZE + pkt->pad;
+	unsigned int offset;
+	struct page *page;
+	u64 iova;
+	u8 *addr;
+
+	if (frag) {
+		addr = skb_end_pointer(skb) - length;
+		iova = (uintptr_t)addr;
+		page = virt_to_page(iova);
+		offset = iova & (PAGE_SIZE - 1);
+
+		/* make sure we have enough room and the frag
+		 * doesn't cross a page boundary; this should
+		 * never happen
+		 */
+		if (WARN_ON(((skb->end - skb->tail) <= length) ||
+			((offset + length) > PAGE_SIZE)))
+			return -ENOMEM;
+
+		memset(addr, 0, length);
+
+		return rxe_add_frag(skb, NULL, page, length, offset);
+	}
+
+	addr = payload_addr(pkt) + payload;
+	memset(addr, 0, length);
+
+	return 0;
+}
+
 static struct sk_buff *rxe_init_req_packet(struct rxe_qp *qp,
 					   struct rxe_send_wqe *wqe,
 					   int opcode, u32 payload,
@@ -345,7 +402,7 @@  static struct sk_buff *rxe_init_req_packet(struct rxe_qp *qp,
 	struct sk_buff *skb = NULL;
 	struct rxe_av *av;
 	struct rxe_ah *ah = NULL;
-	u8 *pad_addr;
+	bool frag = false;
 	int err;
 
 	pkt->rxe = rxe;
@@ -380,9 +437,13 @@  static struct sk_buff *rxe_init_req_packet(struct rxe_qp *qp,
 
 	/* init payload if any */
 	if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
-		err = rxe_init_payload(qp, wqe, pkt, payload, skb);
-		if (unlikely(err))
+		err = rxe_init_payload(qp, wqe, pkt, payload,
+				       skb, frag);
+		if (unlikely(err)) {
+			rxe_dbg_qp(qp, "rxe_init_payload failed, err = %d",
+				   err);
 			goto err_out;
+		}
 	} else if (pkt->mask & RXE_FLUSH_MASK) {
 		/* oA19-2: shall have no payload. */
 		wqe->dma.resid = 0;
@@ -394,9 +455,11 @@  static struct sk_buff *rxe_init_req_packet(struct rxe_qp *qp,
 	}
 
 	/* init pad and icrc */
-	if (pkt->pad) {
-		pad_addr = payload_addr(pkt) + payload;
-		memset(pad_addr, 0, pkt->pad);
+	err = rxe_prepare_pad_icrc(pkt, skb, payload, frag);
+	if (unlikely(err)) {
+		rxe_dbg_qp(qp, "rxe_prepare_pad_icrc failed, err = %d",
+			   err);
+		goto err_out;
 	}
 
 	/* init IP and UDP network headers */
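
For reviewers new to the frag path: a hedged, illustrative sketch of
what the frag branch of rxe_prepare_pad_icrc() and rxe_add_frag()
(introduced earlier in this series) amount to together. This is an
assumption-laden summary written against the stock skb helpers, not
the series' actual code:

/* Illustrative only: attach the zeroed pad + ICRC trailer, carved
 * from the end of the skb's own tailroom, as a page fragment of a
 * nonlinear skb. The ICRC bytes are filled in later by the ICRC code.
 */
static int example_add_trailer_frag(struct sk_buff *skb, unsigned int length)
{
	int nr_frags = skb_shinfo(skb)->nr_frags;
	u8 *addr = skb_end_pointer(skb) - length;
	struct page *page = virt_to_page(addr);
	unsigned int offset = offset_in_page(addr);

	if (nr_frags >= MAX_SKB_FRAGS)
		return -EINVAL;

	memset(addr, 0, length);	/* zero the pad; ICRC filled later */

	/* point the next fragment at the zeroed trailer */
	skb_fill_page_desc(skb, nr_frags, page, offset, length);

	/* skb->truesize already covers this memory since the trailer
	 * lives inside the skb's own data buffer
	 */
	skb->len += length;
	skb->data_len += length;

	return 0;
}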