[for-next,7/9] RDMA/rxe: Check for unsupported wr opcodes

Message ID 20230222233237.48940-8-rpearsonhpe@gmail.com (mailing list archive)
State Superseded
Series RDMA/rxe: Correct qp reference counting

Commit Message

Bob Pearson Feb. 22, 2023, 11:32 p.m. UTC
Currently the rxe driver does not check for unsupported work
request opcodes in posted send work requests. This patch adds
code to do this and immediately returns an error if an
unsupported opcode is used.
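
For context, a minimal sketch of the caller-visible effect, assuming
an established rxe RC QP driven by a kernel ULP (the helper name is
illustrative only):

#include <rdma/ib_verbs.h>

/* Illustrative helper: post one send WR and report how an opcode the
 * driver does not implement is rejected.
 */
static int post_checked(struct ib_qp *qp, struct ib_send_wr *wr)
{
	const struct ib_send_wr *bad_wr = NULL;
	int err;

	err = ib_post_send(qp, wr, &bad_wr);
	if (err) {
		/* With this change an opcode init_send_wr() does not
		 * handle is reported as -EOPNOTSUPP (earlier validation
		 * failures still return -EINVAL); bad_wr identifies the
		 * work request that was not posted.
		 */
		pr_warn("post_send failed: opcode %d err %d\n",
			bad_wr ? bad_wr->opcode : -1, err);
		return err;
	}

	return 0;
}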

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_verbs.c | 38 +++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 7 deletions(-)

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 8dfed5f8b6b7..b70403df20ae 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -695,7 +695,7 @@  static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	return -EINVAL;
 }
 
-static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
+static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 			 const struct ib_send_wr *ibwr)
 {
 	wr->wr_id = ibwr->wr_id;
@@ -746,11 +746,22 @@  static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
 			wr->wr.reg.key = reg_wr(ibwr)->key;
 			wr->wr.reg.access = reg_wr(ibwr)->access;
 			break;
+		case IB_WR_SEND:
+		case IB_WR_BIND_MW:
+		case IB_WR_RDMA_READ_WITH_INV:
+		case IB_WR_FLUSH:
+		case IB_WR_ATOMIC_WRITE:
+			/* nothing to do here */
+			break;
 		default:
-			WARN_ON(1);
+			rxe_err_qp(qp, "unsupported send wr opcode = %d",
+				   wr->opcode);
+			return -EOPNOTSUPP;
 			break;
 		}
 	}
+
+	return 0;
 }
 
 static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
@@ -766,19 +777,22 @@  static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
 	}
 }
 
-static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
+static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 			 unsigned int mask, unsigned int length,
 			 struct rxe_send_wqe *wqe)
 {
 	int num_sge = ibwr->num_sge;
+	int err;
 
-	init_send_wr(qp, &wqe->wr, ibwr);
+	err = init_send_wr(qp, &wqe->wr, ibwr);
+	if (err)
+		return err;
 
 	/* local operation */
 	if (unlikely(mask & WR_LOCAL_OP_MASK)) {
 		wqe->mask = mask;
 		wqe->state = wqe_state_posted;
-		return;
+		return 0;
 	}
 
 	if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
@@ -797,6 +811,8 @@  static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	wqe->dma.sge_offset	= 0;
 	wqe->state		= wqe_state_posted;
 	wqe->ssn		= atomic_add_return(1, &qp->ssn);
+
+	return 0;
 }
 
 static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
@@ -809,8 +825,10 @@  static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	int full;
 
 	err = validate_send_wr(qp, ibwr, mask, length);
-	if (err)
+	if (err) {
+		rxe_dbg_qp(qp, "malformed wr");
 		return err;
+	}
 
 	spin_lock_irqsave(&qp->sq.sq_lock, flags);
 
@@ -822,7 +840,13 @@  static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
 	}
 
 	send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
-	init_send_wqe(qp, ibwr, mask, length, send_wqe);
+
+	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
+	if (err) {
+		rxe_err_qp(qp, "failed to init send wqe");
+		spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+		return err;
+	}
 
 	queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);