[for-next,1/2] RDMA/erdma: Introduce internal post_send/post_recv for qp drain

Message ID 20220824094251.23190-2-chengyou@linux.alibaba.com
State Superseded
Series RDMA/erdma: Introduce custom implementation of drain_sq and drain_rq

Commit Message

Cheng Xu Aug. 24, 2022, 9:42 a.m. UTC
For erdma, hardware will not process send or recv WRs posted after the
QP state changes to error, and no flush CQEs will be generated for
them. So, introduce internal post_send and post_recv functions that
reject any further send or recv WRs once a drain WR has been posted.

Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
---
 drivers/infiniband/hw/erdma/erdma_main.c  |  4 +-
 drivers/infiniband/hw/erdma/erdma_qp.c    | 45 ++++++++++++++++++++---
 drivers/infiniband/hw/erdma/erdma_verbs.h | 17 +++++++--
 3 files changed, 55 insertions(+), 11 deletions(-)
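
A minimal sketch, for orientation, of how a drain_sq()-style caller might
use the internal entry point added here. erdma_drain_sq_sketch(), the
ERDMA_DRAIN_WR_ID sentinel, and the omitted completion wait are
illustrative assumptions; the real drain implementation is the subject of
patch 2/2, not code from this patch:

/* Illustrative only; assumes the erdma driver context (erdma_verbs.h). */
static void erdma_drain_sq_sketch(struct ib_qp *ibqp)
{
	/* Zero-length signaled RDMA WRITE as the final "drain" WR,
	 * mirroring what the core ib_drain_sq() posts.
	 */
	struct ib_rdma_wr drain_wr = {
		.wr = {
			.opcode     = IB_WR_RDMA_WRITE,
			.send_flags = IB_SEND_SIGNALED,
			.wr_id      = ERDMA_DRAIN_WR_ID, /* hypothetical sentinel */
		},
	};
	const struct ib_send_wr *bad_wr;

	/* With drain == true, this WR is still queued to hardware and
	 * ERDMA_QP_FLAGS_TX_STOPPED is latched in the same qp->lock
	 * critical section, so every later post fails with -EINVAL
	 * instead of being silently ignored by hardware once the QP is
	 * in the error state.
	 */
	if (erdma_post_send(ibqp, &drain_wr.wr, &bad_wr, true))
		return;

	/* Wait for the drain WR's completion here (omitted). */
}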

Comments

Leon Romanovsky Aug. 24, 2022, 12:10 p.m. UTC | #1
On Wed, Aug 24, 2022 at 05:42:50PM +0800, Cheng Xu wrote:
> For erdma, hardware will not process send or recv WRs posted after the
> QP state changes to error, and no flush CQEs will be generated for
> them. So, introduce internal post_send and post_recv functions that
> reject any further send or recv WRs once a drain WR has been posted.
> 
> Signed-off-by: Cheng Xu <chengyou@linux.alibaba.com>
> ---
>  drivers/infiniband/hw/erdma/erdma_main.c  |  4 +-
>  drivers/infiniband/hw/erdma/erdma_qp.c    | 45 ++++++++++++++++++++---
>  drivers/infiniband/hw/erdma/erdma_verbs.h | 17 +++++++--
>  3 files changed, 55 insertions(+), 11 deletions(-)
> 

Thanks,
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>

Patch

diff --git a/drivers/infiniband/hw/erdma/erdma_main.c b/drivers/infiniband/hw/erdma/erdma_main.c
index 07e743d24847..4921ebc1286d 100644
--- a/drivers/infiniband/hw/erdma/erdma_main.c
+++ b/drivers/infiniband/hw/erdma/erdma_main.c
@@ -460,8 +460,8 @@  static const struct ib_device_ops erdma_device_ops = {
 	.mmap = erdma_mmap,
 	.mmap_free = erdma_mmap_free,
 	.modify_qp = erdma_modify_qp,
-	.post_recv = erdma_post_recv,
-	.post_send = erdma_post_send,
+	.post_recv = erdma_post_recv_nodrain,
+	.post_send = erdma_post_send_nodrain,
 	.poll_cq = erdma_poll_cq,
 	.query_device = erdma_query_device,
 	.query_gid = erdma_query_gid,
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index bc3ec22a62c5..abf8b134d076 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -475,8 +475,8 @@  static void kick_sq_db(struct erdma_qp *qp, u16 pi)
 	writeq(db_data, qp->kern_qp.hw_sq_db);
 }
 
-int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
-		    const struct ib_send_wr **bad_send_wr)
+static int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
+			   const struct ib_send_wr **bad_send_wr, bool drain)
 {
 	struct erdma_qp *qp = to_eqp(ibqp);
 	int ret = 0;
@@ -488,6 +488,16 @@  int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
 		return -EINVAL;
 
 	spin_lock_irqsave(&qp->lock, flags);
+
+	if (unlikely(qp->flags & ERDMA_QP_FLAGS_TX_STOPPED)) {
+		*bad_send_wr = send_wr;
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (unlikely(drain))
+		qp->flags |= ERDMA_QP_FLAGS_TX_STOPPED;
+
 	sq_pi = qp->kern_qp.sq_pi;
 
 	while (wr) {
@@ -507,11 +517,19 @@  int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
 
 		wr = wr->next;
 	}
-	spin_unlock_irqrestore(&qp->lock, flags);
 
+out:
+	spin_unlock_irqrestore(&qp->lock, flags);
 	return ret;
 }
 
+int erdma_post_send_nodrain(struct ib_qp *ibqp,
+			    const struct ib_send_wr *send_wr,
+			    const struct ib_send_wr **bad_send_wr)
+{
+	return erdma_post_send(ibqp, send_wr, bad_send_wr, false);
+}
+
 static int erdma_post_recv_one(struct erdma_qp *qp,
 			       const struct ib_recv_wr *recv_wr)
 {
@@ -542,8 +560,8 @@  static int erdma_post_recv_one(struct erdma_qp *qp,
 	return 0;
 }
 
-int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
-		    const struct ib_recv_wr **bad_recv_wr)
+static int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
+			   const struct ib_recv_wr **bad_recv_wr, bool drain)
 {
 	const struct ib_recv_wr *wr = recv_wr;
 	struct erdma_qp *qp = to_eqp(ibqp);
@@ -552,6 +570,15 @@  int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
 
 	spin_lock_irqsave(&qp->lock, flags);
 
+	if (unlikely(qp->flags & ERDMA_QP_FLAGS_RX_STOPPED)) {
+		ret = -EINVAL;
+		*bad_recv_wr = recv_wr;
+		goto out;
+	}
+
+	if (unlikely(drain))
+		qp->flags |= ERDMA_QP_FLAGS_RX_STOPPED;
+
 	while (wr) {
 		ret = erdma_post_recv_one(qp, wr);
 		if (ret) {
@@ -561,6 +588,14 @@  int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
 		wr = wr->next;
 	}
 
+out:
 	spin_unlock_irqrestore(&qp->lock, flags);
 	return ret;
 }
+
+int erdma_post_recv_nodrain(struct ib_qp *ibqp,
+			    const struct ib_recv_wr *recv_wr,
+			    const struct ib_recv_wr **bad_recv_wr)
+{
+	return erdma_post_recv(ibqp, recv_wr, bad_recv_wr, false);
+}
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index c7baddb1f292..f4148fbac878 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -195,6 +195,12 @@  struct erdma_qp_attrs {
 	u8 pd_len;
 };
 
+enum erdma_qp_flags {
+	ERDMA_QP_FLAGS_DRAIN_ISSUED = (1 << 0),
+	ERDMA_QP_FLAGS_TX_STOPPED = (1 << 1),
+	ERDMA_QP_FLAGS_RX_STOPPED = (1 << 2),
+};
+
 struct erdma_qp {
 	struct ib_qp ibqp;
 	struct kref ref;
@@ -202,6 +208,7 @@  struct erdma_qp {
 	struct erdma_dev *dev;
 	struct erdma_cep *cep;
 	struct rw_semaphore state_lock;
+	unsigned long flags;
 
 	union {
 		struct erdma_kqp kern_qp;
@@ -328,10 +335,12 @@  void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
 void erdma_qp_get_ref(struct ib_qp *ibqp);
 void erdma_qp_put_ref(struct ib_qp *ibqp);
 struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
-int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
-		    const struct ib_send_wr **bad_send_wr);
-int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
-		    const struct ib_recv_wr **bad_recv_wr);
+int erdma_post_send_nodrain(struct ib_qp *ibqp,
+			    const struct ib_send_wr *send_wr,
+			    const struct ib_send_wr **bad_send_wr);
+int erdma_post_recv_nodrain(struct ib_qp *ibqp,
+			    const struct ib_recv_wr *recv_wr,
+			    const struct ib_recv_wr **bad_recv_wr);
 int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 				u32 max_num_sg);
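
For illustration, the caller-visible semantics of the gate added above; a
hedged sketch in which the helper and its pr_debug() reporting are assumed
test scaffolding, not part of the patch:

/* Illustrative only; assumes the erdma driver context (erdma_verbs.h). */
static void erdma_tx_gate_sketch(struct ib_qp *ibqp, struct ib_send_wr *wr)
{
	const struct ib_send_wr *bad_wr;
	int ret;

	/* Before a drain is issued, posting proceeds as usual. */
	ret = erdma_post_send_nodrain(ibqp, wr, &bad_wr);

	/* After the internal erdma_post_send(..., drain = true) has
	 * latched ERDMA_QP_FLAGS_TX_STOPPED, the same call fails fast
	 * and reports the first rejected WR via bad_wr.
	 */
	ret = erdma_post_send_nodrain(ibqp, wr, &bad_wr);
	if (ret == -EINVAL)
		pr_debug("SQ stopped, rejected wr %p\n", bad_wr);
}

The RX side is symmetric: once ERDMA_QP_FLAGS_RX_STOPPED is set by the
internal erdma_post_recv(..., drain = true), erdma_post_recv_nodrain()
returns -EINVAL for any further recv WRs.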