diff mbox

[rdma-core,2/2] cxgb4: Atomically flush per QP HW CQEs

Message ID 20180518073617.26404-3-rajur@chelsio.com (mailing list archive)
State Accepted
Delegated to: Jason Gunthorpe
Headers show

Commit Message

Raju Rangoju May 18, 2018, 7:36 a.m. UTC
From: Potnuri Bharat Teja <bharat@chelsio.com>

When a CQ is shared by multiple QPs, c4iw_flush_hw_cq() needs to acquire
the corresponding QP's lock before moving the CQEs into that QP's SW queue
and accessing the SQ contents to complete a WR.
CQEs are ignored if the corresponding QP has already been flushed.

Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Raju Rangoju <rajur@chelsio.com>
---
 providers/cxgb4/cq.c       | 20 +++++++++++++++++++-
 providers/cxgb4/libcxgb4.h |  2 +-
 providers/cxgb4/qp.c       |  4 ++--
 3 files changed, 22 insertions(+), 4 deletions(-)
diff mbox

Patch

diff --git a/providers/cxgb4/cq.c b/providers/cxgb4/cq.c
index be6cf2f2..478c596a 100644
--- a/providers/cxgb4/cq.c
+++ b/providers/cxgb4/cq.c
@@ -196,7 +196,7 @@  static void advance_oldest_read(struct t4_wq *wq)
  * Deal with out-of-order and/or completions that complete
  * prior unsignalled WRs.
  */
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
 {
 	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
 	struct c4iw_qp *qhp;
@@ -220,6 +220,14 @@  void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 		if (qhp == NULL)
 			goto next_cqe;
 
+		if (flush_qhp != qhp) {
+			pthread_spin_lock(&qhp->lock);
+
+			if (qhp->wq.flushed == 1) {
+				goto next_cqe;
+			}
+		}
+
 		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
 			goto next_cqe;
 
@@ -279,6 +287,8 @@  void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 next_cqe:
 		t4_hwcq_consume(&chp->cq);
 		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+		if (qhp && flush_qhp != qhp)
+			pthread_spin_unlock(&qhp->lock);
 	}
 }
 
@@ -372,6 +382,14 @@  static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	}
 
 	/*
+	 * skip HW cqe's if wq is already flushed.
+	 */
+	if (wq->flushed && !SW_CQE(hw_cqe)) {
+		ret = -EAGAIN;
+		goto skip_cqe;
+	}
+
+	/*
 	 * Gotta tweak READ completions:
 	 *	1) the cqe doesn't contain the sq_wptr from the wr.
 	 *	2) opcode not reflected from the wr.
diff --git a/providers/cxgb4/libcxgb4.h b/providers/cxgb4/libcxgb4.h
index 893bd85d..8eda822e 100644
--- a/providers/cxgb4/libcxgb4.h
+++ b/providers/cxgb4/libcxgb4.h
@@ -225,7 +225,7 @@  int c4iw_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
 int c4iw_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid,
 			     uint16_t lid);
 void c4iw_async_event(struct ibv_async_event *event);
-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
 void c4iw_flush_sq(struct c4iw_qp *qhp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
diff --git a/providers/cxgb4/qp.c b/providers/cxgb4/qp.c
index 46806341..5d90510c 100644
--- a/providers/cxgb4/qp.c
+++ b/providers/cxgb4/qp.c
@@ -517,12 +517,12 @@  void c4iw_flush_qp(struct c4iw_qp *qhp)
 
 	update_qp_state(qhp);
 
-	c4iw_flush_hw_cq(rchp);
+	c4iw_flush_hw_cq(rchp, qhp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 
 	if (schp != rchp)
-		c4iw_flush_hw_cq(schp);
+		c4iw_flush_hw_cq(schp, qhp);
 
 	c4iw_flush_sq(qhp);