
[for-next,6/9] RDMA/rxe: Replace some __rxe_do_task by rxe_sched_task

Message ID 20230222233237.48940-7-rpearsonhpe@gmail.com (mailing list archive)
State Superseded
Series RDMA/rxe: Correct qp reference counting

Commit Message

Bob Pearson Feb. 22, 2023, 11:32 p.m. UTC
In rxe_qp.c there are several calls to __rxe_do_task for the
completion tasklet if the qp is not RC. This is not correct,
since elsewhere in the driver rxe_run_task and rxe_sched_task
are used for the completion tasklet; those prevent reentering
the completion tasklet code, while __rxe_do_task does not.
__rxe_do_task can only be used safely when the task machinery
is stopped, as in rxe_qp_reset and rxe_qp_do_cleanup.

In the latter two cases there are if statements checking
whether the qp has a send queue, which are not required.

This patch replaces the calls to __rxe_do_task by
rxe_sched_task, except in the two cases mentioned above, and
removes the unneeded conditional code there.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
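A minimal sketch of the distinction this patch relies on, assuming
the rxe_task machinery in drivers/infiniband/sw/rxe/rxe_task.c at
this point in the series (simplified, not the verbatim source):

	/* Runs the task function directly in the caller's context,
	 * with no locking or scheduling state, so it can race with
	 * and reenter a concurrently running tasklet. Only safe
	 * while the task machinery is stopped.
	 */
	void __rxe_do_task(struct rxe_task *task)
	{
		int ret;

		while ((ret = task->func(task->arg)) == 0)
			;

		task->ret = ret;
	}

	/* Defers the work to the tasklet. The tasklet SCHED/RUN
	 * state bits guarantee the tasklet body never runs
	 * concurrently with itself, so the completion code cannot
	 * be reentered.
	 */
	void rxe_sched_task(struct rxe_task *task)
	{
		tasklet_schedule(&task->tasklet);
	}

In rxe_qp_reset and rxe_qp_do_cleanup the tasks have just been
disabled or cleaned up, so the remaining direct calls there are
safe; everywhere else the patch switches to rxe_sched_task.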
 drivers/infiniband/sw/rxe/rxe_qp.c | 37 ++++++++----------------------
 1 file changed, 10 insertions(+), 27 deletions(-)

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c954dd9394ba..544a5aa59ff7 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -473,13 +473,8 @@  static void rxe_qp_reset(struct rxe_qp *qp)
 {
 	/* stop tasks from running */
 	rxe_disable_task(&qp->resp.task);
-
-	/* stop request/comp */
-	if (qp->sq.queue) {
-		if (qp_type(qp) == IB_QPT_RC)
-			rxe_disable_task(&qp->comp.task);
-		rxe_disable_task(&qp->req.task);
-	}
+	rxe_disable_task(&qp->comp.task);
+	rxe_disable_task(&qp->req.task);
 
 	/* move qp to the reset state */
 	qp->req.state = QP_STATE_RESET;
@@ -490,12 +485,11 @@  static void rxe_qp_reset(struct rxe_qp *qp)
 	 * etc.
 	 */
 	__rxe_do_task(&qp->resp.task);
+	__rxe_do_task(&qp->comp.task);
+	__rxe_do_task(&qp->req.task);
 
-	if (qp->sq.queue) {
-		__rxe_do_task(&qp->comp.task);
-		__rxe_do_task(&qp->req.task);
+	if (qp->sq.queue)
 		rxe_queue_reset(qp->sq.queue);
-	}
 
 	/* cleanup attributes */
 	atomic_set(&qp->ssn, 0);
@@ -533,10 +527,7 @@  static void rxe_qp_drain(struct rxe_qp *qp)
 	if (qp->sq.queue) {
 		if (qp->req.state != QP_STATE_DRAINED) {
 			qp->req.state = QP_STATE_DRAIN;
-			if (qp_type(qp) == IB_QPT_RC)
-				rxe_sched_task(&qp->comp.task);
-			else
-				__rxe_do_task(&qp->comp.task);
+			rxe_sched_task(&qp->comp.task);
 			rxe_sched_task(&qp->req.task);
 		}
 	}
@@ -552,11 +543,7 @@  void rxe_qp_error(struct rxe_qp *qp)
 
 	/* drain work and packet queues */
 	rxe_sched_task(&qp->resp.task);
-
-	if (qp_type(qp) == IB_QPT_RC)
-		rxe_sched_task(&qp->comp.task);
-	else
-		__rxe_do_task(&qp->comp.task);
+	rxe_sched_task(&qp->comp.task);
 	rxe_sched_task(&qp->req.task);
 }
 
@@ -784,13 +771,9 @@  static void rxe_qp_do_cleanup(struct work_struct *work)
 	rxe_cleanup_task(&qp->comp.task);
 
 	/* flush out any receive wr's or pending requests */
-	if (qp->req.task.func)
-		__rxe_do_task(&qp->req.task);
-
-	if (qp->sq.queue) {
-		__rxe_do_task(&qp->comp.task);
-		__rxe_do_task(&qp->req.task);
-	}
+	__rxe_do_task(&qp->req.task);
+	__rxe_do_task(&qp->comp.task);
+	__rxe_do_task(&qp->req.task);
 
 	if (qp->sq.queue)
 		rxe_queue_cleanup(qp->sq.queue);