diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -477,20 +477,8 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	}
 }
 
-static inline enum comp_state complete_ack(struct rxe_qp *qp,
-					   struct rxe_pkt_info *pkt,
-					   struct rxe_send_wqe *wqe)
+static void comp_check_sq_drain_done(struct rxe_qp *qp)
 {
-	if (wqe->has_rd_atomic) {
-		wqe->has_rd_atomic = 0;
-		atomic_inc(&qp->req.rd_atomic);
-		if (qp->req.need_rd_atomic) {
-			qp->comp.timeout_retry = 0;
-			qp->req.need_rd_atomic = 0;
-			rxe_sched_task(&qp->req.task);
-		}
-	}
-
 	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
 		/* state_lock used by requester & completer */
 		spin_lock_bh(&qp->state_lock);
@@ -507,10 +495,27 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 				qp->ibqp.event_handler(&ev,
 					qp->ibqp.qp_context);
 			}
-		} else {
-			spin_unlock_bh(&qp->state_lock);
+			return;
 		}
+		spin_unlock_bh(&qp->state_lock);
 	}
+}
+
+static inline enum comp_state complete_ack(struct rxe_qp *qp,
+					   struct rxe_pkt_info *pkt,
+					   struct rxe_send_wqe *wqe)
+{
+	if (wqe->has_rd_atomic) {
+		wqe->has_rd_atomic = 0;
+		atomic_inc(&qp->req.rd_atomic);
+		if (qp->req.need_rd_atomic) {
+			qp->comp.timeout_retry = 0;
+			qp->req.need_rd_atomic = 0;
+			rxe_sched_task(&qp->req.task);
+		}
+	}
+
+	comp_check_sq_drain_done(qp);
 
 	do_complete(qp, wqe);
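
With the hunks above applied, the drain check that used to sit inline in
complete_ack() stands alone. As a reading aid, here is a sketch of the
resulting helper, reconstructed from the diff. The condition guarding the
event sits in unchanged context that the hunks do not display, so the drain
test shown below is an assumption filled in for illustration, not a
quotation of the patched file:

	static void comp_check_sq_drain_done(struct rxe_qp *qp)
	{
		if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
			/* state_lock used by requester & completer */
			spin_lock_bh(&qp->state_lock);
			/* assumed drain test -- this line lies in unchanged
			 * context between the two hunks above
			 */
			if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
				qp->attr.sq_draining = 0;
				spin_unlock_bh(&qp->state_lock);

				/* deliver IB_EVENT_SQ_DRAINED if a handler is set */
				if (qp->ibqp.event_handler) {
					struct ib_event ev;

					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event = IB_EVENT_SQ_DRAINED;
					qp->ibqp.event_handler(&ev,
							qp->ibqp.qp_context);
				}
				return;
			}
			spin_unlock_bh(&qp->state_lock);
		}
	}

Note that the single unlock on the not-drained path replaces the old else
branch, so every path through state_lock stays balanced.
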
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -108,17 +108,12 @@ void rnr_nak_timer(struct timer_list *t)
 	rxe_sched_task(&qp->req.task);
 }
 
-static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+static void req_check_sq_drain_done(struct rxe_qp *qp)
 {
-	struct rxe_send_wqe *wqe;
 	struct rxe_queue *q = qp->sq.queue;
 	unsigned int index = qp->req.wqe_index;
-	unsigned int cons;
-	unsigned int prod;
-
-	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
-	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
-	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
+	unsigned int cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+	struct rxe_send_wqe *wqe = queue_addr_from_index(q, cons);
 
 	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
 		/* check to see if we are drained;
@@ -126,18 +121,14 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 		 */
 		spin_lock_bh(&qp->state_lock);
 		do {
-			if (!qp->attr.sq_draining) {
+			if (!qp->attr.sq_draining)
 				/* comp just finished */
-				spin_unlock_bh(&qp->state_lock);
 				break;
-			}
 
 			if (wqe && ((index != cons) ||
-				(wqe->state != wqe_state_posted))) {
+				(wqe->state != wqe_state_posted)))
 				/* comp not done yet */
-				spin_unlock_bh(&qp->state_lock);
 				break;
-			}
 
 			qp->attr.sq_draining = 0;
 			spin_unlock_bh(&qp->state_lock);
@@ -151,9 +142,22 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 				qp->ibqp.event_handler(&ev,
 					qp->ibqp.qp_context);
 			}
+			return;
 		} while (0);
+		spin_unlock_bh(&qp->state_lock);
 	}
+}
+
+static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+{
+	struct rxe_send_wqe *wqe;
+	struct rxe_queue *q = qp->sq.queue;
+	unsigned int index = qp->req.wqe_index;
+	unsigned int prod;
+
+	req_check_sq_drain_done(qp);
+
+	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 
 	if (index == prod)
 		return NULL;

Move two blocks of code in rxe_comp.c and rxe_req.c to subroutines
that check if draining is complete in the SQD state and, if so,
generate an SQ_DRAINED event.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_comp.c | 35 ++++++++++++++++------------
 drivers/infiniband/sw/rxe/rxe_req.c  | 32 ++++++++++++++-----------
 2 files changed, 38 insertions(+), 29 deletions(-)
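
For reference, a sketch of the requester-side helper as it reads with this
patch applied, reconstructed from the hunks above. The block that raises the
event sits in unchanged context between the last two hunks, so it is
summarized here by an assumed comment rather than quoted:

	static void req_check_sq_drain_done(struct rxe_qp *qp)
	{
		struct rxe_queue *q = qp->sq.queue;
		unsigned int index = qp->req.wqe_index;
		unsigned int cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
		struct rxe_send_wqe *wqe = queue_addr_from_index(q, cons);

		if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
			/* check to see if we are drained;
			 * state_lock used by requester and completer
			 */
			spin_lock_bh(&qp->state_lock);
			do {
				if (!qp->attr.sq_draining)
					/* comp just finished */
					break;

				if (wqe && ((index != cons) ||
					    (wqe->state != wqe_state_posted)))
					/* comp not done yet */
					break;

				qp->attr.sq_draining = 0;
				spin_unlock_bh(&qp->state_lock);

				/* assumed: unchanged context delivers
				 * IB_EVENT_SQ_DRAINED here, as on the
				 * completer side
				 */
				return;
			} while (0);
			spin_unlock_bh(&qp->state_lock);
		}
	}

Dropping the per-branch spin_unlock_bh() calls in favor of a single unlock
after the do/while means each early break no longer has to remember to
release state_lock, which is the main readability win of the refactor.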
Move two blocks of code in rxe_comp.c and rxe_req.c to subroutines that check if draining is complete in the SQD state and, if so, generate a SQ_DRAINED event. Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com> --- drivers/infiniband/sw/rxe/rxe_comp.c | 35 ++++++++++++++++------------ drivers/infiniband/sw/rxe/rxe_req.c | 32 ++++++++++++++----------- 2 files changed, 38 insertions(+), 29 deletions(-)