Extend the rxe driver to support sending XRC packets. This patch

 - Expands lists of QP types to include IB_QPT_XRC_INI and
   IB_QPT_XRC_TGT as appropriate.
 - Fills in the XRCETH header in XRC packets.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_av.c  |  4 +++-
 drivers/infiniband/sw/rxe/rxe_net.c | 14 ++++++++++----
 drivers/infiniband/sw/rxe/rxe_req.c | 13 ++++++++++---
 3 files changed, 23 insertions(+), 8 deletions(-)

diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -108,7 +108,9 @@ struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
 		return NULL;
 
 	if (rxe_qp_type(pkt->qp) == IB_QPT_RC ||
-	    rxe_qp_type(pkt->qp) == IB_QPT_UC)
+	    rxe_qp_type(pkt->qp) == IB_QPT_UC ||
+	    rxe_qp_type(pkt->qp) == IB_QPT_XRC_INI ||
+	    rxe_qp_type(pkt->qp) == IB_QPT_XRC_TGT)
 		return &pkt->qp->pri_av;
 
 	if (!pkt->wqe)
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -114,7 +114,9 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
 {
 	struct dst_entry *dst = NULL;
 
-	if (rxe_qp_type(qp) == IB_QPT_RC)
+	if (rxe_qp_type(qp) == IB_QPT_RC ||
+	    rxe_qp_type(qp) == IB_QPT_XRC_INI ||
+	    rxe_qp_type(qp) == IB_QPT_XRC_TGT)
 		dst = sk_dst_get(qp->sk->sk);
 
 	if (!dst || !dst_check(dst, qp->dst_cookie)) {
@@ -142,7 +144,9 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
 #endif
 	}
 
-	if (dst && (rxe_qp_type(qp) == IB_QPT_RC)) {
+	if (dst && (rxe_qp_type(qp) == IB_QPT_RC ||
+		    rxe_qp_type(qp) == IB_QPT_XRC_INI ||
+		    rxe_qp_type(qp) == IB_QPT_XRC_TGT)) {
 		dst_hold(dst);
 		sk_dst_set(qp->sk->sk, dst);
 	}
@@ -459,8 +463,10 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 		return err;
 	}
 
-	if ((rxe_qp_type(qp) != IB_QPT_RC) &&
-	    (pkt->mask & RXE_END_MASK)) {
+	if ((rxe_qp_type(qp) != IB_QPT_RC &&
+	     rxe_qp_type(qp) != IB_QPT_XRC_INI &&
+	     rxe_qp_type(qp) != IB_QPT_XRC_TGT) &&
+	    pkt->mask & RXE_END_MASK) {
 		pkt->wqe->state = wqe_state_done;
 		rxe_run_task(&qp->comp.task, 1);
 	}
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -423,7 +423,9 @@ static inline int get_mtu(struct rxe_qp *qp)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
 
-	if ((rxe_qp_type(qp) == IB_QPT_RC) || (rxe_qp_type(qp) == IB_QPT_UC))
+	if (rxe_qp_type(qp) == IB_QPT_RC ||
+	    rxe_qp_type(qp) == IB_QPT_UC ||
+	    rxe_qp_type(qp) == IB_QPT_XRC_INI)
 		return qp->mtu;
 
 	return rxe->port.mtu_cap;
@@ -487,6 +489,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
 		 ack_req, pkt->psn);
 
 	/* init optional headers */
+	if (pkt->mask & RXE_XRCETH_MASK)
+		xrceth_set_xrcsrq(pkt, ibwr->wr.xrc.srq_num);
+
 	if (pkt->mask & RXE_RETH_MASK) {
 		reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
 		reth_set_va(pkt, wqe->iova);
@@ -562,7 +567,8 @@ static void update_wqe_state(struct rxe_qp *qp,
 				   struct rxe_pkt_info *pkt)
 {
 	if (pkt->mask & RXE_END_MASK) {
-		if (rxe_qp_type(qp) == IB_QPT_RC)
+		if (rxe_qp_type(qp) == IB_QPT_RC ||
+		    rxe_qp_type(qp) == IB_QPT_XRC_INI)
 			wqe->state = wqe_state_pending;
 	} else {
 		wqe->state = wqe_state_processing;
@@ -730,7 +736,8 @@ int rxe_requester(void *arg)
 		goto next_wqe;
 	}
 
-	if (unlikely(rxe_qp_type(qp) == IB_QPT_RC &&
+	if (unlikely((rxe_qp_type(qp) == IB_QPT_RC ||
+		      rxe_qp_type(qp) == IB_QPT_XRC_INI) &&
 		     psn_compare(qp->req.psn, (qp->comp.psn +
 				  RXE_MAX_UNACKED_PSNS)) > 0)) {
 		qp->req.wait_psn = 1;
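Reviewer's note, not part of the patch: xrceth_set_xrcsrq() and RXE_XRCETH_MASK are not defined in this diff, so presumably an earlier patch in the series adds XRCETH support to rxe_hdr.h and the opcode tables. A minimal sketch of what that helper could look like, assuming the IBA-defined XRCETH layout (a 4-byte extended transport header carrying the XRC SRQ number in its low 24 bits) and the driver's existing per-opcode header offset table; the RXE_XRCETH index and the struct/field names here are guesses:

/* Hypothetical sketch in the style of rxe_hdr.h, not taken from the
 * series.  Assumes an RXE_XRCETH entry is added to the header-type
 * enum and to rxe_opcode[...].offset[] for the XRC opcodes.
 */
struct rxe_xrceth {
	__be32			srqn;	/* XRC SRQ number in the low 24 bits */
};

#define XRCETH_SRQN_MASK	(0x00ffffff)

static inline void __xrceth_set_xrcsrq(void *arg, u32 srqn)
{
	struct rxe_xrceth *xrceth = arg;

	xrceth->srqn = cpu_to_be32(srqn & XRCETH_SRQN_MASK);
}

static inline void xrceth_set_xrcsrq(struct rxe_pkt_info *pkt, u32 srqn)
{
	__xrceth_set_xrcsrq(pkt->hdr +
			    rxe_opcode[pkt->opcode].offset[RXE_XRCETH], srqn);
}

This mirrors how the existing reth_set_rkey()/reth_set_va() helpers locate their header through the opcode offset table before byte-swapping the value into place.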
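Likewise, init_req_packet() reads ibwr->wr.xrc.srq_num, which implies a new xrc member in the rxe send-WR union added elsewhere in the series; a sketch of the assumed uapi change (placement and layout are guesses):

/* Hypothetical sketch: the wr union inside struct rxe_send_wr in
 * include/uapi/rdma/rdma_user_rxe.h, extended so the requester can
 * carry the remote SRQ number for XRC sends.
 */
	union {
		/* ... existing rdma, atomic, ud members ... */
		struct {
			__u32	srq_num;	/* target XRC SRQ number */
		} xrc;
	} wr;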