@@ -113,16 +113,16 @@ static int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
}
}
- if (mcg->num_qp >= rxe->attr.max_mcast_qp_attach) {
+ if (atomic_read(&mcg->qp_num) >= rxe->attr.max_mcast_qp_attach) {
err = -ENOMEM;
goto out;
}
- mcg->num_qp++;
+ atomic_inc(&mcg->qp_num);
new_mca->qp = qp;
atomic_inc(&qp->mcg_num);
- list_add(&new_mca->qp_list, &mcg->qp_list);
+ list_add_tail(&new_mca->qp_list, &mcg->qp_list);
err = 0;
out:
@@ -135,6 +135,7 @@ static int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
{
struct rxe_mcg *mcg;
struct rxe_mca *mca, *tmp;
+ int n;
mcg = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
if (!mcg)
@@ -145,8 +146,8 @@ static int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
if (mca->qp == qp) {
list_del(&mca->qp_list);
- mcg->num_qp--;
- if (mcg->num_qp <= 0)
+ n = atomic_dec_return(&mcg->qp_num);
+ if (n <= 0)
rxe_drop_ref(mcg);
atomic_dec(&qp->mcg_num);
@@ -229,6 +229,11 @@ static inline void rxe_rcv_pkt(struct sk_buff *skb)
rxe_comp_queue_pkt(RXECB(skb)->qp, skb);
}
+/* Split processing of the qp list into two stages:
+ * first make a simple linear array from the current
+ * list while holding the lock, then process each qp
+ * without holding the lock.
+ */
static void rxe_rcv_mcast_pkt(struct sk_buff *skb)
{
struct sk_buff *s;
@@ -237,7 +242,9 @@ static void rxe_rcv_mcast_pkt(struct sk_buff *skb)
struct rxe_mcg *mcg;
struct rxe_mca *mca;
struct rxe_qp *qp;
+ struct rxe_qp **qp_array;
union ib_gid dgid;
+ int n, nmax;
int err;
if (skb->protocol == htons(ETH_P_IP))
@@ -251,15 +258,35 @@ static void rxe_rcv_mcast_pkt(struct sk_buff *skb)
if (!mcg)
goto drop; /* mcast group not registered */
+ /* this is the current number of qps attached to mcg plus a
+ * little room in case new qps attach while we copy. It isn't
+ * wrong to miss some qps since it is just a matter of
+ * precisely when the packet is considered to have arrived.
+ */
+ nmax = atomic_read(&mcg->qp_num) + 2;
+ qp_array = kmalloc_array(nmax, sizeof(qp), GFP_ATOMIC);
+ if (unlikely(!qp_array)) {
+ rxe_drop_ref(mcg);
+ goto drop;
+ }
+
+ n = 0;
spin_lock_bh(&mcg->mcg_lock);
+ list_for_each_entry(mca, &mcg->qp_list, qp_list) {
+ qp_array[n++] = mca->qp;
+ if (n == nmax)
+ break;
+ }
+ spin_unlock_bh(&mcg->mcg_lock);
+ nmax = n;
/* this is unreliable datagram service so we let
* failures to deliver a multicast packet to a
* single QP happen and just move on and try
* the rest of them on the list
*/
- list_for_each_entry(mca, &mcg->qp_list, qp_list) {
- qp = mca->qp;
+ for (n = 0; n < nmax; n++) {
+ qp = qp_array[n];
/* validate qp for incoming packet */
err = check_type_state(rxe, pkt, qp);
@@ -274,8 +301,8 @@ static void rxe_rcv_mcast_pkt(struct sk_buff *skb)
* skb and pass to the QP. Pass the original skb to
* the last QP in the list.
*/
- if (mca->qp_list.next != &mcg->qp_list) {
+ if (n < nmax - 1) {
s = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!s))
continue;
@@ -295,7 +322,7 @@ static void rxe_rcv_mcast_pkt(struct sk_buff *skb)
}
}
- spin_unlock_bh(&mcg->mcg_lock);
+ kfree(qp_array);
rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
@@ -356,8 +356,8 @@ struct rxe_mcg {
spinlock_t mcg_lock; /* guard group */
struct rxe_dev *rxe;
struct list_head qp_list;
+ atomic_t qp_num;
union ib_gid mgid;
- int num_qp;
u32 qkey;
u16 pkey;
};
Currently rxe_rcv_mcast_pkt performs most of its work under the
mcg->mcg_lock and calls into rxe_rcv_pkt, which queues the packets
to the responder and completer tasklets while still holding the
lock, which is a very bad idea. This patch walks the qp_list in mcg
and copies the qp addresses to a dynamically allocated array under
the lock, but does the rest of the work without holding the lock.
The critical section is now very small.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mcast.c | 11 +++++----
 drivers/infiniband/sw/rxe/rxe_recv.c  | 35 ++++++++++++++++++++-----
 drivers/infiniband/sw/rxe/rxe_verbs.h |  2 +-
 3 files changed, 38 insertions(+), 10 deletions(-)
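For readers who want to see the locking pattern outside the kernel
tree, below is a minimal userspace sketch of the same two-stage
approach: snapshot a lock-protected list into a plain array, then
deliver to each entry with the lock dropped. All names here (group,
member, deliver, rcv_mcast) are illustrative stand-ins rather than
rxe driver code, with a pthread mutex playing the role of mcg_lock
and a C11 atomic playing the role of qp_num.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* toy stand-ins for the driver structs; purely illustrative */
struct member {
	int id;
	struct member *next;
};

struct group {
	pthread_mutex_t lock;	/* plays the role of mcg->mcg_lock */
	struct member *head;	/* plays the role of mcg->qp_list */
	atomic_int num;		/* plays the role of mcg->qp_num */
};

/* stands in for rxe_rcv_pkt(); it may block or take other locks,
 * which is exactly why the caller must not hold group->lock here
 */
static void deliver(struct member *m, const char *msg)
{
	printf("member %d got: %s\n", m->id, msg);
}

static void rcv_mcast(struct group *g, const char *msg)
{
	struct member **array, *m;
	int i, n = 0, nmax;

	/* stage 1: snapshot the list into a linear array under the
	 * lock; "+ 2" leaves headroom for members that attach between
	 * the atomic read and taking the lock
	 */
	nmax = atomic_load(&g->num) + 2;
	array = malloc(nmax * sizeof(*array));
	if (!array)
		return;

	pthread_mutex_lock(&g->lock);
	for (m = g->head; m && n < nmax; m = m->next)
		array[n++] = m;
	pthread_mutex_unlock(&g->lock);

	/* stage 2: walk the snapshot with the lock dropped */
	for (i = 0; i < n; i++)
		deliver(array[i], msg);

	free(array);
}

int main(void)
{
	struct member c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct group g = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = &a };

	atomic_init(&g.num, 3);
	rcv_mcast(&g, "hello");
	return 0;
}

One caveat the sketch shares with the patch: stage 2 works on bare
pointers after the lock is dropped, so the objects in the snapshot
must be guaranteed to outlive the unlocked walk, for example by
taking a reference on each qp while copying and dropping it once the
packet has been handed off.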