[09/24] lnet: o2iblnd: Salt comp_vector

Message ID 1662429337-18737-10-git-send-email-jsimmons@infradead.org
State New, archived
Series lustre: update to OpenSFS tree Sept 5, 2022

Commit Message

James Simmons Sept. 6, 2022, 1:55 a.m. UTC
From: Ian Ziemba <ian.ziemba@hpe.com>

If conns_per_peer is greater than 1, all the connections targeting
the same peer are assigned the same comp_vector. This results in
multiple IB CQs targeting the same peer being serialized on a single
comp_vector.

Help spread the IB CQ work across multiple cores by salting
comp_vector based on the per-peer connection count.
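
For illustration, a minimal user-space sketch of the idea (not the
kernel code: pick_comp_vector(), NUM_COMP_VECTORS and CPUS_IN_CPT are
hypothetical stand-ins for kiblnd_get_completion_vector(),
device->num_comp_vectors and cpumask_weight(), and the cpumask walk
is collapsed into a plain modulo):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel-side values. */
#define NUM_COMP_VECTORS	8	/* device->num_comp_vectors */
#define CPUS_IN_CPT		4	/* cpumask_weight(*mask) */

/* Adding the per-peer connection count to the NID before the modulo
 * steers successive connections to the same peer onto different
 * completion vectors instead of serializing them on one.
 */
static int pick_comp_vector(uint64_t nid, int nconns)
{
	uint64_t salted = nid + (uint64_t)nconns;

	return (int)(salted % CPUS_IN_CPT) % NUM_COMP_VECTORS;
}

int main(void)
{
	uint64_t nid = 0x0a0b0c0dULL;	/* example peer NID */
	int i;

	/* with conns_per_peer = 4, each connection gets its own salt */
	for (i = 0; i < 4; i++)
		printf("conn %d -> comp_vector %d\n",
		       i, pick_comp_vector(nid, i));
	return 0;
}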

1 client to 1 server LST 1M write results with 4 conns_per_peer and
RXE configured to spread out work based on comp_vector:

Before: 1377.92 MB/s
After: 3828.48 MB/s

HPE-bug-id: LUS-11043
WC-bug-id: https://jira.whamcloud.com/browse/LU-16078
Lustre-commit: 1ef1fa06b20c424f5 ("LU-16078 o2iblnd: Salt comp_vector")
Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Reviewed-on: https://review.whamcloud.com/48148
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
---
 net/lnet/klnds/o2iblnd/o2iblnd.c | 14 +++++++++++---
 net/lnet/klnds/o2iblnd/o2iblnd.h |  2 ++
 2 files changed, 13 insertions(+), 3 deletions(-)

Patch

diff --git a/net/lnet/klnds/o2iblnd/o2iblnd.c b/net/lnet/klnds/o2iblnd/o2iblnd.c
index ea28c65..c713528 100644
--- a/net/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/net/lnet/klnds/o2iblnd/o2iblnd.c
@@ -338,6 +338,7 @@  int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
 	peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
 	peer_ni->ibp_queue_depth_mod = 0;	/* try to use the default */
 	kref_init(&peer_ni->ibp_kref);
+	atomic_set(&peer_ni->ibp_nconns, 0);
 
 	INIT_HLIST_NODE(&peer_ni->ibp_list);
 	INIT_LIST_HEAD(&peer_ni->ibp_conns);
@@ -569,7 +570,7 @@  static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
 	int vectors;
 	int off;
 	int i;
-	lnet_nid_t nid = conn->ibc_peer->ibp_nid;
+	lnet_nid_t ibp_nid;
 
 	vectors = conn->ibc_cmid->device->num_comp_vectors;
 	if (vectors <= 1)
@@ -579,8 +580,13 @@  static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
 	if (!mask)
 		return 0;
 
-	/* hash NID to CPU id in this partition... */
-	off = do_div(nid, cpumask_weight(*mask));
+	/* hash NID to CPU id in this partition... when targeting a single peer
+	 * with multiple QPs, to engage more cores in CQ processing for a
+	 * single peer, use ibp_nconns to salt the comp_vector value
+	 */
+	ibp_nid = conn->ibc_peer->ibp_nid +
+		  atomic_read(&conn->ibc_peer->ibp_nconns);
+	off = do_div(ibp_nid, cpumask_weight(*mask));
 	for_each_cpu(i, *mask) {
 		if (!off--)
 			return i % vectors;
@@ -889,6 +895,7 @@  struct kib_conn *kiblnd_create_conn(struct kib_peer_ni *peer_ni,
 	conn->ibc_state = state;
 
 	/* 1 more conn */
+	atomic_inc(&peer_ni->ibp_nconns);
 	atomic_inc(&net->ibn_nconns);
 	return conn;
 
@@ -954,6 +961,7 @@  void kiblnd_destroy_conn(struct kib_conn *conn)
 
 		kiblnd_peer_decref(peer_ni);
 		rdma_destroy_id(cmid);
+		atomic_dec(&peer_ni->ibp_nconns);
 		atomic_dec(&net->ibn_nconns);
 	}
 }
diff --git a/net/lnet/klnds/o2iblnd/o2iblnd.h b/net/lnet/klnds/o2iblnd/o2iblnd.h
index 0066e85..56d486f 100644
--- a/net/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/net/lnet/klnds/o2iblnd/o2iblnd.h
@@ -522,6 +522,8 @@  struct kib_peer_ni {
 	u16			ibp_queue_depth;
 	/* reduced value which allows conn to be created if max fails */
 	u16			ibp_queue_depth_mod;
+	/* Number of connections allocated. */
+	atomic_t		ibp_nconns;
 };
 
 extern struct kib_data kiblnd_data;