diff mbox series

[RFC,net-next,2/6] net/smc: Prepare for multiple CQs per IB devices

Message ID 20220114054852.38058-3-tonylu@linux.alibaba.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series net/smc: Spread workload over multiple cores | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 84 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Tony Lu Jan. 14, 2022, 5:48 a.m. UTC
This introduces a helper that tracks the load of completion vectors.
During the setup process of an IB device, it helps pick the least used
vector of the current device. Only one CQ and two vectors are needed, so
it is of no practical use right now. This prepares for multiple CQs support.

Signed-off-by: Tony Lu <tonylu@linux.alibaba.com>
---
 net/smc/smc_ib.c | 48 ++++++++++++++++++++++++++++++++++++++++--------
 net/smc/smc_ib.h |  1 +
 2 files changed, 41 insertions(+), 8 deletions(-)
diff mbox series

Patch

diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index d1f337522bd5..9a162810ed8c 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -625,6 +625,28 @@  int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
 	return skb->len;
 }
 
+static int smc_ib_get_least_used_vector(struct smc_ib_device *smcibdev)
+{
+	int min = smcibdev->vector_load[0];
+	int i, index = 0;
+
+	/* use it from the beginning of vectors */
+	for (i = 0; i < smcibdev->ibdev->num_comp_vectors; i++) {
+		if (smcibdev->vector_load[i] < min) {
+			index = i;
+			min = smcibdev->vector_load[i];
+		}
+	}
+
+	smcibdev->vector_load[index]++;
+	return index;
+}
+
+static void smc_ib_put_vector(struct smc_ib_device *smcibdev, int index)
+{
+	smcibdev->vector_load[index]--;
+}
+
 static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
 {
 	struct smc_link *lnk = (struct smc_link *)priv;
@@ -801,8 +823,8 @@  void smc_ib_buf_unmap_sg(struct smc_link *lnk,
 
 long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
 {
-	struct ib_cq_init_attr cqattr =	{
-		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
+	struct ib_cq_init_attr cqattr =	{ .cqe = SMC_MAX_CQE };
+	int cq_send_vector, cq_recv_vector;
 	int cqe_size_order, smc_order;
 	long rc;
 
@@ -815,31 +837,35 @@  long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
 	smc_order = MAX_ORDER - cqe_size_order - 1;
 	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
 		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
+	cq_send_vector = smc_ib_get_least_used_vector(smcibdev);
+	cqattr.comp_vector = cq_send_vector;
 	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
 					      smc_wr_tx_cq_handler, NULL,
 					      smcibdev, &cqattr);
 	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
 	if (IS_ERR(smcibdev->roce_cq_send)) {
 		smcibdev->roce_cq_send = NULL;
-		goto out;
+		goto err_send;
 	}
-	/* spread to different completion vector */
-	if (smcibdev->ibdev->num_comp_vectors > 1)
-		cqattr.comp_vector = 1;
+	cq_recv_vector = smc_ib_get_least_used_vector(smcibdev);
+	cqattr.comp_vector = cq_recv_vector;
 	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
 					      smc_wr_rx_cq_handler, NULL,
 					      smcibdev, &cqattr);
 	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
 	if (IS_ERR(smcibdev->roce_cq_recv)) {
 		smcibdev->roce_cq_recv = NULL;
-		goto err;
+		goto err_recv;
 	}
 	smc_wr_add_dev(smcibdev);
 	smcibdev->initialized = 1;
 	goto out;
 
-err:
+err_recv:
+	smc_ib_put_vector(smcibdev, cq_recv_vector);
 	ib_destroy_cq(smcibdev->roce_cq_send);
+err_send:
+	smc_ib_put_vector(smcibdev, cq_send_vector);
 out:
 	mutex_unlock(&smcibdev->mutex);
 	return rc;
@@ -928,6 +954,11 @@  static int smc_ib_add_dev(struct ib_device *ibdev)
 	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
 			      smc_ib_global_event_handler);
 	ib_register_event_handler(&smcibdev->event_handler);
+	/* vector's load per ib device */
+	smcibdev->vector_load = kcalloc(ibdev->num_comp_vectors,
+					sizeof(int), GFP_KERNEL);
+	if (!smcibdev->vector_load)
+		return -ENOMEM;
 
 	/* trigger reading of the port attributes */
 	port_cnt = smcibdev->ibdev->phys_port_cnt;
@@ -968,6 +999,7 @@  static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
 	smc_ib_cleanup_per_ibdev(smcibdev);
 	ib_unregister_event_handler(&smcibdev->event_handler);
 	cancel_work_sync(&smcibdev->port_event_work);
+	kfree(smcibdev->vector_load);
 	kfree(smcibdev);
 }
 
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 5d8b49c57f50..a748b74e56e6 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -57,6 +57,7 @@  struct smc_ib_device {				/* ib-device infos for smc */
 	atomic_t		lnk_cnt_by_port[SMC_MAX_PORTS];
 						/* number of links per port */
 	int			ndev_ifidx[SMC_MAX_PORTS]; /* ndev if indexes */
+	int			*vector_load;	/* load of all completion vectors */
 };
 
 static inline __be32 smc_ib_gid_to_ipv4(u8 gid[SMC_GID_SIZE])