
[net-next,1/3] nfp: support VF multi-queues configuration

Message ID 20221019140943.18851-2-simon.horman@corigine.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series nfp: support VF multi-queues configuration

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 9 maintainers not CCed: louis.peens@corigine.com bin.chen@corigine.com baowen.zheng@corigine.com niklas.soderlund@corigine.com na.wang@corigine.com fei.qin@corigine.com yu.xiao@corigine.com edumazet@google.com yinjun.zhang@corigine.com
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: line length of 99 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Simon Horman Oct. 19, 2022, 2:09 p.m. UTC
From: Diana Wang <na.wang@corigine.com>

Add support for configuring multiple queues per VF.
This sets the maximum number of queues for each VF;
users can still adjust the number of queues in use with
ethtool -L <intf>
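
For example (<vf_intf> is a placeholder for the VF's netdev name):

  ethtool -l <vf_intf>              # show current and maximum channel counts
  ethtool -L <vf_intf> combined 4   # bring 4 queues into use, up to the configured maximum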

The set of queue counts that can be configured per VF is
{16, 8, 4, 2, 1}, and the total number of configured queues
must not exceed the VF queue resources available.
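
As an illustration (the totals here are assumed, not taken from real
hardware): with 46 VF queues available, configuring two VFs with 16
queues, one VF with 8 and one VF with 4 consumes 44 queues and is
accepted, whereas any configuration summing to more than 46 queues
is rejected.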

If more VFs are created than have been configured, the driver
checks the request against the queues not yet assigned: the
number of remaining queues must not be smaller than the number
of extra VFs. If this condition is met, each extra VF is
assigned a single queue.
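
For example, if queues are configured for 4 VFs but 6 VFs are created,
the 2 extra VFs each receive a single queue, provided at least 2
unassigned queues remain; otherwise enabling SR-IOV fails with -EINVAL.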

If not configured (the default mode), the created VFs divide
the total VF queues equally, with each VF's queue count rounded
down to a power of 2.
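
For example, assuming 46 queues remain for VFs, creating 5 VFs yields
46 / 5 = 9 queues per VF, which is rounded down to 8, the largest
supported power of 2 that does not exceed it.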

Signed-off-by: Diana Wang <na.wang@corigine.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
---
 drivers/net/ethernet/netronome/nfp/nfp_main.c |   6 ++
 drivers/net/ethernet/netronome/nfp/nfp_main.h |  13 +++
 drivers/net/ethernet/netronome/nfp/nfp_net.h  |   1 +
 .../net/ethernet/netronome/nfp/nfp_net_main.c |   3 +
 .../ethernet/netronome/nfp/nfp_net_sriov.c    | 101 ++++++++++++++++++
 .../ethernet/netronome/nfp/nfp_net_sriov.h    |   3 +
 6 files changed, 127 insertions(+)

Patch

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index e66e548919d4..f0e197067e08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -29,6 +29,7 @@ 
 #include "nfp_app.h"
 #include "nfp_main.h"
 #include "nfp_net.h"
+#include "nfp_net_sriov.h"
 
 static const char nfp_driver_name[] = "nfp";
 
@@ -252,6 +253,10 @@  static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
 		return -EINVAL;
 	}
 
+	err = nfp_vf_queues_config(pf, num_vfs);
+	if (err)
+		return err;
+
 	err = pci_enable_sriov(pdev, num_vfs);
 	if (err) {
 		dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
@@ -847,6 +852,7 @@  static int nfp_pci_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_fw_unload;
 
+	pf->default_config_vfs_queue = true;
 	pf->num_vfs = pci_num_vf(pdev);
 	if (pf->num_vfs > pf->limit_vfs) {
 		dev_err(&pdev->dev,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index afd3edfa2428..c24f990bcdbb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -17,6 +17,12 @@ 
 #include <linux/workqueue.h>
 #include <net/devlink.h>
 
+ /* Number of supported per-VF max-queue-number types; the
+  * supported queue counts are 16, 8, 4, 2 and 1.
+  */
+#define NFP_NET_CFG_QUEUE_TYPE		5
+#define NFP_NET_CFG_MAX_Q(type)		(1 << (NFP_NET_CFG_QUEUE_TYPE - (type) - 1))
+
 struct dentry;
 struct device;
 struct pci_dev;
@@ -63,6 +69,10 @@  struct nfp_dumpspec {
  * @irq_entries:	Array of MSI-X entries for all vNICs
  * @limit_vfs:		Number of VFs supported by firmware (~0 for PCI limit)
  * @num_vfs:		Number of SR-IOV VFs enabled
+ * @max_vf_queues:	Number of queues that can be allocated to VFs
+ * @config_vfs_queue:	Array holding the number of VFs for each max-queue-number type
+ *                      The configurable per-VF queue counts are {16, 8, 4, 2, 1}
+ * @default_config_vfs_queue:	Whether queues are allocated to VFs by even distribution (default mode)
  * @fw_loaded:		Is the firmware loaded?
  * @unload_fw_on_remove:Do we need to unload firmware on driver removal?
  * @ctrl_vnic:		Pointer to the control vNIC if available
@@ -111,6 +121,9 @@  struct nfp_pf {
 
 	unsigned int limit_vfs;
 	unsigned int num_vfs;
+	unsigned int max_vf_queues;
+	u8 config_vfs_queue[NFP_NET_CFG_QUEUE_TYPE];
+	bool default_config_vfs_queue;
 
 	bool fw_loaded;
 	bool unload_fw_on_remove;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index a101ff30a1ae..5deeae87b684 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -78,6 +78,7 @@ 
 /* Queue/Ring definitions */
 #define NFP_NET_MAX_TX_RINGS	64	/* Max. # of Tx rings per device */
 #define NFP_NET_MAX_RX_RINGS	64	/* Max. # of Rx rings per device */
+#define NFP_NET_CTRL_RINGS	1	/* Max. # of Ctrl rings per device */
 #define NFP_NET_MAX_R_VECS	(NFP_NET_MAX_TX_RINGS > NFP_NET_MAX_RX_RINGS ? \
 				 NFP_NET_MAX_TX_RINGS : NFP_NET_MAX_RX_RINGS)
 #define NFP_NET_MAX_IRQS	(NFP_NET_NON_Q_VECTORS + NFP_NET_MAX_R_VECS)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 3bae92dc899e..3c2e49813655 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -289,6 +289,7 @@  static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
 		if (err)
 			goto err_prev_deinit;
 
+		pf->max_vf_queues -= nn->max_r_vecs;
 		id++;
 	}
 
@@ -754,6 +755,8 @@  int nfp_net_pci_probe(struct nfp_pf *pf)
 		}
 	}
 
+	pf->max_vf_queues = NFP_NET_MAX_R_VECS - NFP_NET_CTRL_RINGS;
+
 	err = nfp_net_pf_app_init(pf, qc_bar, stride);
 	if (err)
 		goto err_unmap;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 6eeeb0fda91f..eca6e65089f4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -29,6 +29,9 @@  nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool
 		return -EOPNOTSUPP;
 	}
 
+	if (cap == NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG)
+		return 0;
+
 	if (vf < 0 || vf >= app->pf->num_vfs) {
 		if (warn)
 			nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
@@ -309,3 +312,101 @@  int nfp_app_get_vf_config(struct net_device *netdev, int vf,
 
 	return 0;
 }
+
+static int nfp_set_vf_queue_config(struct nfp_pf *pf, int num_vfs)
+{
+	unsigned char config_content[sizeof(u32)] = {0};
+	unsigned int i, j, k, cfg_vf_count, offset;
+	struct nfp_net *nn;
+	u32 raw;
+	int err;
+
+	raw = 0; k = 0; cfg_vf_count = 0;
+	offset = NFP_NET_VF_CFG_MB_SZ + pf->limit_vfs * NFP_NET_VF_CFG_SZ;
+
+	for (i = 0; i < NFP_NET_CFG_QUEUE_TYPE; i++) {
+		for (j = 0; j < pf->config_vfs_queue[i]; j++) {
+			config_content[k++] = NFP_NET_CFG_MAX_Q(i);
+			cfg_vf_count++;
+			if (k == sizeof(raw) || cfg_vf_count == num_vfs) {
+				raw = config_content[0] |
+				      (config_content[1] << BITS_PER_BYTE) |
+				      (config_content[2] << (2 * BITS_PER_BYTE)) |
+				      (config_content[3] << (3 * BITS_PER_BYTE));
+				writel(raw, pf->vfcfg_tbl2 + offset);
+				offset += sizeof(raw);
+				memset(config_content, 0, sizeof(u32));
+				k = 0;
+			}
+		}
+	}
+
+	writew(NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG, pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_UPD);
+
+	nn = list_first_entry(&pf->vnics, struct nfp_net, vnic_list);
+	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VF);
+	if (err) {
+		nfp_warn(pf->cpp,
+			 "FW reconfig VF config queue failed: %d\n", err);
+		return -EINVAL;
+	}
+
+	err = readw(pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_RET);
+	if (err) {
+		nfp_warn(pf->cpp,
+			 "FW refused VF config queue update with errno: %d\n", err);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int nfp_vf_queues_config(struct nfp_pf *pf, int num_vfs)
+{
+	unsigned int i, j, cfg_num_queues = 0, cfg_num_vfs;
+
+	if (nfp_net_sriov_check(pf->app, 0, NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG, "max_queue", true))
+		return 0;
+
+	/* In default mode, the created VFs divide all the VF queues equally,
+	 * with each VF's queue count rounded down to a power of 2
+	 */
+	if (pf->default_config_vfs_queue) {
+		memset(pf->config_vfs_queue, 0, NFP_NET_CFG_QUEUE_TYPE);
+		j = pf->max_vf_queues / num_vfs;
+		for (i = 0; i < NFP_NET_CFG_QUEUE_TYPE; i++) {
+			if (j >= NFP_NET_CFG_MAX_Q(i)) {
+				pf->config_vfs_queue[i] = num_vfs;
+				break;
+			}
+		}
+		return nfp_set_vf_queue_config(pf, num_vfs);
+	}
+
+	for (i = 0, cfg_num_vfs = 0; i < NFP_NET_CFG_QUEUE_TYPE; i++) {
+		cfg_num_queues += NFP_NET_CFG_MAX_Q(i) * pf->config_vfs_queue[i];
+		cfg_num_vfs += pf->config_vfs_queue[i];
+	}
+
+	if (cfg_num_queues > pf->max_vf_queues) {
+		dev_warn(&pf->pdev->dev,
+			 "Configured queue count exceeds the number of available VF queues.\n");
+		return -EINVAL;
+	}
+
+	cfg_num_queues = pf->max_vf_queues - cfg_num_queues;
+
+	if (num_vfs > cfg_num_vfs) {
+		cfg_num_vfs = num_vfs - cfg_num_vfs;
+		if (cfg_num_queues < cfg_num_vfs) {
+			dev_warn(&pf->pdev->dev,
+				 "Not enough remaining queues to allocate to the extra VFs.\n");
+			return -EINVAL;
+		}
+		dev_info(&pf->pdev->dev,
+			 "Each extra VF is allocated a single queue.\n");
+		pf->config_vfs_queue[NFP_NET_CFG_QUEUE_TYPE - 1] += cfg_num_vfs;
+	}
+
+	return nfp_set_vf_queue_config(pf, num_vfs);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index 2d445fa199dc..36df29fdaf0e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -21,6 +21,7 @@ 
 #define   NFP_NET_VF_CFG_MB_CAP_TRUST			  (0x1 << 4)
 #define   NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO		  (0x1 << 5)
 #define   NFP_NET_VF_CFG_MB_CAP_RATE			  (0x1 << 6)
+#define   NFP_NET_VF_CFG_MB_CAP_QUEUE_CONFIG		  (0x1 << 7)
 #define NFP_NET_VF_CFG_MB_RET				0x2
 #define NFP_NET_VF_CFG_MB_UPD				0x4
 #define   NFP_NET_VF_CFG_MB_UPD_MAC			  (0x1 << 0)
@@ -30,6 +31,7 @@ 
 #define   NFP_NET_VF_CFG_MB_UPD_TRUST			  (0x1 << 4)
 #define   NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO		  (0x1 << 5)
 #define   NFP_NET_VF_CFG_MB_UPD_RATE			  (0x1 << 6)
+#define   NFP_NET_VF_CFG_MB_UPD_QUEUE_CONFIG		  (0x1 << 7)
 #define NFP_NET_VF_CFG_MB_VF_NUM			0x7
 
 /* VF config entry
@@ -67,5 +69,6 @@  int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
 			      int link_state);
 int nfp_app_get_vf_config(struct net_device *netdev, int vf,
 			  struct ifla_vf_info *ivi);
+int nfp_vf_queues_config(struct nfp_pf *pf, int num_vfs);
 
 #endif /* _NFP_NET_SRIOV_H_ */