
[v2,net-next,4/5] stmmac: intel: add support for multi-vector msi and msi-x

Message ID 20210325173916.13203-5-weifeng.voon@intel.com (mailing list archive)
State New, archived
Series net: stmmac: enable multi-vector MSI

Commit Message

Voon, Weifeng March 25, 2021, 5:39 p.m. UTC
From: Ong Boon Leong <boon.leong.ong@intel.com>

Intel mgbe controller supports multi-vector interrupts:
msi_rx_vec	0,2,4,6,8,10,12,14
msi_tx_vec	1,3,5,7,9,11,13,15
msi_sfty_ue_vec	26
msi_sfty_ce_vec	27
msi_lpi_vec	28
msi_mac_vec	29

During probe(), the driver starts by requesting allocation of multi-vector
interrupts. If that fails, it automatically falls back to requesting a single
interrupt.
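
In short, the probe-time flow is (using the helpers added by this patch):

	ret = stmmac_config_multi_msi(pdev, plat, &res);
	if (ret) {
		/* Multi-vector allocation failed, fall back to one shared IRQ */
		ret = stmmac_config_single_msi(pdev, plat, &res);
		if (ret)
			goto err_alloc_irq;
	}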

Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Co-developed-by: Voon Weifeng <weifeng.voon@intel.com>
Signed-off-by: Voon Weifeng <weifeng.voon@intel.com>
---
Changes:
v1 -> v2
 - Moved the MSI TX/RX base vector check before IRQ allocation
 - Restructured the clean-up code for IRQ allocation and probe failures
 - Unprepared and unregistered the stmmac-clk if IRQ allocation fails
---
 .../net/ethernet/stmicro/stmmac/dwmac-intel.c | 120 ++++++++++++++++--
 1 file changed, 111 insertions(+), 9 deletions(-)

Comments

Ong Boon Leong March 26, 2021, 9:07 a.m. UTC | #1
>+static int stmmac_config_multi_msi(struct pci_dev *pdev,
>+				   struct plat_stmmacenet_data *plat,
>+				   struct stmmac_resources *res)
>+{
For optimum processing of each RX & TX queue pair, we should use
irq_set_affinity_hint() to pin the corresponding RXQ and TXQ IRQs to the same CPU.
This will benefit processing for the upcoming XDP TX and XDP TX ZC support.

cpumask_t cpu_mask;

>+	int ret;
>+	int i;
>+
>+	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
>+	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
>+		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
>+			 __func__);
>+		return -1;
>+	}
>+
>+	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
>+				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
>+	if (ret < 0) {
>+		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
>+			 __func__);
>+		return ret;
>+	}
>+
>+	/* For RX MSI */
>+	for (i = 0; i < plat->rx_queues_to_use; i++) {
>+		res->rx_irq[i] = pci_irq_vector(pdev,
>+						plat->msi_rx_base_vec + i * 2);

		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->rx_irq[i], &cpu_mask);

>+	}
>+
>+	/* For TX MSI */
>+	for (i = 0; i < plat->tx_queues_to_use; i++) {
>+		res->tx_irq[i] = pci_irq_vector(pdev,
>+						plat->msi_tx_base_vec + i * 2);

		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->tx_irq[i], &cpu_mask);

>+	}
>+
>+	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
>+		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
>+	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
>+		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
>+	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
>+		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
>+	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
>+		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
>+	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
>+		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
>+
>+	plat->multi_msi_en = 1;
>+	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
>+
>+	return 0;
>+}
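
To make the suggestion concrete, here is a sketch of the RX/TX loops with the
affinity hints folded in (same loop structure as the quoted hunk; queue IRQs
are spread round-robin across the online CPUs so the RX and TX IRQs of a given
channel land on the same CPU):

	cpumask_t cpu_mask;

	/* For RX MSI */
	for (i = 0; i < plat->rx_queues_to_use; i++) {
		res->rx_irq[i] = pci_irq_vector(pdev,
						plat->msi_rx_base_vec + i * 2);
		/* Hint RX queue i onto CPU (i % num_online_cpus()) */
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->rx_irq[i], &cpu_mask);
	}

	/* For TX MSI */
	for (i = 0; i < plat->tx_queues_to_use; i++) {
		res->tx_irq[i] = pci_irq_vector(pdev,
						plat->msi_tx_base_vec + i * 2);
		/* Same CPU as RX queue i, pairing the channel's RX/TX IRQs */
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(res->tx_irq[i], &cpu_mask);
	}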

Patch

diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 992294d25706..08b4852eed4c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -492,6 +492,14 @@  static int intel_mgbe_common_data(struct pci_dev *pdev,
 	plat->has_crossts = true;
 	plat->crosststamp = intel_crosststamp;
 
+	/* Setup MSI vector offset specific to Intel mGbE controller */
+	plat->msi_mac_vec = 29;
+	plat->msi_lpi_vec = 28;
+	plat->msi_sfty_ce_vec = 27;
+	plat->msi_sfty_ue_vec = 26;
+	plat->msi_rx_base_vec = 0;
+	plat->msi_tx_base_vec = 1;
+
 	return 0;
 }
 
@@ -776,6 +784,79 @@  static const struct stmmac_pci_info quark_info = {
 	.setup = quark_default_data,
 };
 
+static int stmmac_config_single_msi(struct pci_dev *pdev,
+				    struct plat_stmmacenet_data *plat,
+				    struct stmmac_resources *res)
+{
+	int ret;
+
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+	if (ret < 0) {
+		dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
+			 __func__);
+		return ret;
+	}
+
+	res->irq = pci_irq_vector(pdev, 0);
+	res->wol_irq = res->irq;
+	plat->multi_msi_en = 0;
+	dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
+		 __func__);
+
+	return 0;
+}
+
+static int stmmac_config_multi_msi(struct pci_dev *pdev,
+				   struct plat_stmmacenet_data *plat,
+				   struct stmmac_resources *res)
+{
+	int ret;
+	int i;
+
+	if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
+	    plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
+		dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
+			 __func__);
+		return -1;
+	}
+
+	ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
+				    PCI_IRQ_MSI | PCI_IRQ_MSIX);
+	if (ret < 0) {
+		dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
+			 __func__);
+		return ret;
+	}
+
+	/* For RX MSI */
+	for (i = 0; i < plat->rx_queues_to_use; i++) {
+		res->rx_irq[i] = pci_irq_vector(pdev,
+						plat->msi_rx_base_vec + i * 2);
+	}
+
+	/* For TX MSI */
+	for (i = 0; i < plat->tx_queues_to_use; i++) {
+		res->tx_irq[i] = pci_irq_vector(pdev,
+						plat->msi_tx_base_vec + i * 2);
+	}
+
+	if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
+		res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
+	if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
+		res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
+	if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
+		res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
+	if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
+		res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
+	if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
+		res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
+
+	plat->multi_msi_en = 1;
+	dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
+
+	return 0;
+}
+
 /**
  * intel_eth_pci_probe
  *
@@ -833,18 +914,24 @@  static int intel_eth_pci_probe(struct pci_dev *pdev,
 	plat->bsp_priv = intel_priv;
 	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
 
+	/* Initialize all MSI vectors to invalid so that they can be set
+	 * according to the platform data settings below.
+	 * Note: MSI vector takes a value from 0 up to 31 (STMMAC_MSI_VEC_MAX)
+	 */
+	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
+	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
+	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
+	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
+	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
+	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
+	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
+
 	ret = info->setup(pdev, plat);
 	if (ret)
 		return ret;
 
-	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
-	if (ret < 0)
-		return ret;
-
 	memset(&res, 0, sizeof(res));
 	res.addr = pcim_iomap_table(pdev)[0];
-	res.wol_irq = pci_irq_vector(pdev, 0);
-	res.irq = pci_irq_vector(pdev, 0);
 
 	if (plat->eee_usecs_rate > 0) {
 		u32 tx_lpi_usec;
@@ -853,13 +940,28 @@  static int intel_eth_pci_probe(struct pci_dev *pdev,
 		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
 	}
 
+	ret = stmmac_config_multi_msi(pdev, plat, &res);
+	if (ret) {
+		ret = stmmac_config_single_msi(pdev, plat, &res);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
+				__func__);
+			goto err_alloc_irq;
+		}
+	}
+
 	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
 	if (ret) {
-		pci_free_irq_vectors(pdev);
-		clk_disable_unprepare(plat->stmmac_clk);
-		clk_unregister_fixed_rate(plat->stmmac_clk);
+		goto err_dvr_probe;
 	}
 
+	return 0;
+
+err_dvr_probe:
+	pci_free_irq_vectors(pdev);
+err_alloc_irq:
+	clk_disable_unprepare(plat->stmmac_clk);
+	clk_unregister_fixed_rate(plat->stmmac_clk);
 	return ret;
 }