@@ -3005,15 +3005,19 @@ static void stmmac_free_irq(struct net_device *dev,
fallthrough;
case REQ_IRQ_ERR_TX:
for (j = irq_idx - 1; j >= 0; j--) {
- if (priv->tx_irq[j] > 0)
+ if (priv->tx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->tx_irq[j], NULL);
free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ }
}
irq_idx = priv->plat->rx_queues_to_use;
fallthrough;
case REQ_IRQ_ERR_RX:
for (j = irq_idx - 1; j >= 0; j--) {
- if (priv->rx_irq[j] > 0)
+ if (priv->rx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->rx_irq[j], NULL);
free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ }
}
if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
@@ -3045,6 +3049,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
struct stmmac_priv *priv = netdev_priv(dev);
+ cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
int ret;
@@ -3153,6 +3158,9 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
}
/* Request Tx MSI irq */
@@ -3173,6 +3181,9 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
irq_idx = i;
goto irq_error;
}
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
}
return 0;
Certain platforms like Intel mGBE have independent hardware IRQ resources
for TX and RX DMA operation. In preparation to support XDP TX, we add an
IRQ affinity hint to group both the RX and TX queues of the same queue ID
onto the same CPU.

Changes in v2:
- The IRQ affinity hint needs to be set to NULL before the IRQ is
  released. Thanks to Song, Yoong Siang for reporting the issue.

Reported-by: Song, Yoong Siang <yoong.siang.song@intel.com>
Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
---
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
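
[Editor's note] For reference, here is a minimal sketch of the per-queue
affinity-hint pattern the patch applies: on request, pin each queue IRQ's
hint to a CPU chosen by queue ID; on teardown, clear the hint before
free_irq(). This is not the stmmac code -- all names (example_queue,
example_isr, and so on) are hypothetical.

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Hypothetical illustration only -- not part of the patch. */
struct example_queue {
	int irq;
	cpumask_t hint_mask;	/* must stay valid while the hint is set */
};

static irqreturn_t example_isr(int irq, void *data)
{
	/* per-queue DMA interrupt handling would go here */
	return IRQ_HANDLED;
}

static int example_setup_queue_irq(struct example_queue *q, int queue_id)
{
	int ret;

	ret = request_irq(q->irq, example_isr, 0, "example-queue", q);
	if (ret)
		return ret;

	/* Hint the same CPU for the RX and TX IRQs of one queue ID. */
	cpumask_clear(&q->hint_mask);
	cpumask_set_cpu(queue_id % num_online_cpus(), &q->hint_mask);
	irq_set_affinity_hint(q->irq, &q->hint_mask);
	return 0;
}

static void example_free_queue_irq(struct example_queue *q)
{
	/* Clear the hint before free_irq() so the IRQ descriptor does
	 * not keep a stale affinity_hint pointer (the v2 fix above).
	 */
	irq_set_affinity_hint(q->irq, NULL);
	free_irq(q->irq, q);
}

One API detail worth noting: irq_set_affinity_hint() stores the cpumask
pointer it is given rather than copying the mask, which is why the mask
in this sketch lives in persistent per-queue state.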