@@ -350,6 +350,7 @@ struct napi_config {
u64 gro_flush_timeout;
u64 irq_suspend_timeout;
u32 defer_hard_irqs;
+ cpumask_t affinity_mask;
unsigned int napi_id;
};
@@ -393,6 +394,7 @@ struct napi_struct {
int irq;
int index;
struct napi_config *config;
+ struct irq_affinity_notify affinity_notify;
};
enum {
@@ -2666,10 +2668,30 @@ static inline void *netdev_priv(const struct net_device *dev)
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
enum netdev_queue_type type,
struct napi_struct *napi);
+/* IRQ affinity notifier callback: persist the user-requested CPU mask
+ * in the napi's persistent config so the core can re-apply it to the
+ * IRQ after a netdev reset.
+ */
+static inline void
+netif_napi_affinity_notify(struct irq_affinity_notify *notify,
+			   const cpumask_t *mask)
+{
+	struct napi_struct *napi;
+
+	napi = container_of(notify, struct napi_struct, affinity_notify);
+
+	/* Nothing to remember without a persistent config. */
+	if (!napi->config)
+		return;
+
+	cpumask_copy(&napi->config->affinity_mask, mask);
+}
+
+/* No-op kref release for the napi IRQ affinity notifier: the notifier
+ * is embedded in napi_struct, whose lifetime is managed by the netdev
+ * core, so there is nothing to free here.
+ */
+static inline void
+netif_napi_affinity_release(struct kref __always_unused *ref)
+{
+}
static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
{
napi->irq = irq;
+
+	/* With a persistent config, delegate affinity tracking to the
+	 * core: register a notifier that records user changes, then
+	 * re-apply the last remembered mask.
+	 * NOTE(review): irq_set_affinity_notifier()/irq_set_affinity()
+	 * live in <linux/interrupt.h>; confirm netdevice.h pulls that
+	 * in, or move this helper out of line into net/core/dev.c.
+	 */
+	if (irq > 0 && napi->config) {
+		napi->affinity_notify.notify = netif_napi_affinity_notify;
+		napi->affinity_notify.release = netif_napi_affinity_release;
+		/* Don't force the stored affinity if the notifier could
+		 * not be registered: without it, later user changes
+		 * would never be persisted back into napi->config.
+		 */
+		if (irq_set_affinity_notifier(irq, &napi->affinity_notify))
+			return;
+		irq_set_affinity(irq, &napi->config->affinity_mask);
+	}
}
/* Default NAPI poll() weight
@@ -6843,6 +6843,8 @@ void __netif_napi_del(struct napi_struct *napi)
return;
if (napi->config) {
+ if (napi->irq > 0)
+ irq_set_affinity_notifier(napi->irq, NULL);
napi->index = -1;
napi->config = NULL;
}
@@ -11184,7 +11186,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
{
struct net_device *dev;
size_t napi_config_sz;
- unsigned int maxqs;
+ unsigned int maxqs, i;
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -11280,6 +11282,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
if (!dev->napi_config)
goto free_all;
+ for (i = 0; i < maxqs; i++)
+ cpumask_copy(&dev->napi_config[i].affinity_mask,
+ cpu_online_mask);
strscpy(dev->name, name);
dev->name_assign_type = name_assign_type;
A common task for most drivers is to remember the user's CPU affinity settings for its IRQs. On each netdev reset, the driver must then re-assign the user's settings to the IRQs. Add a CPU affinity mask to napi->config. To delegate the CPU affinity management to the core, drivers must: 1 - add a persistent napi config: netif_napi_add_config() 2 - bind an IRQ to the napi instance: netif_napi_set_irq() The core will then make sure to re-assign the stored affinity to the napi's IRQ. Suggested-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com> --- include/linux/netdevice.h | 22 ++++++++++++++++++++++ net/core/dev.c | 7 ++++++- 2 files changed, 28 insertions(+), 1 deletion(-)
A common task for most drivers is to remember the user's CPU affinity to its IRQs. On each netdev reset, the driver must then re-assign the user's setting to the IRQs. Add CPU affinity mask to napi->config. To delegate the CPU affinity management to the core, drivers must: 1 - add a persistent napi config: netif_napi_add_config() 2 - bind an IRQ to the napi instance: netif_napi_set_irq() the core will then make sure to use re-assign affinity to the napi's IRQ. Suggested-by: Jakub Kicinski <kuba@kernel.org> Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com> --- include/linux/netdevice.h | 22 ++++++++++++++++++++++ net/core/dev.c | 7 ++++++- 2 files changed, 28 insertions(+), 1 deletion(-)