[iwl-net,1/2] idpf: preserve IRQ affinity settings across resets

Message ID 20241109001206.213581-2-ahmed.zaki@intel.com (mailing list archive)
State Awaiting Upstream
Delegated to: Netdev Maintainers
Series: idpf: Preserve IRQ affinity and sync IRQ

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net
netdev/apply fail Patch does not apply to net-0

Commit Message

Ahmed Zaki Nov. 9, 2024, 12:12 a.m. UTC
From: Sudheer Mogilappagari <sudheer.mogilappagari@intel.com>

Currently, the IRQ affinity settings are lost when the interface goes
through a soft reset (e.g. due to an MTU change or a change in the
number of queues). Use irq_set_affinity_notifier() callbacks to keep
the IRQ affinity info in sync between the driver and the kernel.
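
In short, instead of a one-shot irq_set_affinity_hint(), the driver now
registers a notifier whose callback copies any new mask into the
q_vector's cached affinity_mask (condensed from the diff below):

	static void idpf_irq_affinity_notify(struct irq_affinity_notify *notify,
					     const cpumask_t *mask)
	{
		struct idpf_q_vector *q_vector =
			container_of(notify, struct idpf_q_vector, affinity_notify);

		/* keep the cached mask in sync so it survives a soft reset */
		cpumask_copy(q_vector->affinity_mask, mask);
	}

	/* at IRQ request time, replacing irq_set_affinity_hint() */
	q_vector->affinity_notify.notify = idpf_irq_affinity_notify;
	q_vector->affinity_notify.release = idpf_irq_affinity_release;
	irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);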

Fixes: d4d558718266 ("idpf: initialize interrupts and enable vport")
Reviewed-by: Ahmed Zaki <ahmed.zaki@intel.com>
Signed-off-by: Sudheer Mogilappagari <sudheer.mogilappagari@intel.com>
Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
---
 drivers/net/ethernet/intel/idpf/idpf_txrx.c | 35 +++++++++++++++++++--
 drivers/net/ethernet/intel/idpf/idpf_txrx.h |  6 +++-
 2 files changed, 38 insertions(+), 3 deletions(-)

Comments

Jakub Kicinski Nov. 12, 2024, 2:53 a.m. UTC | #1
On Fri,  8 Nov 2024 17:12:05 -0700 Ahmed Zaki wrote:
> From: Sudheer Mogilappagari <sudheer.mogilappagari@intel.com>
> 
> Currently, the IRQ affinity settings are lost when the interface goes
> through a soft reset (e.g. due to an MTU change or a change in the
> number of queues). Use irq_set_affinity_notifier() callbacks to keep
> the IRQ affinity info in sync between the driver and the kernel.

Could you try doing this in the core? Store the mask in napi_struct
if it has an IRQ associated with it?

Barely any drivers get this right.
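
For context, a rough sketch of that core-level idea (the napi_struct
fields and the callback shown here are hypothetical, not an existing
kernel API at the time of this thread):

	/* hypothetical: the core, not each driver, caches the affinity of
	 * the IRQ associated with a NAPI instance
	 */
	struct napi_struct {
		...
		int				irq;			/* e.g. set via netif_napi_set_irq() */
		cpumask_var_t			affinity_mask;		/* hypothetical */
		struct irq_affinity_notify	irq_affinity_notify;	/* hypothetical */
	};

	static void napi_irq_affinity_notify(struct irq_affinity_notify *notify,
					     const cpumask_t *mask)
	{
		struct napi_struct *napi =
			container_of(notify, struct napi_struct, irq_affinity_notify);

		cpumask_copy(napi->affinity_mask, mask);
	}

Drivers would then only need to associate the IRQ with the NAPI instance,
and the core could re-apply the cached mask whenever the IRQ is
re-requested after a reset.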

Patch

diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index a8989dd98272..82e0e3698f10 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3583,7 +3583,7 @@  static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
 		irq_num = adapter->msix_entries[vidx].vector;
 
 		/* clear the affinity_mask in the IRQ descriptor */
-		irq_set_affinity_hint(irq_num, NULL);
+		irq_set_affinity_notifier(irq_num, NULL);
 		kfree(free_irq(irq_num, q_vector));
 	}
 }
@@ -3723,6 +3723,33 @@  void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
 	writel(intval, q_vector->intr_reg.dyn_ctl);
 }
 
+/**
+ * idpf_irq_affinity_notify - Callback for affinity changes
+ * @notify: context as to what irq was changed
+ * @mask: the new affinity mask
+ *
+ * Callback function registered via irq_set_affinity_notifier() so that
+ * the driver can receive changes to the IRQ affinity masks.
+ */
+static void
+idpf_irq_affinity_notify(struct irq_affinity_notify *notify,
+			 const cpumask_t *mask)
+{
+	struct idpf_q_vector *q_vector =
+		container_of(notify, struct idpf_q_vector, affinity_notify);
+
+	cpumask_copy(q_vector->affinity_mask, mask);
+}
+
+/**
+ * idpf_irq_affinity_release - Callback for affinity notifier release
+ * @ref: internal core kernel usage
+ *
+ * Callback function registered via irq_set_affinity_notifier() to inform
+ * the driver that it will no longer receive notifications.
+ */
+static void idpf_irq_affinity_release(struct kref __always_unused *ref) {}
+
 /**
  * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
  * @vport: main vport structure
@@ -3730,6 +3757,7 @@  void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
 static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
 {
 	struct idpf_adapter *adapter = vport->adapter;
+	struct irq_affinity_notify *affinity_notify;
 	const char *drv_name, *if_name, *vec_name;
 	int vector, err, irq_num, vidx;
 
@@ -3763,7 +3791,10 @@  static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
 			goto free_q_irqs;
 		}
 		/* assign the mask for this irq */
-		irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
+		affinity_notify = &q_vector->affinity_notify;
+		affinity_notify->notify = idpf_irq_affinity_notify;
+		affinity_notify->release = idpf_irq_affinity_release;
+		irq_set_affinity_notifier(irq_num, affinity_notify);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index b59aa7d8de2c..48cb58d14eff 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -379,6 +379,7 @@  struct idpf_intr_reg {
  * @rx_itr_idx: RX ITR index
  * @v_idx: Vector index
  * @affinity_mask: CPU affinity mask
+ * @affinity_notify: struct with callbacks for notification of affinity changes
  */
 struct idpf_q_vector {
 	__cacheline_group_begin_aligned(read_mostly);
@@ -416,12 +417,15 @@  struct idpf_q_vector {
 	u16 v_idx;
 
 	cpumask_var_t affinity_mask;
+	struct irq_affinity_notify affinity_notify;
 	__cacheline_group_end_aligned(cold);
 };
+
 libeth_cacheline_set_assert(struct idpf_q_vector, 112,
 			    24 + sizeof(struct napi_struct) +
 			    2 * sizeof(struct dim),
-			    8 + sizeof(cpumask_var_t));
+			    8 + sizeof(cpumask_var_t) +
+			    sizeof(struct irq_affinity_notify));
 
 struct idpf_rx_queue_stats {
 	u64_stats_t packets;