@@ -10885,7 +10885,7 @@ static void bnxt_free_irq(struct bnxt *bp)
 		irq = &bp->irq_tbl[map_idx];
 		if (irq->requested) {
 			if (irq->have_cpumask) {
-				irq_set_affinity_hint(irq->vector, NULL);
+				irq_update_affinity_hint(irq->vector, NULL);
 				free_cpumask_var(irq->cpu_mask);
 				irq->have_cpumask = 0;
 			}
@@ -10940,10 +10940,10 @@ static int bnxt_request_irq(struct bnxt *bp)
 			irq->have_cpumask = 1;
 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
 					irq->cpu_mask);
-			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+			rc = irq_update_affinity_hint(irq->vector, irq->cpu_mask);
 			if (rc) {
 				netdev_warn(bp->dev,
-					    "Set affinity failed, IRQ = %d\n",
+					    "Update affinity hint failed, IRQ = %d\n",
					    irq->vector);
				break;
			}
irq_set_affinity_hint() is deprecated; use irq_update_affinity_hint()
instead. This removes the side effect of actually applying the
affinity. The driver does not need to worry about spreading its IRQs
across CPUs; the core code already takes care of that.

When the driver applies the affinities by itself, it breaks users'
expectations:

1. The user configures irqbalance with IRQBALANCE_BANNED_CPULIST to
   prevent IRQs from being moved to certain CPUs that run a real-time
   workload.
2. Reopening the bnxt_en device resets the affinity in bnxt_open().
3. bnxt_en has no idea about irqbalance's config, so it may move an
   IRQ to a banned CPU, and the real-time workload suffers
   unacceptable latency.

Signed-off-by: Mohammad Heib <mheib@redhat.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
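For reference, here is a minimal sketch of the hint-only pattern the
patch adopts. It is not bnxt code; the helpers example_set_irq_hint()
and example_clear_irq_hint() are hypothetical names used purely for
illustration. irq_update_affinity_hint() publishes the mask as a hint
for user space (e.g. irqbalance) without also forcing the IRQ's
effective affinity, which is the side effect the deprecated
irq_set_affinity_hint() had:

/*
 * Illustrative sketch only; example_set_irq_hint() and
 * example_clear_irq_hint() are hypothetical helpers, not kernel or
 * bnxt APIs.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

static int example_set_irq_hint(unsigned int vector, int idx, int node,
				cpumask_var_t *mask)
{
	if (!zalloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;

	/* Pick one CPU near the device's NUMA node for the hint. */
	cpumask_set_cpu(cpumask_local_spread(idx, node), *mask);

	/*
	 * Publish the hint (visible in /proc/irq/<vector>/affinity_hint)
	 * without overriding the effective affinity chosen by the IRQ
	 * core or by irqbalance.
	 */
	return irq_update_affinity_hint(vector, *mask);
}

static void example_clear_irq_hint(unsigned int vector, cpumask_var_t mask)
{
	/* A NULL mask clears the published hint before the mask is freed. */
	irq_update_affinity_hint(vector, NULL);
	free_cpumask_var(mask);
}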