
[V3,09/10] bnxt_en: Add TPH support in BNXT driver

Message ID: 20240717205511.2541693-10-wei.huang2@amd.com
State: Superseded
Delegated to: Bjorn Helgaas
Series: PCIe TPH and cache direct injection support

Commit Message

Wei Huang July 17, 2024, 8:55 p.m. UTC
From: Manoj Panicker <manoj.panicker2@amd.com>

Implement TPH support in the Broadcom BNXT device driver by invoking
the pcie_tph_set_st() function when interrupt affinity is changed.

Signed-off-by: Manoj Panicker <manoj.panicker2@amd.com>
Reviewed-by: Wei Huang <wei.huang2@amd.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 54 +++++++++++++++++++++++
 drivers/net/ethernet/broadcom/bnxt/bnxt.h |  4 ++
 2 files changed, 58 insertions(+)
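
For readers skimming the thread, here is a condensed sketch of the pattern the
patch wires up: an irq_affinity_notify callback is registered per MSI-X vector,
and the PCIe TPH steering tag is re-programmed whenever the affinity mask
changes. It assumes the pcie_tph_set_st()/pcie_tph_intr_vec_supported()
interfaces and the TPH_MEM_TYPE_VM/PCI_TPH_REQ_TPH_ONLY constants proposed
earlier in this series (the patch treats a false return from pcie_tph_set_st()
as failure); the example_irq type and function names are illustrative, not
actual bnxt code.

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/pci-tph.h>	/* proposed in this series */
#include <linux/printk.h>

/* Illustrative per-vector state, condensed from struct bnxt_irq below. */
struct example_irq {
	struct pci_dev *pdev;
	unsigned int vector;			/* Linux IRQ number */
	int msix_nr;				/* MSI-X table index */
	cpumask_var_t cpu_mask;
	struct irq_affinity_notify affinity_notify;
};

/* Called whenever the vector's affinity is changed (e.g. by irqbalance). */
static void example_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct example_irq *irq = container_of(notify, struct example_irq,
					       affinity_notify);

	cpumask_copy(irq->cpu_mask, mask);

	/* Re-tag this vector's DMA writes toward the new CPU's cache. */
	if (!pcie_tph_set_st(irq->pdev, irq->msix_nr,
			     cpumask_first(irq->cpu_mask),
			     TPH_MEM_TYPE_VM, PCI_TPH_REQ_TPH_ONLY))
		pr_debug("error in setting steering tag\n");
}

static void example_affinity_release(struct kref *ref)
{
}

static void example_register_notifier(struct example_irq *irq)
{
	irq->affinity_notify.irq = irq->vector;
	irq->affinity_notify.notify = example_affinity_notify;
	irq->affinity_notify.release = example_affinity_release;
	irq_set_affinity_notifier(irq->vector, &irq->affinity_notify);
}

In the full patch, __bnxt_irq_affinity_notify() additionally closes and
re-opens the NIC under rtnl_lock() when the interface is running; see the
diff below.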

Comments

Bjorn Helgaas July 23, 2024, 4:48 p.m. UTC | #1
On Wed, Jul 17, 2024 at 03:55:10PM -0500, Wei Huang wrote:
> From: Manoj Panicker <manoj.panicker2@amd.com>
> 
> Implement TPH support in the Broadcom BNXT device driver by invoking
> the pcie_tph_set_st() function when interrupt affinity is changed.

*and* invoking pcie_tph_set_st() when setting up the IRQ in the first
place, I guess?

I guess this gives a significant performance benefit?  The series
includes "pci=nostmode" so the benefit can be quantified, so now I'm
curious about what you measured :)

> +static void bnxt_rtnl_lock_sp(struct bnxt *bp);
> +static void bnxt_rtnl_unlock_sp(struct bnxt *bp);

These duplicate declarations can't be right, can they?  OK for
work-in-progress, but it doesn't look like the final solution.

> +static void __bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
> +				       const cpumask_t *mask)
> +{
> +	struct bnxt_irq *irq;
> +
> +	irq = container_of(notify, struct bnxt_irq, affinity_notify);
> +	cpumask_copy(irq->cpu_mask, mask);
> +
> +	if (!pcie_tph_set_st(irq->bp->pdev, irq->msix_nr,
> +			     cpumask_first(irq->cpu_mask),
> +			     TPH_MEM_TYPE_VM, PCI_TPH_REQ_TPH_ONLY))
> +		netdev_dbg(irq->bp->dev, "error in setting steering tag\n");
> +
> +	if (netif_running(irq->bp->dev)) {
> +		rtnl_lock();
> +		bnxt_close_nic(irq->bp, false, false);
> +		bnxt_open_nic(irq->bp, false, false);
> +		rtnl_unlock();
> +	}
> +}
Wei Huang Aug. 2, 2024, 5:44 a.m. UTC | #2
On 7/23/24 11:48, Bjorn Helgaas wrote:
> On Wed, Jul 17, 2024 at 03:55:10PM -0500, Wei Huang wrote:
>> From: Manoj Panicker <manoj.panicker2@amd.com>
>>
>> Implement TPH support in the Broadcom BNXT device driver by invoking
>> the pcie_tph_set_st() function when interrupt affinity is changed.
> 
> *and* invoking pcie_tph_set_st() when setting up the IRQ in the first
> place, I guess?
> 
> I guess this gives a significant performance benefit?  The series
> includes "pci=nostmode" so the benefit can be quantified, so now I'm
> curious about what you measured :)

Using network benchmarks, three main metrics were measured: network
latency, network bandwidth, and memory bandwidth savings.

> 
>> +static void bnxt_rtnl_lock_sp(struct bnxt *bp);
>> +static void bnxt_rtnl_unlock_sp(struct bnxt *bp);
> 
> These duplicate declarations can't be right, can they?  OK for
> work-in-progress, but it doesn't look like the final solution.

Will fix.
Bjorn Helgaas Aug. 2, 2024, 5 p.m. UTC | #3
On Fri, Aug 02, 2024 at 12:44:07AM -0500, Wei Huang wrote:
> 
> 
> On 7/23/24 11:48, Bjorn Helgaas wrote:
> > On Wed, Jul 17, 2024 at 03:55:10PM -0500, Wei Huang wrote:
> > > From: Manoj Panicker <manoj.panicker2@amd.com>
> > > 
> > > Implement TPH support in the Broadcom BNXT device driver by invoking
> > > the pcie_tph_set_st() function when interrupt affinity is changed.
> > 
> > *and* invoking pcie_tph_set_st() when setting up the IRQ in the first
> > place, I guess?
> > 
> > I guess this gives a significant performance benefit?  The series
> > includes "pci=nostmode" so the benefit can be quantified, so now I'm
> > curious about what you measured :)
> 
> Using network benchmarks, three main metrics were measured: network latency,
> network bandwidth, and memory bandwidth savings.

I was hoping you could share actual percentage improvements to justify
the new code.  If there's no improvement, obviously there would be no
point in adding the code.  If there's significant improvement, it will
encourage using this in other drivers, which will improve the code and
testing for everybody.

Bjorn
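
Before the patch itself, one more condensed sketch covering the setup and
teardown sides that the first review comment touches on: the steering tag is
also programmed once when the IRQ is set up, and the notifier is unhooked
before the vector is freed. This continues the illustrative example above the
comments (same hypothetical example_irq type and example_register_notifier()
helper, same assumptions about the interfaces proposed in this series).

/* Setup, condensed from bnxt_request_irq() in the diff below: program the
 * tag once at request time, then let the notifier keep it in sync.
 */
static void example_setup_tph(struct example_irq *irq, int cpu)
{
	cpumask_set_cpu(cpu, irq->cpu_mask);

	if (!pcie_tph_intr_vec_supported(irq->pdev))
		return;

	example_register_notifier(irq);

	/* first setup */
	if (!pcie_tph_set_st(irq->pdev, irq->msix_nr,
			     cpumask_first(irq->cpu_mask),
			     TPH_MEM_TYPE_VM, PCI_TPH_REQ_TPH_ONLY))
		pr_debug("error in setting steering tag\n");
}

/* Teardown, condensed from bnxt_free_irq() in the diff below: unhook the
 * notifier before free_irq() releases the vector.
 */
static void example_teardown_tph(struct example_irq *irq, void *dev_id)
{
	if (pcie_tph_intr_vec_supported(irq->pdev))
		irq_set_affinity_notifier(irq->vector, NULL);
	free_irq(irq->vector, dev_id);	/* dev_id as passed to request_irq() */
}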

Patch

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c437ca1c0fd3..2207dac8ce18 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -55,6 +55,7 @@ 
 #include <net/page_pool/helpers.h>
 #include <linux/align.h>
 #include <net/netdev_queues.h>
+#include <linux/pci-tph.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -10683,6 +10684,8 @@  static void bnxt_free_irq(struct bnxt *bp)
 				free_cpumask_var(irq->cpu_mask);
 				irq->have_cpumask = 0;
 			}
+			if (pcie_tph_intr_vec_supported(bp->pdev))
+				irq_set_affinity_notifier(irq->vector, NULL);
 			free_irq(irq->vector, bp->bnapi[i]);
 		}
 
@@ -10690,6 +10693,45 @@  static void bnxt_free_irq(struct bnxt *bp)
 	}
 }
 
+static void bnxt_rtnl_lock_sp(struct bnxt *bp);
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp);
+static void __bnxt_irq_affinity_notify(struct irq_affinity_notify *notify,
+				       const cpumask_t *mask)
+{
+	struct bnxt_irq *irq;
+
+	irq = container_of(notify, struct bnxt_irq, affinity_notify);
+	cpumask_copy(irq->cpu_mask, mask);
+
+	if (!pcie_tph_set_st(irq->bp->pdev, irq->msix_nr,
+			     cpumask_first(irq->cpu_mask),
+			     TPH_MEM_TYPE_VM, PCI_TPH_REQ_TPH_ONLY))
+		netdev_dbg(irq->bp->dev, "error in setting steering tag\n");
+
+	if (netif_running(irq->bp->dev)) {
+		rtnl_lock();
+		bnxt_close_nic(irq->bp, false, false);
+		bnxt_open_nic(irq->bp, false, false);
+		rtnl_unlock();
+	}
+}
+
+static void __bnxt_irq_affinity_release(struct kref __always_unused *ref)
+{
+}
+
+static inline void bnxt_register_affinity_notifier(struct bnxt_irq *irq)
+{
+	struct irq_affinity_notify *notify;
+
+	notify = &irq->affinity_notify;
+	notify->irq = irq->vector;
+	notify->notify = __bnxt_irq_affinity_notify;
+	notify->release = __bnxt_irq_affinity_release;
+
+	irq_set_affinity_notifier(irq->vector, notify);
+}
+
 static int bnxt_request_irq(struct bnxt *bp)
 {
 	int i, j, rc = 0;
@@ -10735,6 +10777,7 @@  static int bnxt_request_irq(struct bnxt *bp)
 			int numa_node = dev_to_node(&bp->pdev->dev);
 
 			irq->have_cpumask = 1;
+			irq->msix_nr = map_idx;
 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
 					irq->cpu_mask);
 			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
@@ -10744,6 +10787,17 @@  static int bnxt_request_irq(struct bnxt *bp)
 					    irq->vector);
 				break;
 			}
+
+			if (pcie_tph_intr_vec_supported(bp->pdev)) {
+				irq->bp = bp;
+				bnxt_register_affinity_notifier(irq);
+
+				/* first setup */
+				if (!pcie_tph_set_st(bp->pdev, i,
+						     cpumask_first(irq->cpu_mask),
+						     TPH_MEM_TYPE_VM, PCI_TPH_REQ_TPH_ONLY))
+					netdev_dbg(bp->dev, "error in setting steering tag\n");
+			}
 		}
 	}
 	return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 656ab81c0272..4a841e8ccfb7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1195,6 +1195,10 @@  struct bnxt_irq {
 	u8		have_cpumask:1;
 	char		name[IFNAMSIZ + 2];
 	cpumask_var_t	cpu_mask;
+
+	int		msix_nr;
+	struct bnxt	*bp;
+	struct irq_affinity_notify affinity_notify;
 };
 
 #define HWRM_RING_ALLOC_TX	0x1