diff mbox series

[RFC,net-next,v2,2/9] bnxt: implement queue api

Message ID 20240502045410.3524155-3-dw@davidwei.uk (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Headers show
Series bnxt: implement queue api | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 926 this patch: 926
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 937 this patch: 937
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 939 this patch: 939
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 74 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 2 this patch: 2
netdev/source_inline success Was 0 now: 0

Commit Message

David Wei May 2, 2024, 4:54 a.m. UTC
Implement the bare minimum queue API for bnxt. I'm essentially breaking
up the existing bnxt_rx_ring_reset() into two steps:

1. bnxt_queue_stop()
2. bnxt_queue_start()

The other two ndos are left as no-ops for now, so the queue mem is
allocated after the queue has been stopped. Doing this before stopping
the queue is a lot more work, so I'm looking for some feedback first.

Signed-off-by: David Wei <dw@davidwei.uk>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 62 +++++++++++++++++++++++
 1 file changed, 62 insertions(+)

Comments

Mina Almasry May 2, 2024, 5:23 p.m. UTC | #1
On Wed, May 1, 2024 at 9:54 PM David Wei <dw@davidwei.uk> wrote:
>
> Implement the bare minimum queue API for bnxt. I'm essentially breaking
> up the existing bnxt_rx_ring_reset() into two steps:
>
> 1. bnxt_queue_stop()
> 2. bnxt_queue_start()
>
> The other two ndos are left as no-ops for now, so the queue mem is
> allocated after the queue has been stopped. Doing this before stopping
> the queue is a lot more work, so I'm looking for some feedback first.
>
> Signed-off-by: David Wei <dw@davidwei.uk>
> ---
>  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 62 +++++++++++++++++++++++
>  1 file changed, 62 insertions(+)
>
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index 06b7a963bbbd..788c87271eb1 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -14810,6 +14810,67 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
>         .get_base_stats         = bnxt_get_base_stats,
>  };
>
> +static void *bnxt_queue_mem_alloc(struct net_device *dev, int idx)
> +{
> +       struct bnxt *bp = netdev_priv(dev);
> +
> +       return &bp->rx_ring[idx];
> +}
> +
> +static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
> +{
> +}
> +
> +static int bnxt_queue_start(struct net_device *dev, int idx, void *qmem)
> +{
> +       struct bnxt_rx_ring_info *rxr = qmem;
> +       struct bnxt *bp = netdev_priv(dev);
> +
> +       bnxt_alloc_one_rx_ring(bp, idx);
> +
> +       if (bp->flags & BNXT_FLAG_AGG_RINGS)
> +               bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
> +       bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
> +
> +       if (bp->flags & BNXT_FLAG_TPA)
> +               bnxt_set_tpa(bp, true);
> +
> +       return 0;
> +}
> +
> +static int bnxt_queue_stop(struct net_device *dev, int idx, void **out_qmem)
> +{
> +       struct bnxt *bp = netdev_priv(dev);
> +       struct bnxt_rx_ring_info *rxr;
> +       struct bnxt_cp_ring_info *cpr;
> +       int rc;
> +
> +       rc = bnxt_hwrm_rx_ring_reset(bp, idx);
> +       if (rc)
> +               return rc;
> +
> +       bnxt_free_one_rx_ring_skbs(bp, idx);
> +       rxr = &bp->rx_ring[idx];
> +       rxr->rx_prod = 0;
> +       rxr->rx_agg_prod = 0;
> +       rxr->rx_sw_agg_prod = 0;
> +       rxr->rx_next_cons = 0;
> +
> +       cpr = &rxr->bnapi->cp_ring;
> +       cpr->sw_stats.rx.rx_resets++;
> +
> +       *out_qmem = rxr;

Oh, I'm not sure you can do this, no?

rxr is a stack variable, it goes away after the function returns, no?

You have to kzalloc sizeof(struct bnxt_rx_ring_info), no?

Other than that, the code looks very similar to what we do for GVE,
and good to me.

> +
> +       return 0;
> +}
> +
> +static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
> +       .ndo_queue_mem_alloc    = bnxt_queue_mem_alloc,
> +       .ndo_queue_mem_free     = bnxt_queue_mem_free,
> +       .ndo_queue_start        = bnxt_queue_start,
> +       .ndo_queue_stop         = bnxt_queue_stop,
> +};
> +
>  static void bnxt_remove_one(struct pci_dev *pdev)
>  {
>         struct net_device *dev = pci_get_drvdata(pdev);
> @@ -15275,6 +15336,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
>         dev->stat_ops = &bnxt_stat_ops;
>         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
>         dev->ethtool_ops = &bnxt_ethtool_ops;
> +       dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
>         pci_set_drvdata(pdev, dev);
>
>         rc = bnxt_alloc_hwrm_resources(bp);
> --
> 2.43.0
>
David Wei May 6, 2024, 12:36 a.m. UTC | #2
On 2024-05-02 10:23, Mina Almasry wrote:
> On Wed, May 1, 2024 at 9:54 PM David Wei <dw@davidwei.uk> wrote:

...

>> +
>> +static int bnxt_queue_stop(struct net_device *dev, int idx, void **out_qmem)
>> +{
>> +       struct bnxt *bp = netdev_priv(dev);
>> +       struct bnxt_rx_ring_info *rxr;
>> +       struct bnxt_cp_ring_info *cpr;
>> +       int rc;
>> +
>> +       rc = bnxt_hwrm_rx_ring_reset(bp, idx);
>> +       if (rc)
>> +               return rc;
>> +
>> +       bnxt_free_one_rx_ring_skbs(bp, idx);
>> +       rxr = &bp->rx_ring[idx];
>> +       rxr->rx_prod = 0;
>> +       rxr->rx_agg_prod = 0;
>> +       rxr->rx_sw_agg_prod = 0;
>> +       rxr->rx_next_cons = 0;
>> +
>> +       cpr = &rxr->bnapi->cp_ring;
>> +       cpr->sw_stats.rx.rx_resets++;
>> +
>> +       *out_qmem = rxr;
> 
> Oh, I'm not sure you can do this, no?
> 
> rxr is a stack variable, it goes away after the function returns, no?

Not for bnxt, where rx_ring is a dynamically allocated array.

In later patches I will change how the ndos are implemented. Next series
I'll squash these intermediate patches that are now useless.

> 
> You have to kzalloc sizeof(struct bnxt_rx_ring_info), no?
> 
> Other than that, the code looks very similar to what we do for GVE,
> and good to me.

Thanks.

> 
>> +
>> +       return 0;
>> +}
>> +
>> +static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
>> +       .ndo_queue_mem_alloc    = bnxt_queue_mem_alloc,
>> +       .ndo_queue_mem_free     = bnxt_queue_mem_free,
>> +       .ndo_queue_start        = bnxt_queue_start,
>> +       .ndo_queue_stop         = bnxt_queue_stop,
>> +};
>> +
>>  static void bnxt_remove_one(struct pci_dev *pdev)
>>  {
>>         struct net_device *dev = pci_get_drvdata(pdev);
>> @@ -15275,6 +15336,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
>>         dev->stat_ops = &bnxt_stat_ops;
>>         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
>>         dev->ethtool_ops = &bnxt_ethtool_ops;
>> +       dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
>>         pci_set_drvdata(pdev, dev);
>>
>>         rc = bnxt_alloc_hwrm_resources(bp);
>> --
>> 2.43.0
>>
> 
>
diff mbox series

Patch

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 06b7a963bbbd..788c87271eb1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -14810,6 +14810,67 @@  static const struct netdev_stat_ops bnxt_stat_ops = {
 	.get_base_stats		= bnxt_get_base_stats,
 };
 
+static void *bnxt_queue_mem_alloc(struct net_device *dev, int idx)
+{
+	struct bnxt *bp = netdev_priv(dev);
+
+	return &bp->rx_ring[idx];
+}
+
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+}
+
+static int bnxt_queue_start(struct net_device *dev, int idx, void *qmem)
+{
+	struct bnxt_rx_ring_info *rxr = qmem;
+	struct bnxt *bp = netdev_priv(dev);
+
+	bnxt_alloc_one_rx_ring(bp, idx);
+
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+
+	if (bp->flags & BNXT_FLAG_TPA)
+		bnxt_set_tpa(bp, true);
+
+	return 0;
+}
+
+static int bnxt_queue_stop(struct net_device *dev, int idx, void **out_qmem)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
+	int rc;
+
+	rc = bnxt_hwrm_rx_ring_reset(bp, idx);
+	if (rc)
+		return rc;
+
+	bnxt_free_one_rx_ring_skbs(bp, idx);
+	rxr = &bp->rx_ring[idx];
+	rxr->rx_prod = 0;
+	rxr->rx_agg_prod = 0;
+	rxr->rx_sw_agg_prod = 0;
+	rxr->rx_next_cons = 0;
+
+	cpr = &rxr->bnapi->cp_ring;
+	cpr->sw_stats.rx.rx_resets++;
+
+	*out_qmem = rxr;
+
+	return 0;
+}
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+	.ndo_queue_mem_alloc	= bnxt_queue_mem_alloc,
+	.ndo_queue_mem_free	= bnxt_queue_mem_free,
+	.ndo_queue_start	= bnxt_queue_start,
+	.ndo_queue_stop		= bnxt_queue_stop,
+};
+
 static void bnxt_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -15275,6 +15336,7 @@  static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->stat_ops = &bnxt_stat_ops;
 	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
 	dev->ethtool_ops = &bnxt_ethtool_ops;
+	dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
 	pci_set_drvdata(pdev, dev);
 
 	rc = bnxt_alloc_hwrm_resources(bp);