[net-next,09/10] bnxt_en: Extend queue stop/start for Tx rings

Message ID 20250113063927.4017173-10-michael.chan@broadcom.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series bnxt_en: Add NPAR 1.2 and TPH support

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 1 this patch: 10
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang fail Errors and warnings before: 2 this patch: 12
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 1 this patch: 10
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 2 this patch: 2
netdev/source_inline success Was 0 now: 0

Commit Message

Michael Chan Jan. 13, 2025, 6:39 a.m. UTC
From: Somnath Kotur <somnath.kotur@broadcom.com>

In order to use queue_stop/queue_start to support the new Steering
Tags, we need to free the TX ring and TX completion ring if it is a
combined channel with TX/RX sharing the same NAPI.  Otherwise
TX completions will not have the updated Steering Tag.  With that
we can now add napi_disable() and napi_enable() during queue_stop()/
queue_start().  This will guarantee that NAPI will stop processing
the completion entries in case there are additional pending entries
in the completion rings after queue_stop().

There could be some NQEs sitting unprocessed while NAPI is disabled
thereby leaving the NQ unarmed.  Explictily Re-arm the NQ after
napi_enable() in queue start so that NAPI will resume properly.

Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
---
Cc: David Wei <dw@davidwei.uk>

Discussion about adding napi_disable()/napi_enable():

https://lore.kernel.org/netdev/5336d624-8d8b-40a6-b732-b020e4a119a2@davidwei.uk/#t
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 99 ++++++++++++++++++++++-
 1 file changed, 98 insertions(+), 1 deletion(-)
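
For orientation before the review comments: on a combined channel, where TX and
RX share one NAPI (BNXT_FLAG_SHARED_RINGS), the ordering described in the commit
message condenses to the sketch below.  This is only a digest of the patch that
follows, not compilable driver code; every helper name is taken from that patch.

/* queue_stop(): quiesce TX first, then NAPI, then the notification queue.
 * cpr here is the channel's NQ, i.e. &bnapi->cp_ring (see bnxt_queue_start()).
 */
bnxt_tx_queue_stop(bp, idx);             /* stop the txq, free TX ring + TX cpr */
napi_disable(&bnapi->napi);              /* no more completion processing */
bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);     /* ack the NQ, leave it unarmed */

/* queue_start(): bring TX back (picking up the new Steering Tag), then NAPI. */
bnxt_tx_queue_start(bp, idx);            /* re-allocate TX cpr + TX ring */
napi_enable(&rxr->bnapi->napi);
bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons); /* re-arm so NAPI resumes */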

Comments

Michal Swiatkowski Jan. 13, 2025, 8:40 a.m. UTC | #1
On Sun, Jan 12, 2025 at 10:39:26PM -0800, Michael Chan wrote:
> From: Somnath Kotur <somnath.kotur@broadcom.com>
> 
> In order to use queue_stop/queue_start to support the new Steering
> Tags, we need to free the TX ring and TX completion ring if it is a
> combined channel with TX/RX sharing the same NAPI.  Otherwise
> TX completions will not have the updated Steering Tag.  With that
> we can now add napi_disable() and napi_enable() during queue_stop()/
> queue_start().  This will guarantee that NAPI will stop processing
> the completion entries in case there are additional pending entries
> in the completion rings after queue_stop().
> 
> There could be some NQEs sitting unprocessed while NAPI is disabled
> thereby leaving the NQ unarmed.  Explictily Re-arm the NQ after
> napi_enable() in queue start so that NAPI will resume properly.
> 
> Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
> Reviewed-by: Michael Chan <michael.chan@broadcom.com>
> ---
> Cc: David Wei <dw@davidwei.uk>
> 
> Discussion about adding napi_disable()/napi_enable():
> 
> https://lore.kernel.org/netdev/5336d624-8d8b-40a6-b732-b020e4a119a2@davidwei.uk/#t
> ---
>  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 99 ++++++++++++++++++++++-
>  1 file changed, 98 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index fe350d0ba99c..eddb4de959c6 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -7341,6 +7341,22 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
>  	return 0;
>  }
>  
> +static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
> +				   bool close_path)
> +{
> +	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
> +	u32 cmpl_ring_id;
> +
> +	if (ring->fw_ring_id == INVALID_HW_RING_ID)
> +		return;
> +
> +	cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
> +		       INVALID_HW_RING_ID;
> +	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
> +				cmpl_ring_id);
> +	ring->fw_ring_id = INVALID_HW_RING_ID;
> +}
> +
>  static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
>  				   struct bnxt_rx_ring_info *rxr,
>  				   bool close_path)
> @@ -11247,6 +11263,69 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
>  	return 0;
>  }
>  
> +static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
> +{
> +	struct bnxt_tx_ring_info *txr;
> +	struct netdev_queue *txq;
> +	struct bnxt_napi *bnapi;
> +	int i;
> +
> +	bnapi = bp->bnapi[idx];
> +	bnxt_for_each_napi_tx(i, bnapi, txr) {
> +		WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
> +		synchronize_net();
> +
> +		if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
> +			txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
> +			if (txq) {
> +				__netif_tx_lock_bh(txq);
> +				netif_tx_stop_queue(txq);
> +				__netif_tx_unlock_bh(txq);
> +			}
> +		}
> +		bnxt_hwrm_tx_ring_free(bp, txr, true);
> +		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
> +		bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
> +		bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
> +	}
> +}
> +
> +static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
> +{
> +	struct bnxt_tx_ring_info *txr;
> +	struct netdev_queue *txq;
> +	struct bnxt_napi *bnapi;
> +	int rc, i;
> +
> +	bnapi = bp->bnapi[idx];
> +	bnxt_for_each_napi_tx(i, bnapi, txr) {
> +		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
> +		if (rc)
> +			return rc;
> +
> +		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
> +		if (rc) {
> +			bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
What about rings allocated in previous steps? Don't you need to free them too?

> +			return rc;
> +		}
> +		txr->tx_prod = 0;
> +		txr->tx_cons = 0;
> +		txr->tx_hw_cons = 0;
> +
> +		WRITE_ONCE(txr->dev_state, 0);
> +		synchronize_net();
> +
> +		if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
> +			continue;
> +
> +		txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
> +		if (txq)
> +			netif_tx_start_queue(txq);
> +	}
> +
> +	return 0;
> +}
> +
>  static void bnxt_free_irq(struct bnxt *bp)
>  {
>  	struct bnxt_irq *irq;
> @@ -15647,6 +15726,16 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
>  	cpr = &rxr->bnapi->cp_ring;
>  	cpr->sw_stats->rx.rx_resets++;
>  
> +	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
> +		rc = bnxt_tx_queue_start(bp, idx);
> +		if (rc)
> +			netdev_warn(bp->dev,
> +				    "tx queue restart failed: rc=%d\n", rc);
> +	}
> +
> +	napi_enable(&rxr->bnapi->napi);
> +	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
> +
>  	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
>  		vnic = &bp->vnic_info[i];
>  
> @@ -15675,6 +15764,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
>  	struct bnxt *bp = netdev_priv(dev);
>  	struct bnxt_rx_ring_info *rxr;
>  	struct bnxt_vnic_info *vnic;
> +	struct bnxt_napi *bnapi;
>  	int i;
>  
>  	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
> @@ -15686,15 +15776,22 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
>  	/* Make sure NAPI sees that the VNIC is disabled */
>  	synchronize_net();
>  	rxr = &bp->rx_ring[idx];
> -	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
> +	bnapi = rxr->bnapi;
> +	cancel_work_sync(&bnapi->cp_ring.dim.work);
>  	bnxt_hwrm_rx_ring_free(bp, rxr, false);
>  	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
>  	page_pool_disable_direct_recycling(rxr->page_pool);
>  	if (bnxt_separate_head_pool())
>  		page_pool_disable_direct_recycling(rxr->head_pool);
>  
> +	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
> +		bnxt_tx_queue_stop(bp, idx);
> +
> +	napi_disable(&bnapi->napi);
> +
>  	bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
>  	bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
> +	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
>  
>  	memcpy(qmem, rxr, sizeof(*rxr));
>  	bnxt_init_rx_ring_struct(bp, qmem);
> -- 
> 2.30.1
Bjorn Helgaas Jan. 13, 2025, 4:01 p.m. UTC | #2
On Sun, Jan 12, 2025 at 10:39:26PM -0800, Michael Chan wrote:
> From: Somnath Kotur <somnath.kotur@broadcom.com>
> 
> In order to use queue_stop/queue_start to support the new Steering
> Tags, we need to free the TX ring and TX completion ring if it is a
> combined channel with TX/RX sharing the same NAPI.  Otherwise
> TX completions will not have the updated Steering Tag.  With that
> we can now add napi_disable() and napi_enable() during queue_stop()/
> queue_start().  This will guarantee that NAPI will stop processing
> the completion entries in case there are additional pending entries
> in the completion rings after queue_stop().
> 
> There could be some NQEs sitting unprocessed while NAPI is disabled
> thereby leaving the NQ unarmed.  Explictily Re-arm the NQ after
> napi_enable() in queue start so that NAPI will resume properly.

s/Explictily Re-arm/Explicitly re-arm/ (typo + capitalization)

There's a mix of "TX/RX" vs "Tx/Rx" styles in the subjects and commit
logs of this series.
Somnath Kotur Jan. 14, 2025, 2:05 a.m. UTC | #3
On Mon, 13 Jan 2025, 14:13 Michal Swiatkowski <michal.swiatkowski@linux.intel.com> wrote:

> On Sun, Jan 12, 2025 at 10:39:26PM -0800, Michael Chan wrote:
> > From: Somnath Kotur <somnath.kotur@broadcom.com>
> >
> > In order to use queue_stop/queue_start to support the new Steering
> > Tags, we need to free the TX ring and TX completion ring if it is a
> > combined channel with TX/RX sharing the same NAPI.  Otherwise
> > TX completions will not have the updated Steering Tag.  With that
> > we can now add napi_disable() and napi_enable() during queue_stop()/
> > queue_start().  This will guarantee that NAPI will stop processing
> > the completion entries in case there are additional pending entries
> > in the completion rings after queue_stop().
> >
> > There could be some NQEs sitting unprocessed while NAPI is disabled
> > thereby leaving the NQ unarmed.  Explictily Re-arm the NQ after
> > napi_enable() in queue start so that NAPI will resume properly.
> >
> > Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> > Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
> > Reviewed-by: Michael Chan <michael.chan@broadcom.com>
> > ---
> > Cc: David Wei <dw@davidwei.uk>
> >
> > Discussion about adding napi_disable()/napi_enable():
> >
> >
> https://lore.kernel.org/netdev/5336d624-8d8b-40a6-b732-b020e4a119a2@davidwei.uk/#t
> > ---
> >  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 99 ++++++++++++++++++++++-
> >  1 file changed, 98 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > index fe350d0ba99c..eddb4de959c6 100644
> > --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > @@ -7341,6 +7341,22 @@ static int hwrm_ring_free_send_msg(struct bnxt
> *bp,
> >       return 0;
> >  }
> >
> > +static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, struct
> bnxt_tx_ring_info *txr,
> > +                                bool close_path)
> > +{
> > +     struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
> > +     u32 cmpl_ring_id;
> > +
> > +     if (ring->fw_ring_id == INVALID_HW_RING_ID)
> > +             return;
> > +
> > +     cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
> > +                    INVALID_HW_RING_ID;
> > +     hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
> > +                             cmpl_ring_id);
> > +     ring->fw_ring_id = INVALID_HW_RING_ID;
> > +}
> > +
> >  static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
> >                                  struct bnxt_rx_ring_info *rxr,
> >                                  bool close_path)
> > @@ -11247,6 +11263,69 @@ int bnxt_reserve_rings(struct bnxt *bp, bool
> irq_re_init)
> >       return 0;
> >  }
> >
> > +static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
> > +{
> > +     struct bnxt_tx_ring_info *txr;
> > +     struct netdev_queue *txq;
> > +     struct bnxt_napi *bnapi;
> > +     int i;
> > +
> > +     bnapi = bp->bnapi[idx];
> > +     bnxt_for_each_napi_tx(i, bnapi, txr) {
> > +             WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
> > +             synchronize_net();
> > +
> > +             if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
> > +                     txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
> > +                     if (txq) {
> > +                             __netif_tx_lock_bh(txq);
> > +                             netif_tx_stop_queue(txq);
> > +                             __netif_tx_unlock_bh(txq);
> > +                     }
> > +             }
> > +             bnxt_hwrm_tx_ring_free(bp, txr, true);
> > +             bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
> > +             bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
> > +             bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
> > +     }
> > +}
> > +
> > +static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
> > +{
> > +     struct bnxt_tx_ring_info *txr;
> > +     struct netdev_queue *txq;
> > +     struct bnxt_napi *bnapi;
> > +     int rc, i;
> > +
> > +     bnapi = bp->bnapi[idx];
> > +     bnxt_for_each_napi_tx(i, bnapi, txr) {
> > +             rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
> > +             if (rc)
> > +                     return rc;
> > +
> > +             rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
> > +             if (rc) {
> > +                     bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
> What about rings allocated in previous steps? Don't you need to free them too?
>
Sure, thanks, will take care.

>
> > +                     return rc;
> > +             }
> > +             txr->tx_prod = 0;
> > +             txr->tx_cons = 0;
> > +             txr->tx_hw_cons = 0;
> > +
> > +             WRITE_ONCE(txr->dev_state, 0);
> > +             synchronize_net();
> > +
> > +             if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
> > +                     continue;
> > +
> > +             txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
> > +             if (txq)
> > +                     netif_tx_start_queue(txq);
> > +     }
> > +
> > +     return 0;
> > +}
> > +
> >  static void bnxt_free_irq(struct bnxt *bp)
> >  {
> >       struct bnxt_irq *irq;
> > @@ -15647,6 +15726,16 @@ static int bnxt_queue_start(struct net_device
> *dev, void *qmem, int idx)
> >       cpr = &rxr->bnapi->cp_ring;
> >       cpr->sw_stats->rx.rx_resets++;
> >
> > +     if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
> > +             rc = bnxt_tx_queue_start(bp, idx);
> > +             if (rc)
> > +                     netdev_warn(bp->dev,
> > +                                 "tx queue restart failed: rc=%d\n",
> rc);
> > +     }
> > +
> > +     napi_enable(&rxr->bnapi->napi);
> > +     bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
> > +
> >       for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
> >               vnic = &bp->vnic_info[i];
> >
> > @@ -15675,6 +15764,7 @@ static int bnxt_queue_stop(struct net_device
> *dev, void *qmem, int idx)
> >       struct bnxt *bp = netdev_priv(dev);
> >       struct bnxt_rx_ring_info *rxr;
> >       struct bnxt_vnic_info *vnic;
> > +     struct bnxt_napi *bnapi;
> >       int i;
> >
> >       for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
> > @@ -15686,15 +15776,22 @@ static int bnxt_queue_stop(struct net_device
> *dev, void *qmem, int idx)
> >       /* Make sure NAPI sees that the VNIC is disabled */
> >       synchronize_net();
> >       rxr = &bp->rx_ring[idx];
> > -     cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
> > +     bnapi = rxr->bnapi;
> > +     cancel_work_sync(&bnapi->cp_ring.dim.work);
> >       bnxt_hwrm_rx_ring_free(bp, rxr, false);
> >       bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
> >       page_pool_disable_direct_recycling(rxr->page_pool);
> >       if (bnxt_separate_head_pool())
> >               page_pool_disable_direct_recycling(rxr->head_pool);
> >
> > +     if (bp->flags & BNXT_FLAG_SHARED_RINGS)
> > +             bnxt_tx_queue_stop(bp, idx);
> > +
> > +     napi_disable(&bnapi->napi);
> > +
> >       bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
> >       bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
> > +     bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
> >
> >       memcpy(qmem, rxr, sizeof(*rxr));
> >       bnxt_init_rx_ring_struct(bp, qmem);
> > --
> > 2.30.1
>
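
To make Michal's point concrete: if bnxt_hwrm_cp_ring_alloc_p5() or
bnxt_hwrm_tx_ring_alloc() fails on the Nth TX ring of the loop, rings 0..N-1
are left allocated.  One possible shape of the unwind is sketched below; this
is only an illustration, not code from a posted revision, and the
bnapi->tx_ring[i] accessor in the unwind loop is an assumption about the
structure layout.

static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
{
	struct bnxt_napi *bnapi = bp->bnapi[idx];
	struct bnxt_tx_ring_info *txr;
	int rc, i;

	bnxt_for_each_napi_tx(i, bnapi, txr) {
		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
		if (rc)
			goto err_unwind;

		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
		if (rc) {
			bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
			goto err_unwind;
		}
		/* ... reset tx_prod/tx_cons, clear dev_state, wake the txq ... */
	}
	return 0;

err_unwind:
	/* Free the TX and TX completion rings brought up in earlier
	 * iterations so the queue is left fully stopped, as it was
	 * before the restart attempt.
	 */
	while (--i >= 0) {
		txr = bnapi->tx_ring[i];	/* assumed accessor */
		bnxt_hwrm_tx_ring_free(bp, txr, true);
		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
	}
	return rc;
}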
Somnath Kotur Jan. 14, 2025, 4:57 a.m. UTC | #4
On Mon, Jan 13, 2025 at 9:31 PM Bjorn Helgaas <helgaas@kernel.org> wrote:
>
> On Sun, Jan 12, 2025 at 10:39:26PM -0800, Michael Chan wrote:
> > From: Somnath Kotur <somnath.kotur@broadcom.com>
> >
> > In order to use queue_stop/queue_start to support the new Steering
> > Tags, we need to free the TX ring and TX completion ring if it is a
> > combined channel with TX/RX sharing the same NAPI.  Otherwise
> > TX completions will not have the updated Steering Tag.  With that
> > we can now add napi_disable() and napi_enable() during queue_stop()/
> > queue_start().  This will guarantee that NAPI will stop processing
> > the completion entries in case there are additional pending entries
> > in the completion rings after queue_stop().
> >
> > There could be some NQEs sitting unprocessed while NAPI is disabled
> > thereby leaving the NQ unarmed.  Explictily Re-arm the NQ after
> > napi_enable() in queue start so that NAPI will resume properly.
>
> s/Explictily Re-arm/Explicitly re-arm/ (typo + capitalization)
>
> There's a mix of "TX/RX" vs "Tx/Rx" styles in the subjects and commit
> logs of this series.
Sure, thank you, will take care of this.
kernel test robot Jan. 14, 2025, 8:48 a.m. UTC | #5
Hi Michael,

kernel test robot noticed the following build errors:

[auto build test ERROR on net-next/main]

url:    https://github.com/intel-lab-lkp/linux/commits/Michael-Chan/bnxt_en-Set-NAPR-1-2-support-when-registering-with-firmware/20250113-144205
base:   net-next/main
patch link:    https://lore.kernel.org/r/20250113063927.4017173-10-michael.chan%40broadcom.com
patch subject: [PATCH net-next 09/10] bnxt_en: Extend queue stop/start for Tx rings
config: csky-randconfig-001-20250114 (https://download.01.org/0day-ci/archive/20250114/202501141605.iErAEuFQ-lkp@intel.com/config)
compiler: csky-linux-gcc (GCC) 14.2.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250114/202501141605.iErAEuFQ-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501141605.iErAEuFQ-lkp@intel.com/

All errors (new ones prefixed by >>):

   drivers/net/ethernet/broadcom/bnxt/bnxt.c: In function 'bnxt_request_irq':
   drivers/net/ethernet/broadcom/bnxt/bnxt.c:11360:16: warning: variable 'j' set but not used [-Wunused-but-set-variable]
   11360 |         int i, j, rc = 0;
         |                ^
   drivers/net/ethernet/broadcom/bnxt/bnxt.c: In function 'bnxt_queue_stop':
>> drivers/net/ethernet/broadcom/bnxt/bnxt.c:15794:25: error: 'cpr' undeclared (first use in this function); did you mean 'cpu'?
   15794 |         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
         |                         ^~~
         |                         cpu
   drivers/net/ethernet/broadcom/bnxt/bnxt.c:15794:25: note: each undeclared identifier is reported only once for each function it appears in


vim +15794 drivers/net/ethernet/broadcom/bnxt/bnxt.c

 15761	
 15762	static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 15763	{
 15764		struct bnxt *bp = netdev_priv(dev);
 15765		struct bnxt_rx_ring_info *rxr;
 15766		struct bnxt_vnic_info *vnic;
 15767		struct bnxt_napi *bnapi;
 15768		int i;
 15769	
 15770		for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
 15771			vnic = &bp->vnic_info[i];
 15772			vnic->mru = 0;
 15773			bnxt_hwrm_vnic_update(bp, vnic,
 15774					      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
 15775		}
 15776		/* Make sure NAPI sees that the VNIC is disabled */
 15777		synchronize_net();
 15778		rxr = &bp->rx_ring[idx];
 15779		bnapi = rxr->bnapi;
 15780		cancel_work_sync(&bnapi->cp_ring.dim.work);
 15781		bnxt_hwrm_rx_ring_free(bp, rxr, false);
 15782		bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 15783		page_pool_disable_direct_recycling(rxr->page_pool);
 15784		if (bnxt_separate_head_pool())
 15785			page_pool_disable_direct_recycling(rxr->head_pool);
 15786	
 15787		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
 15788			bnxt_tx_queue_stop(bp, idx);
 15789	
 15790		napi_disable(&bnapi->napi);
 15791	
 15792		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
 15793		bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
 15794		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
 15795	
 15796		memcpy(qmem, rxr, sizeof(*rxr));
 15797		bnxt_init_rx_ring_struct(bp, qmem);
 15798	
 15799		return 0;
 15800	}
 15801
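
The undeclared 'cpr' comes from the bnxt_db_nq() call added at the end of
bnxt_queue_stop(): unlike bnxt_queue_start(), the stop path never takes a
pointer to the channel's NQ/completion ring.  A likely fix (our guess, not a
posted follow-up) is to mirror the start path and derive it from the queue's
bnxt_napi:

static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_cp_ring_info *cpr;	/* missing in the posted patch */
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_napi *bnapi;
	/* ... */

	rxr = &bp->rx_ring[idx];
	bnapi = rxr->bnapi;
	cpr = &bnapi->cp_ring;		/* the channel's NQ, as in bnxt_queue_start() */

	/* ... ring frees, page-pool teardown, TX stop, napi_disable() ... */

	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);	/* now references a declared cpr */
	/* ... */
	return 0;
}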
kernel test robot Jan. 14, 2025, 11:23 a.m. UTC | #6
Hi Michael,

kernel test robot noticed the following build errors:

[auto build test ERROR on net-next/main]

url:    https://github.com/intel-lab-lkp/linux/commits/Michael-Chan/bnxt_en-Set-NAPR-1-2-support-when-registering-with-firmware/20250113-144205
base:   net-next/main
patch link:    https://lore.kernel.org/r/20250113063927.4017173-10-michael.chan%40broadcom.com
patch subject: [PATCH net-next 09/10] bnxt_en: Extend queue stop/start for Tx rings
config: s390-allmodconfig (https://download.01.org/0day-ci/archive/20250114/202501141828.3alxlzbA-lkp@intel.com/config)
compiler: clang version 19.1.3 (https://github.com/llvm/llvm-project ab51eccf88f5321e7c60591c5546b254b6afab99)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250114/202501141828.3alxlzbA-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202501141828.3alxlzbA-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from drivers/net/ethernet/broadcom/bnxt/bnxt.c:11:
   In file included from include/linux/module.h:19:
   In file included from include/linux/elf.h:6:
   In file included from arch/s390/include/asm/elf.h:181:
   In file included from arch/s390/include/asm/mmu_context.h:11:
   In file included from arch/s390/include/asm/pgalloc.h:18:
   In file included from include/linux/mm.h:2224:
   include/linux/vmstat.h:504:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     504 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     505 |                            item];
         |                            ~~~~
   include/linux/vmstat.h:511:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     511 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     512 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   include/linux/vmstat.h:524:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     524 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     525 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/broadcom/bnxt/bnxt.c:15794:18: error: use of undeclared identifier 'cpr'
    15794 |         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
          |                         ^
   drivers/net/ethernet/broadcom/bnxt/bnxt.c:15794:30: error: use of undeclared identifier 'cpr'
    15794 |         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
          |                                     ^
   3 warnings and 2 errors generated.


vim +/cpr +15794 drivers/net/ethernet/broadcom/bnxt/bnxt.c

 15761	
 15762	static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 15763	{
 15764		struct bnxt *bp = netdev_priv(dev);
 15765		struct bnxt_rx_ring_info *rxr;
 15766		struct bnxt_vnic_info *vnic;
 15767		struct bnxt_napi *bnapi;
 15768		int i;
 15769	
 15770		for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
 15771			vnic = &bp->vnic_info[i];
 15772			vnic->mru = 0;
 15773			bnxt_hwrm_vnic_update(bp, vnic,
 15774					      VNIC_UPDATE_REQ_ENABLES_MRU_VALID);
 15775		}
 15776		/* Make sure NAPI sees that the VNIC is disabled */
 15777		synchronize_net();
 15778		rxr = &bp->rx_ring[idx];
 15779		bnapi = rxr->bnapi;
 15780		cancel_work_sync(&bnapi->cp_ring.dim.work);
 15781		bnxt_hwrm_rx_ring_free(bp, rxr, false);
 15782		bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 15783		page_pool_disable_direct_recycling(rxr->page_pool);
 15784		if (bnxt_separate_head_pool())
 15785			page_pool_disable_direct_recycling(rxr->head_pool);
 15786	
 15787		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
 15788			bnxt_tx_queue_stop(bp, idx);
 15789	
 15790		napi_disable(&bnapi->napi);
 15791	
 15792		bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
 15793		bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
 15794		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
 15795	
 15796		memcpy(qmem, rxr, sizeof(*rxr));
 15797		bnxt_init_rx_ring_struct(bp, qmem);
 15798	
 15799		return 0;
 15800	}
 15801

Patch

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index fe350d0ba99c..eddb4de959c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7341,6 +7341,22 @@  static int hwrm_ring_free_send_msg(struct bnxt *bp,
 	return 0;
 }
 
+static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+				   bool close_path)
+{
+	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+	u32 cmpl_ring_id;
+
+	if (ring->fw_ring_id == INVALID_HW_RING_ID)
+		return;
+
+	cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
+		       INVALID_HW_RING_ID;
+	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
+				cmpl_ring_id);
+	ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
 static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
 				   struct bnxt_rx_ring_info *rxr,
 				   bool close_path)
@@ -11247,6 +11263,69 @@  int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 	return 0;
 }
 
+static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
+{
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+	struct bnxt_napi *bnapi;
+	int i;
+
+	bnapi = bp->bnapi[idx];
+	bnxt_for_each_napi_tx(i, bnapi, txr) {
+		WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
+		synchronize_net();
+
+		if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
+			txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+			if (txq) {
+				__netif_tx_lock_bh(txq);
+				netif_tx_stop_queue(txq);
+				__netif_tx_unlock_bh(txq);
+			}
+		}
+		bnxt_hwrm_tx_ring_free(bp, txr, true);
+		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+		bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
+		bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
+	}
+}
+
+static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
+{
+	struct bnxt_tx_ring_info *txr;
+	struct netdev_queue *txq;
+	struct bnxt_napi *bnapi;
+	int rc, i;
+
+	bnapi = bp->bnapi[idx];
+	bnxt_for_each_napi_tx(i, bnapi, txr) {
+		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
+		if (rc)
+			return rc;
+
+		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
+		if (rc) {
+			bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
+			return rc;
+		}
+		txr->tx_prod = 0;
+		txr->tx_cons = 0;
+		txr->tx_hw_cons = 0;
+
+		WRITE_ONCE(txr->dev_state, 0);
+		synchronize_net();
+
+		if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
+			continue;
+
+		txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
+		if (txq)
+			netif_tx_start_queue(txq);
+	}
+
+	return 0;
+}
+
 static void bnxt_free_irq(struct bnxt *bp)
 {
 	struct bnxt_irq *irq;
@@ -15647,6 +15726,16 @@  static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
 	cpr = &rxr->bnapi->cp_ring;
 	cpr->sw_stats->rx.rx_resets++;
 
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
+		rc = bnxt_tx_queue_start(bp, idx);
+		if (rc)
+			netdev_warn(bp->dev,
+				    "tx queue restart failed: rc=%d\n", rc);
+	}
+
+	napi_enable(&rxr->bnapi->napi);
+	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
+
 	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
 		vnic = &bp->vnic_info[i];
 
@@ -15675,6 +15764,7 @@  static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 	struct bnxt *bp = netdev_priv(dev);
 	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_vnic_info *vnic;
+	struct bnxt_napi *bnapi;
 	int i;
 
 	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
@@ -15686,15 +15776,22 @@  static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
 	/* Make sure NAPI sees that the VNIC is disabled */
 	synchronize_net();
 	rxr = &bp->rx_ring[idx];
-	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
+	bnapi = rxr->bnapi;
+	cancel_work_sync(&bnapi->cp_ring.dim.work);
 	bnxt_hwrm_rx_ring_free(bp, rxr, false);
 	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
 	page_pool_disable_direct_recycling(rxr->page_pool);
 	if (bnxt_separate_head_pool())
 		page_pool_disable_direct_recycling(rxr->head_pool);
 
+	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+		bnxt_tx_queue_stop(bp, idx);
+
+	napi_disable(&bnapi->napi);
+
 	bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
 	bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
+	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
 
 	memcpy(qmem, rxr, sizeof(*rxr));
 	bnxt_init_rx_ring_struct(bp, qmem);