Message ID | 20250113063927.4017173-9-michael.chan@broadcom.com (mailing list archive) |
---|---|
State | New |
Delegated to | Netdev Maintainers |
Series | bnxt_en: Add NPAR 1.2 and TPH support |
On Sun, Jan 12, 2025 at 10:39:25PM -0800, Michael Chan wrote:
> From: Somnath Kotur <somnath.kotur@broadcom.com>
>
> In order to program the correct Steering Tag during an IRQ affinity
> change, we need to free/re-allocate the Rx completion ring during
> queue_restart. Call FW to free the Rx completion ring and clear the
> ring entries in queue_stop(). Re-allocate it in queue_start().
>
> Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
> Reviewed-by: Michael Chan <michael.chan@broadcom.com>
> ---
> Cc: David Wei <dw@davidwei.uk>
> ---
>  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 26 +++++++++++++++++++++--
>  1 file changed, 24 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index 30a57bbc407c..fe350d0ba99c 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -7399,6 +7399,19 @@ static void bnxt_hwrm_cp_ring_free(struct bnxt *bp,
>  	ring->fw_ring_id = INVALID_HW_RING_ID;
>  }
>
> +static void bnxt_clear_one_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
> +{
> +	struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
> +	int i, size = ring->ring_mem.page_size;
> +
> +	cpr->cp_raw_cons = 0;
> +	cpr->toggle = 0;
> +
> +	for (i = 0; i < bp->cp_nr_pages; i++)
> +		if (cpr->cp_desc_ring[i])
> +			memset(cpr->cp_desc_ring[i], 0, size);
> +}
> +
>  static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
>  {
>  	u32 type;
> @@ -15618,10 +15631,15 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
>  	rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
>  	if (rc)
>  		return rc;
> -	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
> +
> +	rc = bnxt_hwrm_cp_ring_alloc_p5(bp, rxr->rx_cpr);
>  	if (rc)
>  		goto err_free_hwrm_rx_ring;
>
> +	rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
> +	if (rc)
> +		goto err_free_hwrm_cp_ring;
> +
>  	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
>  	if (bp->flags & BNXT_FLAG_AGG_RINGS)
>  		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
> @@ -15645,6 +15663,8 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
>
>  	return 0;
>
> +err_free_hwrm_cp_ring:
> +	bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
>  err_free_hwrm_rx_ring:
>  	bnxt_hwrm_rx_ring_free(bp, rxr, false);
>  	return rc;
> @@ -15669,11 +15689,13 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
>  	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
>  	bnxt_hwrm_rx_ring_free(bp, rxr, false);
>  	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
> -	rxr->rx_next_cons = 0;

Unrelated?

>  	page_pool_disable_direct_recycling(rxr->page_pool);
>  	if (bnxt_separate_head_pool())
>  		page_pool_disable_direct_recycling(rxr->head_pool);
>
> +	bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
> +	bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
> +
>  	memcpy(qmem, rxr, sizeof(*rxr));
>  	bnxt_init_rx_ring_struct(bp, qmem);

Rest looks fine:

Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>

> --
> 2.30.1
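For readers unfamiliar with the pattern under review: the new err_free_hwrm_cp_ring label extends the staged-allocation/goto-unwind idiom in bnxt_queue_start(), where each resource allocated so far gains a matching free in the unwind path, in reverse order of allocation. Below is a minimal, self-contained sketch of that idiom; the alloc_*/free_* helpers are hypothetical stand-ins, not the bnxt HWRM API.

```c
/*
 * Sketch of staged allocation with goto-based unwinding, as in
 * bnxt_queue_start(). All helpers here are hypothetical stand-ins.
 */
#include <stdio.h>

static int alloc_rx_ring(void)  { return 0; }   /* 0 on success */
static int alloc_cp_ring(void)  { return 0; }
static int alloc_agg_ring(void) { return -1; }  /* simulate a failure */

static void free_rx_ring(void)  { puts("free rx ring"); }
static void free_cp_ring(void)  { puts("free cp ring"); }

static int queue_start(void)
{
	int rc;

	rc = alloc_rx_ring();
	if (rc)
		return rc;		/* nothing allocated yet, nothing to unwind */

	rc = alloc_cp_ring();
	if (rc)
		goto err_free_rx;	/* only the rx ring exists */

	rc = alloc_agg_ring();
	if (rc)
		goto err_free_cp;	/* rx and cp rings both exist */

	return 0;

err_free_cp:
	free_cp_ring();			/* fall through: free in reverse order */
err_free_rx:
	free_rx_ring();
	return rc;
}

int main(void)
{
	printf("queue_start: %d\n", queue_start());
	return 0;
}
```

With three stages, a failure at the aggregation-ring step unwinds the completion ring and then the Rx ring, mirroring the patch's err_free_hwrm_cp_ring / err_free_hwrm_rx_ring fallthrough.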
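The other half of the patch, bnxt_clear_one_cp_ring(), resets a completion ring that is backed by an array of page-sized buffers: rewind the consumer state, then zero every backing page that was actually allocated. A rough sketch of that idea, with hypothetical names (paged_ring, ring_clear) rather than the real bnxt structures:

```c
/*
 * Sketch of clearing a paged completion ring, in the spirit of
 * bnxt_clear_one_cp_ring(). Names and sizes are illustrative only.
 */
#include <stdlib.h>
#include <string.h>

#define RING_PAGES 4
#define PAGE_SZ    4096

struct paged_ring {
	void  *pages[RING_PAGES];	/* backing pages; may contain NULL holes */
	size_t raw_cons;		/* consumer index */
	int    toggle;			/* valid-bit phase */
};

static void ring_clear(struct paged_ring *r)
{
	int i;

	/* Rewind software state before wiping the descriptors. */
	r->raw_cons = 0;
	r->toggle = 0;

	/* Zero only the pages that are actually allocated. */
	for (i = 0; i < RING_PAGES; i++)
		if (r->pages[i])
			memset(r->pages[i], 0, PAGE_SZ);
}

int main(void)
{
	struct paged_ring r = { 0 };

	r.pages[0] = malloc(PAGE_SZ);	/* only page 0 backed, rest NULL */
	ring_clear(&r);			/* zeroes page 0, skips the holes */
	free(r.pages[0]);
	return 0;
}
```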