Message ID | 20241015125841.1075560-11-wei.fang@nxp.com (mailing list archive)
---|---
State | Superseded
Series | add basic support for i.MX95 NETC
> -----Original Message-----
> From: Wei Fang <wei.fang@nxp.com>
> Sent: Tuesday, October 15, 2024 3:59 PM
[...]
> Subject: [PATCH v2 net-next 10/13] net: enetc: extract
> enetc_int_vector_init/destroy() from enetc_alloc_msix()
>
> From: Clark Wang <xiaoning.wang@nxp.com>
>
> Extract enetc_int_vector_init() and enetc_int_vector_destroy() from
> enetc_alloc_msix() so that the code is more concise and readable.
>
> Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
> ---
> v2 changes:
> This patch is separated from v1 patch 9 ("net: enetc: optimize the
> allocation of tx_bdr"). Separate enetc_int_vector_init() from the
> original patch. In addition, add a new helper function
> enetc_int_vector_destroy().
> ---
>  drivers/net/ethernet/freescale/enetc/enetc.c | 174 +++++++++----------
>  1 file changed, 87 insertions(+), 87 deletions(-)
>
[...]
> +static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
> +                                 int v_tx_rings)
> +{
[...]
> +        priv->int_vector[i] = no_free_ptr(v);

This is new, and looks like it's a fix on its own. It's fixing a dangling
reference in int_vector[i], if I'm not wrong.

Other than that, like for the original patch:
Reviewed-by: Claudiu Manoil <claudiu.manoil@nxp.com>
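To make the point about the dangling reference concrete, below is a minimal sketch of the two shapes being compared. The names (struct vec, table, setup_hw()) are placeholders rather than the real enetc structures, and only the ordering is illustrated; the actual error paths also involve the XDP and NAPI teardown shown in the patch.

```c
#include <linux/errno.h>
#include <linux/slab.h>

struct vec { int id; };			/* placeholder type */

static int setup_hw(struct vec *v)
{
	return 0;			/* stands in for xdp/napi registration */
}

/* Old shape: the slot is published before setup finishes, so an error path
 * that only kfree()s the object leaves table[i] pointing at freed memory
 * unless the slot is explicitly cleared again.
 */
static int init_slot_old(struct vec **table, int i)
{
	struct vec *v = kzalloc(sizeof(*v), GFP_KERNEL);

	if (!v)
		return -ENOMEM;

	table[i] = v;			/* published too early */
	if (setup_hw(v)) {
		kfree(v);		/* table[i] now dangles */
		return -EIO;
	}
	return 0;
}

/* New shape: keep the object private until setup succeeds and publish it
 * only at the end, which is what the patch does via no_free_ptr().
 */
static int init_slot_new(struct vec **table, int i)
{
	struct vec *v = kzalloc(sizeof(*v), GFP_KERNEL);

	if (!v)
		return -ENOMEM;

	if (setup_hw(v)) {
		kfree(v);
		return -EIO;
	}
	table[i] = v;			/* published only on success */
	return 0;
}
```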
On Tue, Oct 15, 2024 at 08:58:38PM +0800, Wei Fang wrote:
> From: Clark Wang <xiaoning.wang@nxp.com>
>
> Extract enetc_int_vector_init() and enetc_int_vector_destroy() from
> enetc_alloc_msix() so that the code is more concise and readable.
>
> Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
> ---
> v2 changes:
> This patch is separated from v1 patch 9 ("net: enetc: optimize the
> allocation of tx_bdr"). Separate enetc_int_vector_init() from the
> original patch. In addition, add a new helper function
> enetc_int_vector_destroy().
> ---
>  drivers/net/ethernet/freescale/enetc/enetc.c | 174 +++++++++----------
>  1 file changed, 87 insertions(+), 87 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
> index 032d8eadd003..d36af3f8ba31 100644
> --- a/drivers/net/ethernet/freescale/enetc/enetc.c
> +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
> @@ -2965,6 +2965,87 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
>  }
>  EXPORT_SYMBOL_GPL(enetc_ioctl);
>
> +static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
> +                                 int v_tx_rings)
> +{
> +        struct enetc_int_vector *v __free(kfree);

The old code did not use the cleanup attribute. Please keep this exactly the
same as the old code, or at least mention the change in the commit log.

Frank
> -----Original Message-----
> From: Frank Li <frank.li@nxp.com>
> Sent: October 16, 2024 00:54
> To: Wei Fang <wei.fang@nxp.com>
> Cc: davem@davemloft.net; edumazet@google.com; kuba@kernel.org;
> pabeni@redhat.com; robh@kernel.org; krzk+dt@kernel.org;
> conor+dt@kernel.org; Vladimir Oltean <vladimir.oltean@nxp.com>; Claudiu
> Manoil <claudiu.manoil@nxp.com>; Clark Wang <xiaoning.wang@nxp.com>;
> christophe.leroy@csgroup.eu; linux@armlinux.org.uk; bhelgaas@google.com;
> horms@kernel.org; imx@lists.linux.dev; netdev@vger.kernel.org;
> devicetree@vger.kernel.org; linux-kernel@vger.kernel.org;
> linux-pci@vger.kernel.org
> Subject: Re: [PATCH v2 net-next 10/13] net: enetc: extract
> enetc_int_vector_init/destroy() from enetc_alloc_msix()
>
> On Tue, Oct 15, 2024 at 08:58:38PM +0800, Wei Fang wrote:
> > From: Clark Wang <xiaoning.wang@nxp.com>
> >
> > Extract enetc_int_vector_init() and enetc_int_vector_destroy() from
> > enetc_alloc_msix() so that the code is more concise and readable.
[...]
> > +static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
> > +                                 int v_tx_rings)
> > +{
> > +        struct enetc_int_vector *v __free(kfree);
>
> The old code did not use the cleanup attribute. Please keep this exactly
> the same as the old code, or at least mention the change in the commit
> log.

Okay, I will mention it in the commit message.
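For readers who have not seen the attribute Frank is referring to, the following is a minimal sketch of the scope-based cleanup helpers from <linux/cleanup.h> that the new helper relies on. struct foo and alloc_foo() are illustrative placeholders, not enetc code.

```c
#include <linux/cleanup.h>
#include <linux/slab.h>

struct foo {				/* placeholder type */
	int bar;
};

static int alloc_foo(struct foo **out)
{
	/* __free(kfree): the pointer is automatically kfree()d when it
	 * goes out of scope, e.g. on the early error return below.
	 */
	struct foo *f __free(kfree) = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return -ENOMEM;

	f->bar = 1;

	/* no_free_ptr(): hand ownership to the caller and disarm the
	 * automatic kfree(), so only the success path keeps the object.
	 */
	*out = no_free_ptr(f);

	return 0;
}
```

This is why enetc_int_vector_init() in the patch can simply return err on failure and only writes priv->int_vector[i] through no_free_ptr() once everything has succeeded.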
> > +static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
> > +                                 int v_tx_rings)
> > +{
> > +        struct enetc_int_vector *v __free(kfree);
> > +        struct enetc_bdr *bdr;
> > +        int j, err;
> > +
> > +        v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> > +        if (!v)
> > +                return -ENOMEM;
[...]
> > +        priv->int_vector[i] = no_free_ptr(v);
>
> This is new, and looks like it's a fix on its own. It's fixing a dangling
> reference in int_vector[i], if I'm not wrong.

This is slightly different from the original code; it uses the cleanup
attribute to manage the dynamically allocated memory.

> Other than that, like for the original patch:
> Reviewed-by: Claudiu Manoil <claudiu.manoil@nxp.com>
On Tue, Oct 15, 2024 at 08:58:38PM +0800, Wei Fang wrote:
> From: Clark Wang <xiaoning.wang@nxp.com>
>
> Extract enetc_int_vector_init() and enetc_int_vector_destroy() from
> enetc_alloc_msix() so that the code is more concise and readable.
>
> Signed-off-by: Clark Wang <xiaoning.wang@nxp.com>
> Signed-off-by: Wei Fang <wei.fang@nxp.com>
> ---
> v2 changes:
> This patch is separated from v1 patch 9 ("net: enetc: optimize the
> allocation of tx_bdr"). Separate enetc_int_vector_init() from the
> original patch. In addition, add a new helper function
> enetc_int_vector_destroy().
> ---
>  drivers/net/ethernet/freescale/enetc/enetc.c | 174 +++++++++----------
>  1 file changed, 87 insertions(+), 87 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
> index 032d8eadd003..d36af3f8ba31 100644
> --- a/drivers/net/ethernet/freescale/enetc/enetc.c
> +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
> @@ -2965,6 +2965,87 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
>  }
>  EXPORT_SYMBOL_GPL(enetc_ioctl);
>
> +static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
> +                                 int v_tx_rings)
> +{
> +        struct enetc_int_vector *v __free(kfree);
> +        struct enetc_bdr *bdr;
> +        int j, err;
> +
> +        v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> +        if (!v)
> +                return -ENOMEM;

...

>  int enetc_alloc_msix(struct enetc_ndev_priv *priv)
>  {
>          struct pci_dev *pdev = priv->si->pdev;

...

> @@ -2986,64 +3067,9 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
>          /* # of tx rings per int vector */
>          v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
>
> -        for (i = 0; i < priv->bdr_int_num; i++) {
> -                struct enetc_int_vector *v;
> -                struct enetc_bdr *bdr;
> -                int j;
> -
> -                v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> -                if (!v) {
> -                        err = -ENOMEM;
> +        for (i = 0; i < priv->bdr_int_num; i++)
> +                if (enetc_int_vector_init(priv, i, v_tx_rings))
>                          goto fail;

Hi Wei Fang,

It looks like, if we reach this error handling during the first iteration
of the for loop, then err, which will be the value returned by the
function, is uninitialised. Perhaps this would be better expressed as
follows? (Completely untested!)

        err = enetc_int_vector_init(priv, i, v_tx_rings);
        if (err)
                goto fail;

Flagged by Smatch.

...
> On Tue, Oct 15, 2024 at 08:58:38PM +0800, Wei Fang wrote:
> >  drivers/net/ethernet/freescale/enetc/enetc.c | 174 +++++++++----------
> >  1 file changed, 87 insertions(+), 87 deletions(-)
> >
[...]
> > @@ -2986,64 +3067,9 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
> >          /* # of tx rings per int vector */
> >          v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
> >
> > -        for (i = 0; i < priv->bdr_int_num; i++) {
> > -                struct enetc_int_vector *v;
> > -                struct enetc_bdr *bdr;
> > -                int j;
> > -
> > -                v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
> > -                if (!v) {
> > -                        err = -ENOMEM;
> > +        for (i = 0; i < priv->bdr_int_num; i++)
> > +                if (enetc_int_vector_init(priv, i, v_tx_rings))
> >                          goto fail;
>
> Hi Wei Fang,
>
> It looks like, if we reach this error handling during the first iteration
> of the for loop, then err, which will be the value returned by the
> function, is uninitialised. Perhaps this would be better expressed as
> follows? (Completely untested!)
>
>         err = enetc_int_vector_init(priv, i, v_tx_rings);
>         if (err)
>                 goto fail;
>
> Flagged by Smatch.
>

Thanks, you are correct, I will fix it.
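Assuming the loop is reworked along the lines Simon suggests, the call site would end up roughly as below, so that err always carries the helper's return value when the fail label is reached. This is only a sketch of the suggestion, not code taken from a later revision of the series.

```c
	/* Sketch: capture the helper's return code so the fail path does
	 * not return an uninitialised err.
	 */
	for (i = 0; i < priv->bdr_int_num; i++) {
		err = enetc_int_vector_init(priv, i, v_tx_rings);
		if (err)
			goto fail;
	}
```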
```diff
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 032d8eadd003..d36af3f8ba31 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2965,6 +2965,87 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 }
 EXPORT_SYMBOL_GPL(enetc_ioctl);
 
+static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
+                                 int v_tx_rings)
+{
+        struct enetc_int_vector *v __free(kfree);
+        struct enetc_bdr *bdr;
+        int j, err;
+
+        v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
+        if (!v)
+                return -ENOMEM;
+
+        bdr = &v->rx_ring;
+        bdr->index = i;
+        bdr->ndev = priv->ndev;
+        bdr->dev = priv->dev;
+        bdr->bd_count = priv->rx_bd_count;
+        bdr->buffer_offset = ENETC_RXB_PAD;
+        priv->rx_ring[i] = bdr;
+
+        err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
+        if (err)
+                return err;
+
+        err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
+                                         MEM_TYPE_PAGE_SHARED, NULL);
+        if (err) {
+                xdp_rxq_info_unreg(&bdr->xdp.rxq);
+                return err;
+        }
+
+        /* init defaults for adaptive IC */
+        if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+                v->rx_ictt = 0x1;
+                v->rx_dim_en = true;
+        }
+
+        INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
+        netif_napi_add(priv->ndev, &v->napi, enetc_poll);
+        v->count_tx_rings = v_tx_rings;
+
+        for (j = 0; j < v_tx_rings; j++) {
+                int idx;
+
+                /* default tx ring mapping policy */
+                idx = priv->bdr_int_num * j + i;
+                __set_bit(idx, &v->tx_rings_map);
+                bdr = &v->tx_ring[j];
+                bdr->index = idx;
+                bdr->ndev = priv->ndev;
+                bdr->dev = priv->dev;
+                bdr->bd_count = priv->tx_bd_count;
+                priv->tx_ring[idx] = bdr;
+        }
+
+        priv->int_vector[i] = no_free_ptr(v);
+
+        return 0;
+}
+
+static void enetc_int_vector_destroy(struct enetc_ndev_priv *priv, int i)
+{
+        struct enetc_int_vector *v = priv->int_vector[i];
+        struct enetc_bdr *rx_ring = &v->rx_ring;
+        int j, tx_ring_index;
+
+        xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
+        xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
+        netif_napi_del(&v->napi);
+        cancel_work_sync(&v->rx_dim.work);
+
+        priv->rx_ring[i] = NULL;
+
+        for (j = 0; j < v->count_tx_rings; j++) {
+                tx_ring_index = priv->bdr_int_num * j + i;
+                priv->tx_ring[tx_ring_index] = NULL;
+        }
+
+        kfree(v);
+        priv->int_vector[i] = NULL;
+}
+
 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
 {
         struct pci_dev *pdev = priv->si->pdev;
@@ -2986,64 +3067,9 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
         /* # of tx rings per int vector */
         v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
 
-        for (i = 0; i < priv->bdr_int_num; i++) {
-                struct enetc_int_vector *v;
-                struct enetc_bdr *bdr;
-                int j;
-
-                v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
-                if (!v) {
-                        err = -ENOMEM;
+        for (i = 0; i < priv->bdr_int_num; i++)
+                if (enetc_int_vector_init(priv, i, v_tx_rings))
                         goto fail;
-                }
-
-                priv->int_vector[i] = v;
-
-                bdr = &v->rx_ring;
-                bdr->index = i;
-                bdr->ndev = priv->ndev;
-                bdr->dev = priv->dev;
-                bdr->bd_count = priv->rx_bd_count;
-                bdr->buffer_offset = ENETC_RXB_PAD;
-                priv->rx_ring[i] = bdr;
-
-                err = xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0);
-                if (err) {
-                        kfree(v);
-                        goto fail;
-                }
-
-                err = xdp_rxq_info_reg_mem_model(&bdr->xdp.rxq,
-                                                 MEM_TYPE_PAGE_SHARED, NULL);
-                if (err) {
-                        xdp_rxq_info_unreg(&bdr->xdp.rxq);
-                        kfree(v);
-                        goto fail;
-                }
-
-                /* init defaults for adaptive IC */
-                if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
-                        v->rx_ictt = 0x1;
-                        v->rx_dim_en = true;
-                }
-                INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
-                netif_napi_add(priv->ndev, &v->napi, enetc_poll);
-                v->count_tx_rings = v_tx_rings;
-
-                for (j = 0; j < v_tx_rings; j++) {
-                        int idx;
-
-                        /* default tx ring mapping policy */
-                        idx = priv->bdr_int_num * j + i;
-                        __set_bit(idx, &v->tx_rings_map);
-                        bdr = &v->tx_ring[j];
-                        bdr->index = idx;
-                        bdr->ndev = priv->ndev;
-                        bdr->dev = priv->dev;
-                        bdr->bd_count = priv->tx_bd_count;
-                        priv->tx_ring[idx] = bdr;
-                }
-        }
 
         num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
 
@@ -3062,16 +3088,8 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
         return 0;
 
 fail:
-        while (i--) {
-                struct enetc_int_vector *v = priv->int_vector[i];
-                struct enetc_bdr *rx_ring = &v->rx_ring;
-
-                xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
-                xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
-                netif_napi_del(&v->napi);
-                cancel_work_sync(&v->rx_dim.work);
-                kfree(v);
-        }
+        while (i--)
+                enetc_int_vector_destroy(priv, i);
 
         pci_free_irq_vectors(pdev);
 
@@ -3083,26 +3101,8 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
 {
         int i;
 
-        for (i = 0; i < priv->bdr_int_num; i++) {
-                struct enetc_int_vector *v = priv->int_vector[i];
-                struct enetc_bdr *rx_ring = &v->rx_ring;
-
-                xdp_rxq_info_unreg_mem_model(&rx_ring->xdp.rxq);
-                xdp_rxq_info_unreg(&rx_ring->xdp.rxq);
-                netif_napi_del(&v->napi);
-                cancel_work_sync(&v->rx_dim.work);
-        }
-
-        for (i = 0; i < priv->num_rx_rings; i++)
-                priv->rx_ring[i] = NULL;
-
-        for (i = 0; i < priv->num_tx_rings; i++)
-                priv->tx_ring[i] = NULL;
-
-        for (i = 0; i < priv->bdr_int_num; i++) {
-                kfree(priv->int_vector[i]);
-                priv->int_vector[i] = NULL;
-        }
+        for (i = 0; i < priv->bdr_int_num; i++)
+                enetc_int_vector_destroy(priv, i);
 
         /* disable all MSIX for this device */
         pci_free_irq_vectors(priv->si->pdev);
```