[RFC,net-next,1/5] net: mvneta: fix transmit path dma-unmapping on error

Message ID E1pjOwe-00Fmki-6p@rmk-PC.armlinux.org.uk (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Series net: mvneta: reduce size of TSO header allocation

Commit Message

Russell King (Oracle) April 3, 2023, 6:30 p.m. UTC
On transmit errors, the cleanup code assumes that the descriptors to
be unmapped begin with the first descriptor in the ring, but the
packet's descriptors actually begin at the current put index, which
may be anywhere in the ring. Fix this by providing a new function
that DMA-unmaps a range of numbered descriptor entries, and use it
to do the unmapping.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
---
 drivers/net/ethernet/marvell/mvneta.c | 53 +++++++++++++++++----------
 1 file changed, 33 insertions(+), 20 deletions(-)
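
To make the bug concrete: suppose a packet's three descriptors occupy
slots 5, 6 and 7 of an 8-entry ring. The old error path walked indices
desc_count-1 down to 0 from the ring base, unmapping slots 2, 1 and 0,
none of which the packet used. The toy program below (a standalone C
sketch, not driver code; RING_SIZE, first, used and idx are
illustrative names) contrasts the two walks:

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
	int first = 5, used = 3;	/* packet occupies slots 5, 6, 7 */
	int i, idx;

	/* Old (broken) cleanup: walks down from the ring base, touching
	 * slots 2, 1 and 0 rather than the slots the packet used.
	 */
	for (i = used - 1; i >= 0; i--)
		printf("old cleanup touches slot %d\n", i);

	/* New cleanup, mirroring mvneta_release_descs(): start at the
	 * packet's last slot and walk back to 'first', wrapping at slot 0.
	 */
	idx = first + (used - 1);
	if (idx >= RING_SIZE)
		idx -= RING_SIZE;
	for (i = used - 1; i >= 0; i--) {
		printf("new cleanup touches slot %d\n", idx);
		if (idx == 0)
			idx = RING_SIZE;
		idx -= 1;
	}
	return 0;
}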

Comments

Eric Dumazet April 4, 2023, 5:09 a.m. UTC | #1
On Mon, Apr 3, 2023 at 8:30 PM Russell King (Oracle)
<rmk+kernel@armlinux.org.uk> wrote:
>
> On transmit errors, the cleanup code assumes that the descriptors to
> be unmapped begin with the first descriptor in the ring, but the
> packet's descriptors actually begin at the current put index, which
> may be anywhere in the ring. Fix this by providing a new function
> that DMA-unmaps a range of numbered descriptor entries, and use it
> to do the unmapping.
>
> Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>

Nice patch series!

I guess this one will need to be backported to stable versions. It
would be nice to add:

Fixes: 2adb719d74f6 ("net: mvneta: Implement software TSO")

Thanks.
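
For reference, Fixes: is a standard kernel commit trailer; it sits
with the other trailers, conventionally just above the Signed-off-by
line, and lets the stable maintainers identify which kernels need the
backport. A sketch of the amended tail of this commit message (both
lines are taken from this thread):

Fixes: 2adb719d74f6 ("net: mvneta: Implement software TSO")
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>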


Patch

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 2cad76d0a50e..62400ff61e34 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2714,14 +2714,40 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 	return 0;
 }
 
+static void mvneta_release_descs(struct mvneta_port *pp,
+				 struct mvneta_tx_queue *txq,
+				 int first, int num)
+{
+	int desc_idx, i;
+
+	desc_idx = first + num;
+	if (desc_idx >= txq->size)
+		desc_idx -= txq->size;
+
+	for (i = num; i >= 0; i--) {
+		struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx;
+
+		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+			dma_unmap_single(pp->dev->dev.parent,
+					 tx_desc->buf_phys_addr,
+					 tx_desc->data_size,
+					 DMA_TO_DEVICE);
+
+		mvneta_txq_desc_put(txq);
+
+		if (desc_idx == 0)
+			desc_idx = txq->size;
+		desc_idx -= 1;
+	}
+}
+
 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
 			 struct mvneta_tx_queue *txq)
 {
 	int hdr_len, total_len, data_left;
-	int desc_count = 0;
+	int first_desc, desc_count = 0;
 	struct mvneta_port *pp = netdev_priv(dev);
 	struct tso_t tso;
-	int i;
 
 	/* Count needed descriptors */
 	if ((txq->count + tso_count_descs(skb)) >= txq->size)
@@ -2732,6 +2758,8 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
 		return 0;
 	}
 
+	first_desc = txq->txq_put_index;
+
 	/* Initialize the TSO handler, and prepare the first payload */
 	hdr_len = tso_start(skb, &tso);
 
@@ -2772,15 +2800,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
 	/* Release all used data descriptors; header descriptors must not
 	 * be DMA-unmapped.
 	 */
-	for (i = desc_count - 1; i >= 0; i--) {
-		struct mvneta_tx_desc *tx_desc = txq->descs + i;
-		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
-			dma_unmap_single(pp->dev->dev.parent,
-					 tx_desc->buf_phys_addr,
-					 tx_desc->data_size,
-					 DMA_TO_DEVICE);
-		mvneta_txq_desc_put(txq);
-	}
+	mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
 	return 0;
 }
 
@@ -2790,6 +2810,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 {
 	struct mvneta_tx_desc *tx_desc;
 	int i, nr_frags = skb_shinfo(skb)->nr_frags;
+	int first_desc = txq->txq_put_index;
 
 	for (i = 0; i < nr_frags; i++) {
 		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
@@ -2828,15 +2849,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
 	/* Release all descriptors that were used to map fragments of
 	 * this packet, as well as the corresponding DMA mappings
 	 */
-	for (i = i - 1; i >= 0; i--) {
-		tx_desc = txq->descs + i;
-		dma_unmap_single(pp->dev->dev.parent,
-				 tx_desc->buf_phys_addr,
-				 tx_desc->data_size,
-				 DMA_TO_DEVICE);
-		mvneta_txq_desc_put(txq);
-	}
-
+	mvneta_release_descs(pp, txq, first_desc, i - 1);
 	return -ENOMEM;
 }
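
The new helper's contract: 'first' is the index of the packet's first
descriptor and 'num' is the offset of its last descriptor, i.e.
count - 1, which is what both callers pass; a failure before any
descriptor was used (num == -1) is a no-op. Below is a minimal
standalone sketch of the same index arithmetic using a toy ring
instead of real descriptors and DMA (release_descs, released and
RING_SIZE are illustrative names, not driver symbols):

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 16

static int released[RING_SIZE];

/* Same walk as the patch's mvneta_release_descs(): release
 * descriptors first..first+num inclusive, modulo RING_SIZE, going
 * from the last descriptor back to the first.
 */
static void release_descs(int first, int num)
{
	int desc_idx, i;

	desc_idx = first + num;
	if (desc_idx >= RING_SIZE)
		desc_idx -= RING_SIZE;

	for (i = num; i >= 0; i--) {
		released[desc_idx] = 1;	/* stands in for dma_unmap_single() */

		if (desc_idx == 0)
			desc_idx = RING_SIZE;
		desc_idx -= 1;
	}
}

int main(void)
{
	int i;

	/* 4-descriptor packet starting two slots before the wrap point */
	release_descs(14, 3);

	for (i = 0; i < RING_SIZE; i++)
		if (released[i])
			printf("released slot %d\n", i);

	/* exactly slots 14, 15, 0 and 1 were released */
	assert(released[14] && released[15] && released[0] && released[1]);

	/* num == -1 (no descriptors used) must be a no-op */
	release_descs(5, -1);
	assert(!released[5] && !released[4]);
	return 0;
}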