[net,2/4] net: ll_temac: Add more error handling of dma_map_single() calls

Message ID 65907810dd82de3fcaad9869f328ab32800c67ea.1582108989.git.esben@geanix.com (mailing list archive)
State New, archived
Series net: ll_temac: Bugfixes

Commit Message

Esben Haabendal Feb. 19, 2020, 10:54 a.m. UTC
This adds error handling to the remaining dma_map_single() calls, so that
behavior is well defined if/when we run out of DMA memory.
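
The pattern applied throughout is to check every mapping with dma_mapping_error()
before the address is handed to a buffer descriptor. A minimal, self-contained
sketch of that pattern (the map_buffer() helper name is hypothetical, not part of
the driver):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: map one buffer for TX and report failure so the caller
 * can drop the packet or unwind instead of handing a bogus address to
 * the DMA engine.
 */
static int map_buffer(struct device *dev, void *buf, size_t len,
		      dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*handle = addr;
	return 0;
}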

Fixes: 92744989533c ("net: add Xilinx ll_temac device driver")
Signed-off-by: Esben Haabendal <esben@geanix.com>
---
 drivers/net/ethernet/xilinx/ll_temac_main.c | 26 +++++++++++++++++++--
 1 file changed, 24 insertions(+), 2 deletions(-)

Comments

David Miller Feb. 19, 2020, 6:59 p.m. UTC | #1
From: Esben Haabendal <esben@geanix.com>
Date: Wed, 19 Feb 2020 11:54:00 +0100

> @@ -863,12 +865,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>  	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
>  				      skb_headlen(skb), DMA_TO_DEVICE);
>  	cur_p->len = cpu_to_be32(skb_headlen(skb));
> +	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr)))
> +		return NETDEV_TX_BUSY;

The appropriate behavior in this situation is to drop the packet and return
NETDEV_TX_OK.
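For illustration, handling along those lines would look roughly like the snippet
below; this is a sketch of the suggested behavior for the head mapping in
temac_start_xmit(), not necessarily the code that was eventually merged:

/* Assumed context: inside temac_start_xmit(), right after the
 * dma_map_single() of skb->data.  Free the skb, count the drop, and
 * tell the stack the packet was consumed.
 */
if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
	dev_kfree_skb_any(skb);
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}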
Esben Haabendal Feb. 20, 2020, 8:32 a.m. UTC | #2
David Miller <davem@davemloft.net> writes:

> From: Esben Haabendal <esben@geanix.com>
> Date: Wed, 19 Feb 2020 11:54:00 +0100
>
>> @@ -863,12 +865,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>>  	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
>>  				      skb_headlen(skb), DMA_TO_DEVICE);
>>  	cur_p->len = cpu_to_be32(skb_headlen(skb));
>> +	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr)))
>> +		return NETDEV_TX_BUSY;
>
> The appropriate behavior in this situation is to drop the packet and return
> NETDEV_TX_OK.

Ok, and I guess the same goes for the error handling of the dma_map_single()
calls for the fragments later in the same function.

/Esben
David Miller Feb. 20, 2020, 6:11 p.m. UTC | #3
From: Esben Haabendal <esben@geanix.com>
Date: Thu, 20 Feb 2020 09:32:58 +0100

> David Miller <davem@davemloft.net> writes:
> 
>> From: Esben Haabendal <esben@geanix.com>
>> Date: Wed, 19 Feb 2020 11:54:00 +0100
>>
>>> @@ -863,12 +865,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>>>  	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
>>>  				      skb_headlen(skb), DMA_TO_DEVICE);
>>>  	cur_p->len = cpu_to_be32(skb_headlen(skb));
>>> +	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr)))
>>> +		return NETDEV_TX_BUSY;
>>
>> The appropriate behavior in this situation is to drop the packet and return
>> NETDEV_TX_OK.
> 
> Ok, and I guess the same goes for the error handling of the dma_map_single()
> calls for the fragments later in the same function.

Yes.

Patch

diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 996004ef8bd4..c368c3914bda 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -367,6 +367,8 @@  static int temac_dma_bd_init(struct net_device *ndev)
 		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 					      XTE_MAX_JUMBO_FRAME_SIZE,
 					      DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
+			goto out;
 		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
 		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
 		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
@@ -863,12 +865,13 @@  temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
 				      skb_headlen(skb), DMA_TO_DEVICE);
 	cur_p->len = cpu_to_be32(skb_headlen(skb));
+	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr)))
+		return NETDEV_TX_BUSY;
 	cur_p->phys = cpu_to_be32(skb_dma_addr);
 	ptr_to_txbd((void *)skb, cur_p);
 
 	for (ii = 0; ii < num_frag; ii++) {
-		lp->tx_bd_tail++;
-		if (lp->tx_bd_tail >= TX_BD_NUM)
+		if (++lp->tx_bd_tail >= TX_BD_NUM)
 			lp->tx_bd_tail = 0;
 
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -876,6 +879,25 @@  temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					      skb_frag_address(frag),
 					      skb_frag_size(frag),
 					      DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
+			if (--lp->tx_bd_tail < 0)
+				lp->tx_bd_tail = TX_BD_NUM - 1;
+			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+			while (--ii >= 0) {
+				--frag;
+				dma_unmap_single(ndev->dev.parent,
+						 be32_to_cpu(cur_p->phys),
+						 skb_frag_size(frag),
+						 DMA_TO_DEVICE);
+				if (--lp->tx_bd_tail < 0)
+					lp->tx_bd_tail = TX_BD_NUM - 1;
+				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+			}
+			dma_unmap_single(ndev->dev.parent,
+					 be32_to_cpu(cur_p->phys),
+					 skb_headlen(skb), DMA_TO_DEVICE);
+			return NETDEV_TX_BUSY;
+		}
 		cur_p->phys = cpu_to_be32(skb_dma_addr);
 		cur_p->len = cpu_to_be32(skb_frag_size(frag));
 		cur_p->app0 = 0;
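
The fragment error path above has to undo every mapping already set up for this
skb before bailing out. A self-contained sketch of that rollback pattern, for
readers following the backwards walk through the ring (RING_SIZE, struct ring_bd
and unwind_mapped_frags() are hypothetical stand-ins for TX_BD_NUM and the
driver's descriptor handling):

#include <linux/types.h>
#include <linux/dma-mapping.h>

#define RING_SIZE 64	/* assumed ring size, stands in for TX_BD_NUM */

struct ring_bd {	/* only the fields the rollback needs */
	__be32 phys;
	__be32 len;
};

/* Called with *tail pointing at the last descriptor that was
 * successfully filled and mapped_frags fragments mapped before the
 * failure.  Walks backwards around the ring, unmapping each fragment
 * and finally the linear (head) part of the skb.
 */
static void unwind_mapped_frags(struct device *dev, struct ring_bd *ring,
				int *tail, int mapped_frags,
				unsigned int head_len)
{
	struct ring_bd *cur = &ring[*tail];
	int i;

	for (i = 0; i < mapped_frags; i++) {
		dma_unmap_single(dev, be32_to_cpu(cur->phys),
				 be32_to_cpu(cur->len), DMA_TO_DEVICE);
		if (--(*tail) < 0)
			*tail = RING_SIZE - 1;
		cur = &ring[*tail];
	}

	dma_unmap_single(dev, be32_to_cpu(cur->phys), head_len,
			 DMA_TO_DEVICE);
}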