
[06/14] net: axienet: Check for DMA mapping errors

Message ID 20200110115415.75683-7-andre.przywara@arm.com (mailing list archive)
State New, archived
Series net: axienet: Error handling, SGMII and 64-bit DMA fixes

Commit Message

Andre Przywara Jan. 10, 2020, 11:54 a.m. UTC
Especially with the default 32-bit DMA mask, DMA buffers are a limited
resource, so their allocation can fail.
As the DMA API documentation requires, add error checking code after each
dma_map_single() call to catch the case where we run out of "low" memory.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 .../net/ethernet/xilinx/xilinx_axienet_main.c | 22 ++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)
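
For background, the DMA API requires that every address returned by
dma_map_single() be checked with dma_mapping_error() before it is used. A
minimal sketch of that pattern (illustrative only, not the driver's exact
code):

	dma_addr_t addr;

	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		/* Mapping failed, e.g. no IOVA space or bounce buffer left. */
		return -ENOMEM;
	}
	/* ... use addr for DMA, then unmap it again: */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);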

Comments

Radhey Shyam Pandey Jan. 13, 2020, 5:54 a.m. UTC | #1
> -----Original Message-----
> From: Andre Przywara <andre.przywara@arm.com>
> Sent: Friday, January 10, 2020 5:24 PM
> To: David S. Miller <davem@davemloft.net>; Radhey Shyam Pandey <radheys@xilinx.com>
> Cc: Michal Simek <michals@xilinx.com>; Robert Hancock <hancock@sedsystems.ca>;
> netdev@vger.kernel.org; linux-arm-kernel@lists.infradead.org;
> linux-kernel@vger.kernel.org
> Subject: [PATCH 06/14] net: axienet: Check for DMA mapping errors
> 
> Especially with the default 32-bit DMA mask, DMA buffers are a limited
> resource, so their allocation can fail.
> As the DMA API documentation requires, add error checking code after each
> dma_map_single() call to catch the case where we run out of "low" memory.
> 
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  .../net/ethernet/xilinx/xilinx_axienet_main.c | 22 ++++++++++++++++++-
>  1 file changed, 21 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
> index 82abe2b0f16a..8d2b67cbecf9 100644
> --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
> +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
> @@ -248,6 +248,11 @@ static int axienet_dma_bd_init(struct net_device *ndev)
>  						     skb->data,
>  						     lp->max_frm_size,
>  						     DMA_FROM_DEVICE);
> +		if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {

Prefer using the unlikely() compiler hint for dma_mapping_error().
Also, we need to add an error print to report this condition to the user,
in case it isn't already done in the dma_mapping_error() implementation.
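
For illustration, the suggested form would be something like this (the
netdev_err() message text is just an example):

	if (unlikely(dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys))) {
		netdev_err(ndev, "RX buffer DMA mapping failed\n");
		dev_kfree_skb(skb);
		goto out;
	}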

> +			dev_kfree_skb(skb);

Freeing the skb is already handled in _release. Can we reuse that?
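
For illustration: if axienet_dma_bd_release() already frees the skbs stored
in the RX ring (lp->rx_bd_v[i].skb is assigned before the mapping here), the
inline free could indeed be dropped, something like:

	if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys))
		goto out;	/* skb is freed via rx_bd_v[i].skb in _release */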
> +			goto out;
> +		}
> +
>  		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
>  	}
> 
> @@ -668,6 +673,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>  	dma_addr_t tail_p;
>  	struct axienet_local *lp = netdev_priv(ndev);
>  	struct axidma_bd *cur_p;
> +	u32 orig_tail_ptr = lp->tx_bd_tail;
> 
>  	num_frag = skb_shinfo(skb)->nr_frags;
>  	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
> @@ -703,9 +709,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>  		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
>  	}
> 
> -	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
>  	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
>  				     skb_headlen(skb), DMA_TO_DEVICE);
> +	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
> +		return NETDEV_TX_BUSY;

This is not really a TX busy condition; it is a problem with the available
memory mappings in the system. I just looked at other ethernet drivers, and
they seem to return NETDEV_TX_OK with the drop count stats incremented.
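
That drop-and-count pattern would look something like this (hypothetical
sketch; the error message and rate limiting are illustrative):

	if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping failed\n");
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}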

> +	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
> 
>  	for (ii = 0; ii < num_frag; ii++) {
>  		if (++lp->tx_bd_tail >= lp->tx_bd_num)
> @@ -716,6 +724,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
>  					     skb_frag_address(frag),
>  					     skb_frag_size(frag),
>  					     DMA_TO_DEVICE);
> +		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
> +			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
> +					      NULL);
> +			lp->tx_bd_tail = orig_tail_ptr;
> +
> +			return NETDEV_TX_BUSY;
> +		}
>  		cur_p->cntrl = skb_frag_size(frag);
>  	}
> 
> @@ -796,6 +811,11 @@ static void axienet_recv(struct net_device *ndev)
>  		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
>  					     lp->max_frm_size,
>  					     DMA_FROM_DEVICE);
> +		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
> +			dev_kfree_skb(new_skb);
> +			return;
> +		}
> +
>  		cur_p->cntrl = lp->max_frm_size;
>  		cur_p->status = 0;
>  		cur_p->skb = new_skb;
> --
> 2.17.1

Patch

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 82abe2b0f16a..8d2b67cbecf9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -248,6 +248,11 @@  static int axienet_dma_bd_init(struct net_device *ndev)
 						     skb->data,
 						     lp->max_frm_size,
 						     DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+			dev_kfree_skb(skb);
+			goto out;
+		}
+
 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 	}
 
@@ -668,6 +673,7 @@  axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	dma_addr_t tail_p;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
+	u32 orig_tail_ptr = lp->tx_bd_tail;
 
 	num_frag = skb_shinfo(skb)->nr_frags;
 	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -703,9 +709,11 @@  axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 	}
 
-	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(ndev->dev.parent, cur_p->phys))
+		return NETDEV_TX_BUSY;
+	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
 
 	for (ii = 0; ii < num_frag; ii++) {
 		if (++lp->tx_bd_tail >= lp->tx_bd_num)
@@ -716,6 +724,13 @@  axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 					     skb_frag_address(frag),
 					     skb_frag_size(frag),
 					     DMA_TO_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
+			axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
+					      NULL);
+			lp->tx_bd_tail = orig_tail_ptr;
+
+			return NETDEV_TX_BUSY;
+		}
 		cur_p->cntrl = skb_frag_size(frag);
 	}
 
@@ -796,6 +811,11 @@  static void axienet_recv(struct net_device *ndev)
 		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
 					     lp->max_frm_size,
 					     DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) {
+			dev_kfree_skb(new_skb);
+			return;
+		}
+
 		cur_p->cntrl = lp->max_frm_size;
 		cur_p->status = 0;
 		cur_p->skb = new_skb;