[net,v2,4/9] net: axienet: add missing memory barriers

Message ID 20220112173700.873002-5-robert.hancock@calian.com
State New, archived
Series Xilinx axienet fixes

Commit Message

Robert Hancock Jan. 12, 2022, 5:36 p.m. UTC
This driver was missing some required memory barriers:

Use dma_rmb to ensure we see all updates to the descriptor after we see
that an entry has been completed.

Use wmb and rmb to avoid stale descriptor status between the TX path and
TX complete IRQ path.

Fixes: 8a3b7a252dca9 ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver")
Signed-off-by: Robert Hancock <robert.hancock@calian.com>
---
 drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
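
For readers less familiar with these barriers, the pattern being enforced on the completion side looks roughly like the sketch below. This is an illustration only, using a hypothetical descriptor layout (demo_bd), a hypothetical completion bit and a hypothetical helper, not the axienet structures themselves:

#include <linux/types.h>
#include <asm/barrier.h>

#define DEMO_BD_STS_COMPLETE	0x80000000	/* hypothetical "done" bit */

struct demo_bd {			/* hypothetical descriptor layout */
	u32 status;			/* written back by the DMA engine */
	u32 cntrl;
	void *skb;
};

void demo_unmap_and_free(u32 cntrl, void *skb);	/* hypothetical helper */

static bool demo_reap(struct demo_bd *bd)
{
	if (!(bd->status & DEMO_BD_STS_COMPLETE))
		return false;

	/* Order the reads of cntrl/skb after the completion check above,
	 * so we never consume descriptor fields from before the device
	 * finished writing the descriptor back.
	 */
	dma_rmb();

	demo_unmap_and_free(bd->cntrl, bd->skb);
	return true;
}

The wmb()/rmb() pair added on the TX side follows the same idea between the completion IRQ path and the transmit path, as discussed in the review comments below.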

Comments

Radhey Shyam Pandey Jan. 13, 2022, 12:09 p.m. UTC | #1
> -----Original Message-----
> From: Robert Hancock <robert.hancock@calian.com>
> Sent: Wednesday, January 12, 2022 11:07 PM
> To: netdev@vger.kernel.org
> Cc: Radhey Shyam Pandey <radheys@xilinx.com>; davem@davemloft.net;
> kuba@kernel.org; linux-arm-kernel@lists.infradead.org; Michal Simek
> <michals@xilinx.com>; ariane.keller@tik.ee.ethz.ch; daniel@iogearbox.net;
> Robert Hancock <robert.hancock@calian.com>
> Subject: [PATCH net v2 4/9] net: axienet: add missing memory barriers
> 
> This driver was missing some required memory barriers:
> 
> Use dma_rmb to ensure we see all updates to the descriptor after we see that
> an entry has been completed.
> 
> Use wmb and rmb to avoid stale descriptor status between the TX path and TX
> complete IRQ path.
> 
> Fixes: 8a3b7a252dca9 ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet
> driver")
> Signed-off-by: Robert Hancock <robert.hancock@calian.com>
> ---
>  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 11 ++++++++++-
>  1 file changed, 10 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
> index f4ae035bed35..de8f85175a6c 100644
> --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
> +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
> @@ -632,6 +632,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
>  		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
>  			break;
> 
> +		/* Ensure we see complete descriptor update */
> +		dma_rmb();
>  		phys = desc_get_phys_addr(lp, cur_p);
>  		dma_unmap_single(ndev->dev.parent, phys,
>  				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
> @@ -645,8 +647,10 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
>  		cur_p->app1 = 0;
>  		cur_p->app2 = 0;
>  		cur_p->app4 = 0;
> -		cur_p->status = 0;
>  		cur_p->skb = NULL;
> +		/* ensure our transmit path and device don't prematurely see status cleared */
> +		wmb();
> +		cur_p->status = 0;

Any reason for moving status initialization down?

> 
>  		if (sizep)
>  			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
> @@ -704,6 +708,9 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
>  					    int num_frag)
>  {
>  	struct axidma_bd *cur_p;
> +
> +	/* Ensure we see all descriptor updates from device or TX IRQ path */
> +	rmb();
>  	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
>  	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
>  		return NETDEV_TX_BUSY;
> @@ -843,6 +850,8 @@ static void axienet_recv(struct net_device *ndev)
> 
>  		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
> 
> +		/* Ensure we see complete descriptor update */
> +		dma_rmb();
>  		phys = desc_get_phys_addr(lp, cur_p);
>  		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
>  				 DMA_FROM_DEVICE);

Ideally we would also need a write barrier in the xmit function just before
updating the tail descriptor pointer.

> --
> 2.31.1
Robert Hancock Jan. 13, 2022, 4:22 p.m. UTC | #2
On Thu, 2022-01-13 at 12:09 +0000, Radhey Shyam Pandey wrote:
> > Subject: [PATCH net v2 4/9] net: axienet: add missing memory barriers
> > 
> > [...]
> > 
> >  		cur_p->app1 = 0;
> >  		cur_p->app2 = 0;
> >  		cur_p->app4 = 0;
> > -		cur_p->status = 0;
> >  		cur_p->skb = NULL;
> > +		/* ensure our transmit path and device don't prematurely see status cleared */
> > +		wmb();
> > +		cur_p->status = 0;
> 
> Any reason for moving status initialization down?

Probably not strictly necessary, but the idea was to ensure that any of the
other writes to the descriptor were visible before the device saw the status
being cleared (indicating it is available to be read by the device).
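
As a rough illustration of the pairing involved (a sketch reusing the hypothetical demo_bd layout from earlier on this page, not the driver code): the completion path publishes its other resets before clearing status, and the submit path refreshes its view of memory before trusting status.

/* TX completion / recycle side (mirrors the ordering in axienet_free_tx_chain) */
static void demo_recycle(struct demo_bd *bd)
{
	bd->cntrl = 0;
	bd->skb = NULL;
	wmb();			/* make the resets above visible first...  */
	bd->status = 0;		/* ...then mark the slot as free for reuse */
}

/* TX submit side (mirrors the check in axienet_check_tx_bd_space) */
static bool demo_slot_busy(struct demo_bd *bd)
{
	rmb();			/* pick up the freshest status written by the IRQ path */
	return bd->status != 0;
}

With that ordering, once the submit path observes status == 0 it can also rely on skb and cntrl already having been reset.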

> 
> > [...]
> 
> Ideally we would also need a write barrier in the xmit function just before
> updating the tail descriptor pointer.

I don't think it should be needed there because there is an implicit barrier on
the MMIO write.
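
For background (a sketch with hypothetical register offset and flag names, not the driver's own accessors): Linux's writel() orders prior CPU writes to coherent memory ahead of the MMIO store, which is why the tail pointer update acts as its own barrier, while the _relaxed() accessor would not.

/* Descriptor fields are written out first... */
bd->cntrl = len | DEMO_BD_CTRL_SOF | DEMO_BD_CTRL_EOF;	/* hypothetical flags */

/* ...then the doorbell. writel() itself guarantees the descriptor writes
 * above are visible to the device before the MMIO store lands.
 */
writel(lower_32_bits(tail_phys), lp->dma_regs + DEMO_TX_TDESC);	/* hypothetical offset */

/* The relaxed accessor does not give that guarantee; it would need an
 * explicit barrier first:
 *
 *	dma_wmb();
 *	writel_relaxed(lower_32_bits(tail_phys), lp->dma_regs + DEMO_TX_TDESC);
 */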

> 
> > --
> > 2.31.1

Patch

diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index f4ae035bed35..de8f85175a6c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -632,6 +632,8 @@  static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			break;
 
+		/* Ensure we see complete descriptor update */
+		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
 		dma_unmap_single(ndev->dev.parent, phys,
 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
@@ -645,8 +647,10 @@  static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		cur_p->app1 = 0;
 		cur_p->app2 = 0;
 		cur_p->app4 = 0;
-		cur_p->status = 0;
 		cur_p->skb = NULL;
+		/* ensure our transmit path and device don't prematurely see status cleared */
+		wmb();
+		cur_p->status = 0;
 
 		if (sizep)
 			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
@@ -704,6 +708,9 @@  static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
 					    int num_frag)
 {
 	struct axidma_bd *cur_p;
+
+	/* Ensure we see all descriptor updates from device or TX IRQ path */
+	rmb();
 	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
 	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
 		return NETDEV_TX_BUSY;
@@ -843,6 +850,8 @@  static void axienet_recv(struct net_device *ndev)
 
 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
 
+		/* Ensure we see complete descriptor update */
+		dma_rmb();
 		phys = desc_get_phys_addr(lp, cur_p);
 		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
 				 DMA_FROM_DEVICE);