
[v2] net: stmmac: only enable DMA interrupts when ready

Message ID 20220224113829.1092859-1-vincent.whitchurch@axis.com (mailing list archive)
State New, archived

Commit Message

Vincent Whitchurch Feb. 24, 2022, 11:38 a.m. UTC
In this driver's ->ndo_open() callback, it enables DMA interrupts,
starts the DMA channels, then requests interrupts with request_irq(),
and then finally enables napi.

If RX DMA interrupts are received before napi is enabled, no processing
is done because napi_schedule_prep() will return false.  If the network
has a lot of broadcast/multicast traffic, then the RX ring could fill up
completely before napi is enabled.  When this happens, no further RX
interrupts will be delivered, and the driver will fail to receive any
packets.

Fix this by only enabling DMA interrupts after all other initialization
is complete.

Fixes: 523f11b5d4fd72efb ("net: stmmac: move hardware setup for stmmac_open to new function")
Reported-by: Lars Persson <larper@axis.com>
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
---
 .../net/ethernet/stmicro/stmmac/stmmac_main.c | 28 +++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)
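
For context, the stall described above comes from how the RX interrupt is
handed off to NAPI.  The following is a simplified, illustrative sketch of
that dispatch logic (not the exact stmmac handler; the function name and the
rx-only flags passed to stmmac_disable_dma_irq() are assumptions made for
illustration):

/* Illustrative sketch only -- not the exact driver code. */
static void rx_irq_to_napi(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_channel *ch = &priv->channel[chan];
	unsigned long flags;

	/* While NAPI is still disabled, napi_schedule_prep() returns false,
	 * so the body below never runs: the RX ring is not drained, and once
	 * it fills up the hardware raises no further RX interrupts. */
	if (napi_schedule_prep(&ch->rx_napi)) {
		/* Mask RX DMA interrupts until the poll routine has drained
		 * the ring, then schedule the poll in softirq context. */
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(&ch->rx_napi);
	}
}

With the patch, stmmac_init_dma_engine() leaves the DMA interrupts masked,
and they are only unmasked by stmmac_enable_all_dma_irq() after request_irq()
and stmmac_enable_all_queues() have run, so this window no longer exists.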

Comments

Denis Kirjanov Feb. 24, 2022, 12:53 p.m. UTC | #1
2/24/22 14:38, Vincent Whitchurch wrote:
> In this driver's ->ndo_open() callback, it enables DMA interrupts,
> starts the DMA channels, then requests interrupts with request_irq(),
> and then finally enables napi.
> 
> If RX DMA interrupts are received before napi is enabled, no processing
> is done because napi_schedule_prep() will return false.  If the network
> has a lot of broadcast/multicast traffic, then the RX ring could fill up
> completely before napi is enabled.  When this happens, no further RX
> interrupts will be delivered, and the driver will fail to receive any
> packets.
> 
> Fix this by only enabling DMA interrupts after all other initialization
> is complete.
> 
> Fixes: 523f11b5d4fd72efb ("net: stmmac: move hardware setup for stmmac_open to new function")
> Reported-by: Lars Persson <larper@axis.com>
> Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
> ---
>   .../net/ethernet/stmicro/stmmac/stmmac_main.c | 28 +++++++++++++++++--
>   1 file changed, 26 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
> index 6708ca2aa4f7..43978558d6c0 100644
> --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
> +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
> @@ -2260,6 +2260,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
>   	stmmac_stop_tx(priv, priv->ioaddr, chan);
>   }
>   
> +static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
> +{
> +	u32 rx_channels_count = priv->plat->rx_queues_to_use;
> +	u32 tx_channels_count = priv->plat->tx_queues_to_use;
> +	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
> +	u32 chan;
> +
> +	for (chan = 0; chan < dma_csr_ch; chan++) {
> +		struct stmmac_channel *ch = &priv->channel[chan];
> +		unsigned long flags;
> +
> +		spin_lock_irqsave(&ch->lock, flags);
> +		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
> +		spin_unlock_irqrestore(&ch->lock, flags);
> +	}
> +}
> +
>   /**
>    * stmmac_start_all_dma - start all RX and TX DMA channels
>    * @priv: driver private structure
> @@ -2902,8 +2919,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
>   		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
>   
>   	/* DMA CSR Channel configuration */
> -	for (chan = 0; chan < dma_csr_ch; chan++)
> +	for (chan = 0; chan < dma_csr_ch; chan++) {
>   		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
Did you forget to take the channel lock here?
> +		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
> +	}
>   
>   	/* DMA RX Channel Configuration */
>   	for (chan = 0; chan < rx_channels_count; chan++) {
> @@ -3759,6 +3778,7 @@ static int stmmac_open(struct net_device *dev)
>   
>   	stmmac_enable_all_queues(priv);
>   	netif_tx_start_all_queues(priv->dev);
> +	stmmac_enable_all_dma_irq(priv);
>   
>   	return 0;
>   
> @@ -6508,8 +6528,10 @@ int stmmac_xdp_open(struct net_device *dev)
>   	}
>   
>   	/* DMA CSR Channel configuration */
> -	for (chan = 0; chan < dma_csr_ch; chan++)
> +	for (chan = 0; chan < dma_csr_ch; chan++) {
>   		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
> +		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
> +	}
>   
>   	/* Adjust Split header */
>   	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
> @@ -6570,6 +6592,7 @@ int stmmac_xdp_open(struct net_device *dev)
>   	stmmac_enable_all_queues(priv);
>   	netif_carrier_on(dev);
>   	netif_tx_start_all_queues(dev);
> +	stmmac_enable_all_dma_irq(priv);
>   
>   	return 0;
>   
> @@ -7447,6 +7470,7 @@ int stmmac_resume(struct device *dev)
>   	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
>   
>   	stmmac_enable_all_queues(priv);
> +	stmmac_enable_all_dma_irq(priv);
>   
>   	mutex_unlock(&priv->lock);
>   	rtnl_unlock();
Vincent Whitchurch Feb. 24, 2022, 1:40 p.m. UTC | #2
On Thu, Feb 24, 2022 at 01:53:33PM +0100, Denis Kirjanov wrote:
> 2/24/22 14:38, Vincent Whitchurch wrote:
> > diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
> > index 6708ca2aa4f7..43978558d6c0 100644
> > --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
> > +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
> > @@ -2260,6 +2260,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
> >   	stmmac_stop_tx(priv, priv->ioaddr, chan);
> >   }
> >   
> > +static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
> > +{
> > +	u32 rx_channels_count = priv->plat->rx_queues_to_use;
> > +	u32 tx_channels_count = priv->plat->tx_queues_to_use;
> > +	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
> > +	u32 chan;
> > +
> > +	for (chan = 0; chan < dma_csr_ch; chan++) {
> > +		struct stmmac_channel *ch = &priv->channel[chan];
> > +		unsigned long flags;
> > +
> > +		spin_lock_irqsave(&ch->lock, flags);
> > +		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
> > +		spin_unlock_irqrestore(&ch->lock, flags);
> > +	}
> > +}
> > +
> >   /**
> >    * stmmac_start_all_dma - start all RX and TX DMA channels
> >    * @priv: driver private structure
> > @@ -2902,8 +2919,10 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
> >   		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
> >   
> >   	/* DMA CSR Channel configuration */
> > -	for (chan = 0; chan < dma_csr_ch; chan++)
> > +	for (chan = 0; chan < dma_csr_ch; chan++) {
> >   		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
> Did you forget to take the channel lock here?

I left the lock out on purpose.  At this point during initialization there
is nothing that can race with the register write in
stmmac_disable_dma_irq().  The call to stmmac_init_chan() (in the
existing code) writes the same register without the lock.

> > +		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
> > +	}
> >
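
In other words, the new stmmac_disable_dma_irq() call runs in the same
single-threaded setup context as stmmac_init_chan(), before request_irq() and
before NAPI is enabled, so nothing else can touch the channel yet; the lock
only matters once the interrupt handler and the NAPI poll routine are live.
A rough illustration of the two contexts (illustrative only, not a literal
excerpt from the driver):

/* Setup context (stmmac_init_dma_engine()): IRQs not yet requested and
 * NAPI not yet enabled -- nothing can race with this write. */
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);

/* Runtime context (stmmac_enable_all_dma_irq() and the IRQ/NAPI paths):
 * the same interrupt-enable register is now also modified from interrupt
 * context, so the per-channel lock serialises the accesses. */
spin_lock_irqsave(&ch->lock, flags);
stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
spin_unlock_irqrestore(&ch->lock, flags);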
patchwork-bot+netdevbpf@kernel.org Feb. 25, 2022, 10:50 a.m. UTC | #3
Hello:

This patch was applied to netdev/net.git (master)
by David S. Miller <davem@davemloft.net>:

On Thu, 24 Feb 2022 12:38:29 +0100 you wrote:
> In this driver's ->ndo_open() callback, it enables DMA interrupts,
> starts the DMA channels, then requests interrupts with request_irq(),
> and then finally enables napi.
> 
> If RX DMA interrupts are received before napi is enabled, no processing
> is done because napi_schedule_prep() will return false.  If the network
> has a lot of broadcast/multicast traffic, then the RX ring could fill up
> completely before napi is enabled.  When this happens, no further RX
> interrupts will be delivered, and the driver will fail to receive any
> packets.
> 
> [...]

Here is the summary with links:
  - [v2] net: stmmac: only enable DMA interrupts when ready
    https://git.kernel.org/netdev/net/c/087a7b944c5d

You are awesome, thank you!

Patch

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6708ca2aa4f7..43978558d6c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2260,6 +2260,23 @@  static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
 	stmmac_stop_tx(priv, priv->ioaddr, chan);
 }
 
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+	u32 rx_channels_count = priv->plat->rx_queues_to_use;
+	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+	u32 chan;
+
+	for (chan = 0; chan < dma_csr_ch; chan++) {
+		struct stmmac_channel *ch = &priv->channel[chan];
+		unsigned long flags;
+
+		spin_lock_irqsave(&ch->lock, flags);
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+		spin_unlock_irqrestore(&ch->lock, flags);
+	}
+}
+
 /**
  * stmmac_start_all_dma - start all RX and TX DMA channels
  * @priv: driver private structure
@@ -2902,8 +2919,10 @@  static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
 
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 
 	/* DMA RX Channel Configuration */
 	for (chan = 0; chan < rx_channels_count; chan++) {
@@ -3759,6 +3778,7 @@  static int stmmac_open(struct net_device *dev)
 
 	stmmac_enable_all_queues(priv);
 	netif_tx_start_all_queues(priv->dev);
+	stmmac_enable_all_dma_irq(priv);
 
 	return 0;
 
@@ -6508,8 +6528,10 @@  int stmmac_xdp_open(struct net_device *dev)
 	}
 
 	/* DMA CSR Channel configuration */
-	for (chan = 0; chan < dma_csr_ch; chan++)
+	for (chan = 0; chan < dma_csr_ch; chan++) {
 		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+	}
 
 	/* Adjust Split header */
 	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
@@ -6570,6 +6592,7 @@  int stmmac_xdp_open(struct net_device *dev)
 	stmmac_enable_all_queues(priv);
 	netif_carrier_on(dev);
 	netif_tx_start_all_queues(dev);
+	stmmac_enable_all_dma_irq(priv);
 
 	return 0;
 
@@ -7447,6 +7470,7 @@  int stmmac_resume(struct device *dev)
 	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
 
 	stmmac_enable_all_queues(priv);
+	stmmac_enable_all_dma_irq(priv);
 
 	mutex_unlock(&priv->lock);
 	rtnl_unlock();