[net,v3] ravb: Fix races between ravb_tx_timeout_work() and net related ops

Message ID 20231115022644.2316961-1-yoshihiro.shimoda.uh@renesas.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series [net,v3] ravb: Fix races between ravb_tx_timeout_work() and net related ops

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for net
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1134 this patch: 1134
netdev/cc_maintainers warning 2 maintainers not CCed: wsa+renesas@sang-engineering.com geert+renesas@glider.be
netdev/build_clang success Errors and warnings before: 1161 this patch: 1161
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 1161 this patch: 1161
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Yoshihiro Shimoda Nov. 15, 2023, 2:26 a.m. UTC
Fix races between ravb_tx_timeout_work() and functions of net_device_ops
and ethtool_ops by using rtnl_trylock() and rtnl_unlock(). Note that
since ravb_close() is under the rtnl lock and calls cancel_work_sync(),
ravb_tx_timeout_work() should call rtnl_trylock(). Otherwise, a deadlock
may happen in ravb_tx_timeout_work() like below:

CPU0			CPU1
			ravb_tx_timeout()
			schedule_work()
...
__dev_close_many()
// Under rtnl lock
ravb_close()
cancel_work_sync()
// Waiting
			ravb_tx_timeout_work()
			rtnl_lock()
			// This may cause a deadlock

Also, if rtnl_trylock() fails and the netif is still running, reschedule
the work with a 1 msec delay. To allow this, use schedule_delayed_work()
instead of schedule_work().

Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
Reviewed-by: Simon Horman <horms@kernel.org>
---
Changes from v2:
https://lore.kernel.org/netdev/20231019113308.1133944-1-yoshihiro.shimoda.uh@renesas.com/
 - Add rescheduling if rtnl_trylock() fails and the netif is still running,
   and update the commit description accordingly.
 - Add Reviewed-by tags.

Changes from v1:
https://lore.kernel.org/all/20231017085341.813335-1-yoshihiro.shimoda.uh@renesas.com/
 - Modify commit description.
 - Use goto in an error path.

 drivers/net/ethernet/renesas/ravb.h      |  2 +-
 drivers/net/ethernet/renesas/ravb_main.c | 20 +++++++++++++++-----
 2 files changed, 16 insertions(+), 6 deletions(-)

Comments

Sergey Shtylyov Nov. 15, 2023, 6:37 p.m. UTC | #1
On 11/15/23 5:26 AM, Yoshihiro Shimoda wrote:

> Fix races between ravb_tx_timeout_work() and functions of net_device_ops
> and ethtool_ops by using rtnl_trylock() and rtnl_unlock(). Note that
> since ravb_close() is under the rtnl lock and calls cancel_work_sync(),
> ravb_tx_timeout_work() should calls rtnl_trylock(). Otherwise, a deadlock
> may happen in ravb_tx_timeout_work() like below:
> 
> CPU0			CPU1
> 			ravb_tx_timeout()
> 			schedule_work()
> ...
> __dev_close_many()
> // Under rtnl lock
> ravb_close()
> cancel_work_sync()
> // Waiting
> 			ravb_tx_timeout_work()
> 			rtnl_lock()
> 			// This is possible to cause a deadlock
> 
> And, if rtnl_trylock() fails and the netif is still running,
> rescheduling the work with 1 msec delayed. So, using
> schedule_delayed_work() instead of schedule_work().
> 
> Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
> Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
> Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>

   Hm, I haven't reviewed this version... :-)

> Reviewed-by: Simon Horman <horms@kernel.org>
[...]
> diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
> index e0f8276cffed..e9bb8ee3ba2d 100644
> --- a/drivers/net/ethernet/renesas/ravb.h
> +++ b/drivers/net/ethernet/renesas/ravb.h
> @@ -1081,7 +1081,7 @@ struct ravb_private {
>  	u32 cur_tx[NUM_TX_QUEUE];
>  	u32 dirty_tx[NUM_TX_QUEUE];
>  	struct napi_struct napi[NUM_RX_QUEUE];
> -	struct work_struct work;
> +	struct delayed_work work;

   Not sure this is justified...
   Then again, what do I know about workqueues? Not much... :-)

[...]
> diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
> index c70cff80cc99..ca7db8a5b412 100644
> --- a/drivers/net/ethernet/renesas/ravb_main.c
> +++ b/drivers/net/ethernet/renesas/ravb_main.c
> @@ -1863,17 +1863,24 @@ static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
>  	/* tx_errors count up */
>  	ndev->stats.tx_errors++;
>  
> -	schedule_work(&priv->work);
> +	schedule_delayed_work(&priv->work, 0);
>  }
>  
>  static void ravb_tx_timeout_work(struct work_struct *work)
>  {
> -	struct ravb_private *priv = container_of(work, struct ravb_private,
> +	struct delayed_work *dwork = to_delayed_work(work);
> +	struct ravb_private *priv = container_of(dwork, struct ravb_private,
>  						 work);
>  	const struct ravb_hw_info *info = priv->info;
>  	struct net_device *ndev = priv->ndev;
>  	int error;
>  
> +	if (!rtnl_trylock()) {
> +		if (netif_running(ndev))
> +			schedule_delayed_work(&priv->work, msecs_to_jiffies(10));

   The delay is rather arbitrary. Why not e.g. 1 ms?

[...]

MBR, Sergey
Yoshihiro Shimoda Nov. 16, 2023, 2:43 a.m. UTC | #2
Hello,

> From: Sergey Shtylyov, Sent: Thursday, November 16, 2023 3:37 AM
> 
> On 11/15/23 5:26 AM, Yoshihiro Shimoda wrote:
> 
> > Fix races between ravb_tx_timeout_work() and functions of net_device_ops
> > and ethtool_ops by using rtnl_trylock() and rtnl_unlock(). Note that
> > since ravb_close() is under the rtnl lock and calls cancel_work_sync(),
> > ravb_tx_timeout_work() should calls rtnl_trylock(). Otherwise, a deadlock
> > may happen in ravb_tx_timeout_work() like below:
> >
> > CPU0			CPU1
> > 			ravb_tx_timeout()
> > 			schedule_work()
> > ...
> > __dev_close_many()
> > // Under rtnl lock
> > ravb_close()
> > cancel_work_sync()
> > // Waiting
> > 			ravb_tx_timeout_work()
> > 			rtnl_lock()
> > 			// This is possible to cause a deadlock
> >
> > And, if rtnl_trylock() fails and the netif is still running,
> > rescheduling the work with 1 msec delayed. So, using
> > schedule_delayed_work() instead of schedule_work().
> >
> > Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
> > Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
> > Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
> 
>    Hm, I haven't reviewed this version... :-)

Oops, I should have dropped the tag...

> > Reviewed-by: Simon Horman <horms@kernel.org>
> [...]
> > diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
> > index e0f8276cffed..e9bb8ee3ba2d 100644
> > --- a/drivers/net/ethernet/renesas/ravb.h
> > +++ b/drivers/net/ethernet/renesas/ravb.h
> > @@ -1081,7 +1081,7 @@ struct ravb_private {
> >  	u32 cur_tx[NUM_TX_QUEUE];
> >  	u32 dirty_tx[NUM_TX_QUEUE];
> >  	struct napi_struct napi[NUM_RX_QUEUE];
> > -	struct work_struct work;
> > +	struct delayed_work work;
> 
>    Not sure this is justified...
>    Then again, what do I know about workqueues? Not much... :-)

I thought that schedule_work() called the work function immediately, so
calling schedule_work() from the work function would cause an endless loop.
However, that is not true. schedule_work() just queues the work, and the
kernel calls the work function later.

So I now think that changing from work_struct to delayed_work is not needed
to fix this issue. However, I have another concern about rescheduling this
work with schedule_work() here, because it could cause a high CPU load while
the rtnl lock is held. So I think we should call a sleep function, for
instance usleep_range(1000, 2000), before schedule_work(). What do you think?
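
For example, a rough sketch of what I mean, keeping struct work_struct as is
(untested; the sleep range and the netif_running() check are only examples):

	if (!rtnl_trylock()) {
		/* e.g. ravb_close() holds the rtnl lock; sleep briefly
		 * instead of requeuing immediately, then try again.
		 */
		usleep_range(1000, 2000);
		if (netif_running(ndev))
			schedule_work(&priv->work);
		return;
	}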

> [...]
> > diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
> > index c70cff80cc99..ca7db8a5b412 100644
> > --- a/drivers/net/ethernet/renesas/ravb_main.c
> > +++ b/drivers/net/ethernet/renesas/ravb_main.c
> > @@ -1863,17 +1863,24 @@ static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
> >  	/* tx_errors count up */
> >  	ndev->stats.tx_errors++;
> >
> > -	schedule_work(&priv->work);
> > +	schedule_delayed_work(&priv->work, 0);
> >  }
> >
> >  static void ravb_tx_timeout_work(struct work_struct *work)
> >  {
> > -	struct ravb_private *priv = container_of(work, struct ravb_private,
> > +	struct delayed_work *dwork = to_delayed_work(work);
> > +	struct ravb_private *priv = container_of(dwork, struct ravb_private,
> >  						 work);
> >  	const struct ravb_hw_info *info = priv->info;
> >  	struct net_device *ndev = priv->ndev;
> >  	int error;
> >
> > +	if (!rtnl_trylock()) {
> > +		if (netif_running(ndev))
> > +			schedule_delayed_work(&priv->work, msecs_to_jiffies(10));
> 
>    The delay is rather arbitrary. Why not e.g. 1 ms?

I think that 1 ms is enough.

Best regards,
Yoshihiro Shimoda

> [...]
> 
> MBR, Sergey
Sergey Shtylyov Nov. 16, 2023, 6:21 p.m. UTC | #3
On 11/16/23 5:43 AM, Yoshihiro Shimoda wrote:
[...]

>>> Fix races between ravb_tx_timeout_work() and functions of net_device_ops
>>> and ethtool_ops by using rtnl_trylock() and rtnl_unlock(). Note that
>>> since ravb_close() is under the rtnl lock and calls cancel_work_sync(),
>>> ravb_tx_timeout_work() should calls rtnl_trylock(). Otherwise, a deadlock
>>> may happen in ravb_tx_timeout_work() like below:
>>>
>>> CPU0			CPU1
>>> 			ravb_tx_timeout()
>>> 			schedule_work()
>>> ...
>>> __dev_close_many()
>>> // Under rtnl lock
>>> ravb_close()
>>> cancel_work_sync()
>>> // Waiting
>>> 			ravb_tx_timeout_work()
>>> 			rtnl_lock()
>>> 			// This is possible to cause a deadlock
>>>
>>> And, if rtnl_trylock() fails and the netif is still running,
>>> rescheduling the work with 1 msec delayed. So, using

   Ah, you say 1 ms here but 10 ms in the code! Not good... :-)

>>> schedule_delayed_work() instead of schedule_work().
>>>
>>> Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
>>> Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
>>> Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
>>
>>    Hm, I haven't reviewed this version... :-)
> 
> Oops, I should have dropped the tag...
> 
>>> Reviewed-by: Simon Horman <horms@kernel.org>
>> [...]
>>> diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
>>> index e0f8276cffed..e9bb8ee3ba2d 100644
>>> --- a/drivers/net/ethernet/renesas/ravb.h
>>> +++ b/drivers/net/ethernet/renesas/ravb.h
>>> @@ -1081,7 +1081,7 @@ struct ravb_private {
>>>  	u32 cur_tx[NUM_TX_QUEUE];
>>>  	u32 dirty_tx[NUM_TX_QUEUE];
>>>  	struct napi_struct napi[NUM_RX_QUEUE];
>>> -	struct work_struct work;
>>> +	struct delayed_work work;
>>
>>    Not sure this is justified...
>>    Then again, what do I know about workqueues? Not much... :-)
> 
> I thought that the schedule_work() called the work function immediately.
> So, I thought call*ing the schedule_work() from the work function caused
> endless loop. However, it is not true. The schedule_work() just inserts
> a work queue, and then the kernel calls the work function later.
> 
> So, changing from work_struct to delayed_work is not needed for fixing
> this issue, I think now. However, I have another concern about rescheduling
> this work by schedule_work() here because it's possible to cause high CPU load
> while the rtnl_lock() is held. So, I think we should call a sleep function
> like usleep_range(1000, 2000) for instance before schedule_work().
> But, what do you think?

   I think that a sleep before requeuing is pretty much the same as using
a delayed work...

>> [...]
>>> diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
>>> index c70cff80cc99..ca7db8a5b412 100644
>>> --- a/drivers/net/ethernet/renesas/ravb_main.c
>>> +++ b/drivers/net/ethernet/renesas/ravb_main.c
>>> @@ -1863,17 +1863,24 @@ static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
>>>  	/* tx_errors count up */
>>>  	ndev->stats.tx_errors++;
>>>
>>> -	schedule_work(&priv->work);
>>> +	schedule_delayed_work(&priv->work, 0);
>>>  }
>>>
>>>  static void ravb_tx_timeout_work(struct work_struct *work)
>>>  {
>>> -	struct ravb_private *priv = container_of(work, struct ravb_private,
>>> +	struct delayed_work *dwork = to_delayed_work(work);
>>> +	struct ravb_private *priv = container_of(dwork, struct ravb_private,
>>>  						 work);
>>>  	const struct ravb_hw_info *info = priv->info;
>>>  	struct net_device *ndev = priv->ndev;
>>>  	int error;
>>>
>>> +	if (!rtnl_trylock()) {
>>> +		if (netif_running(ndev))
>>> +			schedule_delayed_work(&priv->work, msecs_to_jiffies(10));

  You could reuse dwork instead of &priv->work here...
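
   I.e. something like this (just a sketch, with the 1 ms delay suggested below):

	schedule_delayed_work(dwork, msecs_to_jiffies(1));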

>>    The delay is rather arbitrary. Why not e.g. 1 ms?
> 
> I think that 1 ms is enough.

   Seeing now that 1 ms was intended...

> Best regards,
> Yoshihiro Shimoda

[...]

MBR, Sergey
Yoshihiro Shimoda Nov. 17, 2023, 12:16 a.m. UTC | #4
> From: Sergey Shtylyov, Sent: Friday, November 17, 2023 3:22 AM
> 
> On 11/16/23 5:43 AM, Yoshihiro Shimoda wrote:
> [...]
> 
> >>> Fix races between ravb_tx_timeout_work() and functions of net_device_ops
> >>> and ethtool_ops by using rtnl_trylock() and rtnl_unlock(). Note that
> >>> since ravb_close() is under the rtnl lock and calls cancel_work_sync(),
> >>> ravb_tx_timeout_work() should calls rtnl_trylock(). Otherwise, a deadlock
> >>> may happen in ravb_tx_timeout_work() like below:
> >>>
> >>> CPU0			CPU1
> >>> 			ravb_tx_timeout()
> >>> 			schedule_work()
> >>> ...
> >>> __dev_close_many()
> >>> // Under rtnl lock
> >>> ravb_close()
> >>> cancel_work_sync()
> >>> // Waiting
> >>> 			ravb_tx_timeout_work()
> >>> 			rtnl_lock()
> >>> 			// This is possible to cause a deadlock
> >>>
> >>> And, if rtnl_trylock() fails and the netif is still running,
> >>> rescheduling the work with 1 msec delayed. So, using
> 
>    Ah, you say 1 ms here but 10 ms in the code! Not good... :-)

Indeed...

> >>> schedule_delayed_work() instead of schedule_work().
> >>>
> >>> Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
> >>> Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
> >>> Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
> >>
> >>    Hm, I haven't reviewed this version... :-)
> >
> > Oops, I should have dropped the tag...
> >
> >>> Reviewed-by: Simon Horman <horms@kernel.org>
> >> [...]
> >>> diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
> >>> index e0f8276cffed..e9bb8ee3ba2d 100644
> >>> --- a/drivers/net/ethernet/renesas/ravb.h
> >>> +++ b/drivers/net/ethernet/renesas/ravb.h
> >>> @@ -1081,7 +1081,7 @@ struct ravb_private {
> >>>  	u32 cur_tx[NUM_TX_QUEUE];
> >>>  	u32 dirty_tx[NUM_TX_QUEUE];
> >>>  	struct napi_struct napi[NUM_RX_QUEUE];
> >>> -	struct work_struct work;
> >>> +	struct delayed_work work;
> >>
> >>    Not sure this is justified...
> >>    Then again, what do I know about workqueues? Not much... :-)
> >
> > I thought that the schedule_work() called the work function immediately.
> > So, I thought call*ing the schedule_work() from the work function caused
> > endless loop. However, it is not true. The schedule_work() just inserts
> > a work queue, and then the kernel calls the work function later.
> >
> > So, changing from work_struct to delayed_work is not needed for fixing
> > this issue, I think now. However, I have another concern about rescheduling
> > this work by schedule_work() here because it's possible to cause high CPU load
> > while the rtnl_lock() is held. So, I think we should call a sleep function
> > like usleep_range(1000, 2000) for instance before schedule_work().
> > But, what do you think?
> 
>    I think that a sleep before requeuing is pretty much the same as using
> a delayed work...

I think so. Since this is a fix patch, using a sleep function is better than
converting to delayed_work, I think. What do you think?

> >> [...]
> >>> diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
> >>> index c70cff80cc99..ca7db8a5b412 100644
> >>> --- a/drivers/net/ethernet/renesas/ravb_main.c
> >>> +++ b/drivers/net/ethernet/renesas/ravb_main.c
> >>> @@ -1863,17 +1863,24 @@ static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
> >>>  	/* tx_errors count up */
> >>>  	ndev->stats.tx_errors++;
> >>>
> >>> -	schedule_work(&priv->work);
> >>> +	schedule_delayed_work(&priv->work, 0);
> >>>  }
> >>>
> >>>  static void ravb_tx_timeout_work(struct work_struct *work)
> >>>  {
> >>> -	struct ravb_private *priv = container_of(work, struct ravb_private,
> >>> +	struct delayed_work *dwork = to_delayed_work(work);
> >>> +	struct ravb_private *priv = container_of(dwork, struct ravb_private,
> >>>  						 work);
> >>>  	const struct ravb_hw_info *info = priv->info;
> >>>  	struct net_device *ndev = priv->ndev;
> >>>  	int error;
> >>>
> >>> +	if (!rtnl_trylock()) {
> >>> +		if (netif_running(ndev))
> >>> +			schedule_delayed_work(&priv->work, msecs_to_jiffies(10));
> 
>   You could reuse dwork instead of &priv->work here...

I think so.

> >>    The delay is rather arbitrary. Why not e.g. 1 ms?
> >
> > I think that 1 ms is enough.
> 
>    Seeing now that 1 ms was intended...

Yes...

Best regards,
Yoshihiro Shimoda

> > Best regards,
> > Yoshihiro Shimoda
> 
> [...]
> 
> MBR, Sergey

Patch

diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index e0f8276cffed..e9bb8ee3ba2d 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1081,7 +1081,7 @@  struct ravb_private {
 	u32 cur_tx[NUM_TX_QUEUE];
 	u32 dirty_tx[NUM_TX_QUEUE];
 	struct napi_struct napi[NUM_RX_QUEUE];
-	struct work_struct work;
+	struct delayed_work work;
 	/* MII transceiver section. */
 	struct mii_bus *mii_bus;	/* MDIO bus control */
 	int link;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c70cff80cc99..ca7db8a5b412 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1863,17 +1863,24 @@  static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue)
 	/* tx_errors count up */
 	ndev->stats.tx_errors++;
 
-	schedule_work(&priv->work);
+	schedule_delayed_work(&priv->work, 0);
 }
 
 static void ravb_tx_timeout_work(struct work_struct *work)
 {
-	struct ravb_private *priv = container_of(work, struct ravb_private,
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct ravb_private *priv = container_of(dwork, struct ravb_private,
 						 work);
 	const struct ravb_hw_info *info = priv->info;
 	struct net_device *ndev = priv->ndev;
 	int error;
 
+	if (!rtnl_trylock()) {
+		if (netif_running(ndev))
+			schedule_delayed_work(&priv->work, msecs_to_jiffies(10));
+		return;
+	}
+
 	netif_tx_stop_all_queues(ndev);
 
 	/* Stop PTP Clock driver */
@@ -1907,7 +1914,7 @@  static void ravb_tx_timeout_work(struct work_struct *work)
 		 */
 		netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
 			   __func__, error);
-		return;
+		goto out_unlock;
 	}
 	ravb_emac_init(ndev);
 
@@ -1917,6 +1924,9 @@  static void ravb_tx_timeout_work(struct work_struct *work)
 		ravb_ptp_init(ndev, priv->pdev);
 
 	netif_tx_start_all_queues(ndev);
+
+out_unlock:
+	rtnl_unlock();
 }
 
 /* Packet transmit function for Ethernet AVB */
@@ -2167,7 +2177,7 @@  static int ravb_close(struct net_device *ndev)
 			of_phy_deregister_fixed_link(np);
 	}
 
-	cancel_work_sync(&priv->work);
+	cancel_delayed_work_sync(&priv->work);
 
 	if (info->multi_irqs) {
 		free_irq(priv->tx_irqs[RAVB_NC], ndev);
@@ -2687,7 +2697,7 @@  static int ravb_probe(struct platform_device *pdev)
 	ndev->base_addr = res->start;
 
 	spin_lock_init(&priv->lock);
-	INIT_WORK(&priv->work, ravb_tx_timeout_work);
+	INIT_DELAYED_WORK(&priv->work, ravb_tx_timeout_work);
 
 	error = of_get_phy_mode(np, &priv->phy_interface);
 	if (error && error != -ENODEV)