[net,v3,2/2] virtio_net: fix a spurious deadlock issue

Message ID 20240528134116.117426-3-hengqi@linux.alibaba.com (mailing list archive)
State Accepted
Commit d1f0bd01bc58f35b5353ad9dbe5f7249a8f3368e
Delegated to: Netdev Maintainers
Headers show
Series virtio_net: fix lock warning and unrecoverable state

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 45 this patch: 45
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 1 maintainers not CCed: eperezma@redhat.com
netdev/build_clang fail Errors and warnings before: 36 this patch: 36
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn fail Errors and warnings before: 45 this patch: 45
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 72 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Heng Qi May 28, 2024, 1:41 p.m. UTC
When the following snippet is run, lockdep will report a deadlock[1].

  /* Acquire all queues dim_locks */
  for (i = 0; i < vi->max_queue_pairs; i++)
          mutex_lock(&vi->rq[i].dim_lock);

There's no deadlock here because the vq locks are always taken
in the same order, but lockdep cannot figure that out. So refactor
the code to alleviate the problem.
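
The refactor narrows the lock scope so that each dim_lock is taken and
released within a single loop iteration; a minimal sketch of the
pattern the patch below applies:

  /* At most one dim_lock is held at any time, so lockdep never
   * sees the same lock class acquired recursively.
   */
  for (i = 0; i < vi->max_queue_pairs; i++) {
          mutex_lock(&vi->rq[i].dim_lock);
          vi->rq[i].dim_enabled = true;
          mutex_unlock(&vi->rq[i].dim_lock);
  }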

[1]
========================================================
WARNING: possible recursive locking detected
6.9.0-rc7+ #319 Not tainted
--------------------------------------------
ethtool/962 is trying to acquire lock:

but task is already holding lock:

other info that might help us debug this:
Possible unsafe locking scenario:

      CPU0
      ----
 lock(&vi->rq[i].dim_lock);
 lock(&vi->rq[i].dim_lock);

*** DEADLOCK ***

 May be due to missing lock nesting notation

3 locks held by ethtool/962:
 #0: ffffffff82dbaab0 (cb_lock){++++}-{3:3}, at: genl_rcv+0x19/0x40
 #1: ffffffff82dad0a8 (rtnl_mutex){+.+.}-{3:3}, at:
				ethnl_default_set_doit+0xbe/0x1e0

stack backtrace:
CPU: 6 PID: 962 Comm: ethtool Not tainted 6.9.0-rc7+ #319
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS
	   rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
Call Trace:
 <TASK>
 dump_stack_lvl+0x79/0xb0
 check_deadlock+0x130/0x220
 __lock_acquire+0x861/0x990
 lock_acquire.part.0+0x72/0x1d0
 ? lock_acquire+0xf8/0x130
 __mutex_lock+0x71/0xd50
 virtnet_set_coalesce+0x151/0x190
 __ethnl_set_coalesce.isra.0+0x3f8/0x4d0
 ethnl_set_coalesce+0x34/0x90
 ethnl_default_set_doit+0xdd/0x1e0
 genl_family_rcv_msg_doit+0xdc/0x130
 genl_family_rcv_msg+0x154/0x230
 ? __pfx_ethnl_default_set_doit+0x10/0x10
 genl_rcv_msg+0x4b/0xa0
 ? __pfx_genl_rcv_msg+0x10/0x10
 netlink_rcv_skb+0x5a/0x110
 genl_rcv+0x28/0x40
 netlink_unicast+0x1af/0x280
 netlink_sendmsg+0x20e/0x460
 __sys_sendto+0x1fe/0x210
 ? find_held_lock+0x2b/0x80
 ? do_user_addr_fault+0x3a2/0x8a0
 ? __lock_release+0x5e/0x160
 ? do_user_addr_fault+0x3a2/0x8a0
 ? lock_release+0x72/0x140
 ? do_user_addr_fault+0x3a7/0x8a0
 __x64_sys_sendto+0x29/0x30
 do_syscall_64+0x78/0x180
 entry_SYSCALL_64_after_hwframe+0x76/0x7e

Fixes: 4d4ac2ececd3 ("virtio_net: Add a lock for per queue RX coalesce")
Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)

Comments

Paolo Abeni May 30, 2024, 8:34 a.m. UTC | #1
On Tue, 2024-05-28 at 21:41 +0800, Heng Qi wrote:
> When the following snippet is run, lockdep will report a deadlock[1].
>
> [...]
>
> Fixes: 4d4ac2ececd3 ("virtio_net: Add a lock for per queue RX coalesce")
> Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>

This would have deserved a changelog after the commit message.

The patch LGTM (for obvious reasons ;), but it deserves an explicit ack
from Jason and/or Michael

Cheers,

Paolo
Heng Qi May 30, 2024, 8:49 a.m. UTC | #2
On Thu, 30 May 2024 10:34:07 +0200, Paolo Abeni <pabeni@redhat.com> wrote:
> On Tue, 2024-05-28 at 21:41 +0800, Heng Qi wrote:
> > When the following snippet is run, lockdep will report a deadlock[1].
> >
> > [...]
> >
> > Fixes: 4d4ac2ececd3 ("virtio_net: Add a lock for per queue RX coalesce")
> > Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>
> 
> This would have deserved a changelog after the commit message.

I put the changelog in the cover letter, but I can send a new
RESEND version with a changelog in this patch if you want :)

> 
> The patch LGTM (for obvious reasons ;), but it deserves an explicit ack
> from Jason and/or Michael

Thanks.

Michael S. Tsirkin May 30, 2024, 9:16 a.m. UTC | #3
On Tue, May 28, 2024 at 09:41:16PM +0800, Heng Qi wrote:
> When the following snippet is run, lockdep will report a deadlock[1].
>
> [...]
>
> Fixes: 4d4ac2ececd3 ("virtio_net: Add a lock for per queue RX coalesce")
> Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>


Acked-by: Michael S. Tsirkin <mst@redhat.com>


> ---
>  drivers/net/virtio_net.c | 36 ++++++++++++++++--------------------
>  1 file changed, 16 insertions(+), 20 deletions(-)
>
> [...]
Xuan Zhuo May 30, 2024, 9:57 a.m. UTC | #4
On Tue, 28 May 2024 21:41:16 +0800, Heng Qi <hengqi@linux.alibaba.com> wrote:
> When the following snippet is run, lockdep will report a deadlock[1].
>
> [...]
>
> Fixes: 4d4ac2ececd3 ("virtio_net: Add a lock for per queue RX coalesce")
> Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>

Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

> ---
>  drivers/net/virtio_net.c | 36 ++++++++++++++++--------------------
>  1 file changed, 16 insertions(+), 20 deletions(-)
>
> [...]
Jason Wang May 30, 2024, 10:34 a.m. UTC | #5
On Thu, May 30, 2024 at 5:17 PM Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Tue, May 28, 2024 at 09:41:16PM +0800, Heng Qi wrote:
> > When the following snippet is run, lockdep will report a deadlock[1].
> >
> > [...]
> >
> > Fixes: 4d4ac2ececd3 ("virtio_net: Add a lock for per queue RX coalesce")
> > Signed-off-by: Heng Qi <hengqi@linux.alibaba.com>
>
>
> Acked-by: Michael S. Tsirkin <mst@redhat.com>
>

Acked-by: Jason Wang <jasowang@redhat.com>

Btw, adding lock nesting notation seems to be another way.

Thanks
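
For reference, the lock nesting notation Jason mentions would keep the
take-all-locks approach but give each queue's dim_lock a distinct
lockdep subclass. A minimal, hypothetical sketch (not what this patch
does; note lockdep caps subclasses at MAX_LOCKDEP_SUBCLASSES, i.e. 8,
so it cannot scale to many queues):

  /* Annotate each acquisition with the queue index as its subclass,
   * so the fixed acquisition order is not flagged as recursive.
   */
  for (i = 0; i < vi->max_queue_pairs; i++)
          mutex_lock_nested(&vi->rq[i].dim_lock, i);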

Patch

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4f828a9e5889..ecb5203d0372 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -4257,7 +4257,6 @@  static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 	struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
 	bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
 	struct scatterlist sgs_rx;
-	int ret = 0;
 	int i;
 
 	if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -4267,27 +4266,27 @@  static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 			       ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
 		return -EINVAL;
 
-	/* Acquire all queues dim_locks */
-	for (i = 0; i < vi->max_queue_pairs; i++)
-		mutex_lock(&vi->rq[i].dim_lock);
-
 	if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = true;
-		for (i = 0; i < vi->max_queue_pairs; i++)
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			mutex_lock(&vi->rq[i].dim_lock);
 			vi->rq[i].dim_enabled = true;
-		goto unlock;
+			mutex_unlock(&vi->rq[i].dim_lock);
+		}
+		return 0;
 	}
 
 	coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
-	if (!coal_rx) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
+	if (!coal_rx)
+		return -ENOMEM;
 
 	if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
 		vi->rx_dim_enabled = false;
-		for (i = 0; i < vi->max_queue_pairs; i++)
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			mutex_lock(&vi->rq[i].dim_lock);
 			vi->rq[i].dim_enabled = false;
+			mutex_unlock(&vi->rq[i].dim_lock);
+		}
 	}
 
 	/* Since the per-queue coalescing params can be set,
@@ -4300,22 +4299,19 @@  static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 
 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
 				  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
-				  &sgs_rx)) {
-		ret = -EINVAL;
-		goto unlock;
-	}
+				  &sgs_rx))
+		return -EINVAL;
 
 	vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
 	vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
 	for (i = 0; i < vi->max_queue_pairs; i++) {
+		mutex_lock(&vi->rq[i].dim_lock);
 		vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
 		vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-	}
-unlock:
-	for (i = vi->max_queue_pairs - 1; i >= 0; i--)
 		mutex_unlock(&vi->rq[i].dim_lock);
+	}
 
-	return ret;
+	return 0;
 }
 
 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
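
A side note on the early returns introduced above: they do not leak
coal_rx because it is declared with the scope-based cleanup attribute
from linux/cleanup.h. A minimal sketch of that pattern:

  /* __free(kfree) arranges for kfree() to run automatically when the
   * variable goes out of scope, on every return path (kfree(NULL) is
   * a no-op, so returning before the allocation is also safe).
   */
  struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;

  coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
  if (!coal_rx)
          return -ENOMEM;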