Message ID | 20250312223507.805719-10-kuba@kernel.org (mailing list archive) |
---|---|
State | New |
Delegated to: | Netdev Maintainers |
Series | net: skip taking rtnl_lock for queue GET |
On 03/12, Jakub Kicinski wrote:
> Read accesses go via xsk_get_pool_from_qid(), the call coming
> from the core and gve look safe (other "ops locked" drivers
> don't support XSK).
>
> Write accesses go via xsk_reg_pool_at_qid() and xsk_clear_pool_at_qid().
> Former is already under the ops lock, latter needs to be locked when
> coming from the workqueue via xp_clear_dev().
>
> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
> ---
>  include/linux/netdevice.h     | 1 +
>  include/net/netdev_rx_queue.h | 6 +++---
>  net/xdp/xsk_buff_pool.c       | 3 +++
>  3 files changed, 7 insertions(+), 3 deletions(-)
>
> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> index 0fc79ae60ff5..7d802ef1c864 100644
> --- a/include/linux/netdevice.h
> +++ b/include/linux/netdevice.h
> @@ -688,6 +688,7 @@ struct netdev_queue {
>  	/* Subordinate device that the queue has been assigned to */
>  	struct net_device *sb_dev;
>  #ifdef CONFIG_XDP_SOCKETS
> +	/* "ops protected", see comment about net_device::lock */
>  	struct xsk_buff_pool *pool;
>  #endif
>
> diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
> index b2238b551dce..8cdcd138b33f 100644
> --- a/include/net/netdev_rx_queue.h
> +++ b/include/net/netdev_rx_queue.h
> @@ -20,12 +20,12 @@ struct netdev_rx_queue {
>  	struct net_device *dev;
>  	netdevice_tracker dev_tracker;
>
> +	/* All fields below are "ops protected",
> +	 * see comment about net_device::lock
> +	 */
>  #ifdef CONFIG_XDP_SOCKETS
>  	struct xsk_buff_pool *pool;
>  #endif
> -	/* NAPI instance for the queue
> -	 * "ops protected", see comment about net_device::lock
> -	 */
>  	struct napi_struct *napi;
>  	struct pp_memory_provider_params mp_params;
>  } ____cacheline_aligned_in_smp;
> diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
> index 14716ad3d7bc..60b3adb7b2d7 100644
> --- a/net/xdp/xsk_buff_pool.c
> +++ b/net/xdp/xsk_buff_pool.c
> @@ -279,9 +279,12 @@ static void xp_release_deferred(struct work_struct *work)
>  {
>  	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
>  						  work);
> +	struct net_device *netdev = pool->netdev;

It looks like netdev might be null here. At least xp_clear_dev has an
explicit check. Presumably this happens when the device goes down and
invalidates the open sockets (in xsk_notifier) and we call
xp_release_deferred on socket close afterwards with netdev==null.

>  	rtnl_lock();
> +	netdev_lock_ops(netdev);
>  	xp_clear_dev(pool);
> +	netdev_unlock_ops(netdev);
>  	rtnl_unlock();
>
>  	if (pool->fq) {
> --
> 2.48.1
>
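A minimal sketch of one way the concern above could be addressed: guard the new ops-lock calls on the reviewer's assumption that pool->netdev can already be NULL by the time the work item runs (e.g. after xsk_notifier has detached the device). This is an illustration, not the patch author's fix; it assumes the surrounding context of net/xdp/xsk_buff_pool.c.

```c
/* Sketch only: NULL-guard the ops-lock calls, per the review comment above.
 * Relies on the includes already present in net/xdp/xsk_buff_pool.c.
 * xp_clear_dev() already returns early when pool->netdev is NULL, so
 * skipping it entirely under the guard is equivalent.
 */
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);
	struct net_device *netdev = pool->netdev;

	rtnl_lock();
	if (netdev) {
		/* Device still attached: clearing the per-queue pool
		 * pointers touches "ops protected" state.
		 */
		netdev_lock_ops(netdev);
		xp_clear_dev(pool);
		netdev_unlock_ops(netdev);
	}
	rtnl_unlock();

	/* ... remainder of xp_release_deferred() unchanged ... */
}
```

Whether the guard is needed at all depends on whether a NULL netdev can really reach this point, which is exactly the question raised in the review.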
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0fc79ae60ff5..7d802ef1c864 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -688,6 +688,7 @@ struct netdev_queue {
 	/* Subordinate device that the queue has been assigned to */
 	struct net_device *sb_dev;
 #ifdef CONFIG_XDP_SOCKETS
+	/* "ops protected", see comment about net_device::lock */
 	struct xsk_buff_pool *pool;
 #endif

diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index b2238b551dce..8cdcd138b33f 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -20,12 +20,12 @@ struct netdev_rx_queue {
 	struct net_device *dev;
 	netdevice_tracker dev_tracker;

+	/* All fields below are "ops protected",
+	 * see comment about net_device::lock
+	 */
 #ifdef CONFIG_XDP_SOCKETS
 	struct xsk_buff_pool *pool;
 #endif
-	/* NAPI instance for the queue
-	 * "ops protected", see comment about net_device::lock
-	 */
 	struct napi_struct *napi;
 	struct pp_memory_provider_params mp_params;
 } ____cacheline_aligned_in_smp;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 14716ad3d7bc..60b3adb7b2d7 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -279,9 +279,12 @@ static void xp_release_deferred(struct work_struct *work)
 {
 	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);
+	struct net_device *netdev = pool->netdev;

 	rtnl_lock();
+	netdev_lock_ops(netdev);
 	xp_clear_dev(pool);
+	netdev_unlock_ops(netdev);
 	rtnl_unlock();

 	if (pool->fq) {
Read accesses go via xsk_get_pool_from_qid(), the call coming
from the core and gve look safe (other "ops locked" drivers
don't support XSK).

Write accesses go via xsk_reg_pool_at_qid() and xsk_clear_pool_at_qid().
Former is already under the ops lock, latter needs to be locked when
coming from the workqueue via xp_clear_dev().

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 include/linux/netdevice.h     | 1 +
 include/net/netdev_rx_queue.h | 6 +++---
 net/xdp/xsk_buff_pool.c       | 3 +++
 3 files changed, 7 insertions(+), 3 deletions(-)
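To illustrate the read path the commit message describes, here is a hedged driver-side sketch of how a per-queue pool lookup typically looks for an "ops locked" driver. Only xsk_get_pool_from_qid() is a real kernel helper; the function name example_xsk_pool_enable and its error handling are hypothetical.

```c
#include <net/xdp_sock_drv.h>	/* xsk_get_pool_from_qid() */

/* Hypothetical driver-side sketch: for "ops locked" drivers the core holds
 * netdev->lock around the ndo_bpf() call that reaches this helper, so the
 * read of the per-queue pool pointer needs no extra locking here.
 */
static int example_xsk_pool_enable(struct net_device *dev, u16 qid)
{
	struct xsk_buff_pool *pool;

	pool = xsk_get_pool_from_qid(dev, qid);	/* "ops protected" read */
	if (!pool)
		return -EINVAL;

	/* ... DMA-map the pool and switch queue 'qid' to zero-copy mode ... */
	return 0;
}
```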