
[net-next,09/11] net: designate XSK pool pointers in queues as "ops protected"

Message ID 20250312223507.805719-10-kuba@kernel.org (mailing list archive)
State New
Delegated to: Netdev Maintainers
Series net: skip taking rtnl_lock for queue GET

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 38 this patch: 38
netdev/build_tools success Errors and warnings before: 26 (+0) this patch: 26 (+0)
netdev/cc_maintainers warning 9 maintainers not CCed: jonathan.lemon@gmail.com maciej.fijalkowski@intel.com ast@kernel.org bjorn@kernel.org hawk@kernel.org magnus.karlsson@intel.com bpf@vger.kernel.org john.fastabend@gmail.com daniel@iogearbox.net
netdev/build_clang success Errors and warnings before: 64 this patch: 64
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 4096 this patch: 4096
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 34 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 16 this patch: 16
netdev/source_inline success Was 0 now: 0

Commit Message

Jakub Kicinski March 12, 2025, 10:35 p.m. UTC
Read accesses go via xsk_get_pool_from_qid(); the calls coming
from the core and from gve look safe (other "ops locked" drivers
don't support XSK).

Write accesses go via xsk_reg_pool_at_qid() and xsk_clear_pool_at_qid().
The former is already called under the ops lock; the latter needs to take
it when coming from the workqueue via xp_clear_dev().

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 include/linux/netdevice.h     | 1 +
 include/net/netdev_rx_queue.h | 6 +++---
 net/xdp/xsk_buff_pool.c       | 3 +++
 3 files changed, 7 insertions(+), 3 deletions(-)
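
For context, a minimal sketch of the read-side convention the commit message
describes: the pool pointer is looked up via xsk_get_pool_from_qid() while
holding the per-device instance (ops) lock instead of rtnl_lock. The helper
name below is hypothetical and the code is illustrative only, not part of
this patch:

#include <linux/netdevice.h>
#include <net/xdp_sock_drv.h>

/* Illustrative sketch -- not part of this patch.  Checks whether an XSK
 * pool is installed on a queue while holding the instance (ops) lock,
 * which is what makes the queue's pool pointer "ops protected".
 */
static bool example_queue_has_xsk_pool(struct net_device *dev, u16 queue_id)
{
	bool has_pool;

	netdev_lock_ops(dev);
	has_pool = !!xsk_get_pool_from_qid(dev, queue_id);
	netdev_unlock_ops(dev);

	return has_pool;
}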

Comments

Stanislav Fomichev March 13, 2025, 8:28 a.m. UTC | #1
On 03/12, Jakub Kicinski wrote:
> Read accesses go via xsk_get_pool_from_qid(); the calls coming
> from the core and from gve look safe (other "ops locked" drivers
> don't support XSK).
> 
> Write accesses go via xsk_reg_pool_at_qid() and xsk_clear_pool_at_qid().
> The former is already called under the ops lock; the latter needs to take
> it when coming from the workqueue via xp_clear_dev().
> 
> Signed-off-by: Jakub Kicinski <kuba@kernel.org>
> ---
>  include/linux/netdevice.h     | 1 +
>  include/net/netdev_rx_queue.h | 6 +++---
>  net/xdp/xsk_buff_pool.c       | 3 +++
>  3 files changed, 7 insertions(+), 3 deletions(-)
> 
> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> index 0fc79ae60ff5..7d802ef1c864 100644
> --- a/include/linux/netdevice.h
> +++ b/include/linux/netdevice.h
> @@ -688,6 +688,7 @@ struct netdev_queue {
>  	/* Subordinate device that the queue has been assigned to */
>  	struct net_device	*sb_dev;
>  #ifdef CONFIG_XDP_SOCKETS
> +	/* "ops protected", see comment about net_device::lock */
>  	struct xsk_buff_pool    *pool;
>  #endif
>  
> diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
> index b2238b551dce..8cdcd138b33f 100644
> --- a/include/net/netdev_rx_queue.h
> +++ b/include/net/netdev_rx_queue.h
> @@ -20,12 +20,12 @@ struct netdev_rx_queue {
>  	struct net_device		*dev;
>  	netdevice_tracker		dev_tracker;
>  
> +	/* All fields below are "ops protected",
> +	 * see comment about net_device::lock
> +	 */
>  #ifdef CONFIG_XDP_SOCKETS
>  	struct xsk_buff_pool            *pool;
>  #endif
> -	/* NAPI instance for the queue
> -	 * "ops protected", see comment about net_device::lock
> -	 */
>  	struct napi_struct		*napi;
>  	struct pp_memory_provider_params mp_params;
>  } ____cacheline_aligned_in_smp;
> diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
> index 14716ad3d7bc..60b3adb7b2d7 100644
> --- a/net/xdp/xsk_buff_pool.c
> +++ b/net/xdp/xsk_buff_pool.c
> @@ -279,9 +279,12 @@ static void xp_release_deferred(struct work_struct *work)
>  {
>  	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
>  						  work);
> +	struct net_device *netdev = pool->netdev;

It looks like netdev might be NULL here; at least xp_clear_dev() has
an explicit check. Presumably this happens when the device goes down
and invalidates the open sockets (in xsk_notifier), and we then call
xp_release_deferred() on socket close with netdev == NULL.

>  	rtnl_lock();
> +	netdev_lock_ops(netdev);
>  	xp_clear_dev(pool);
> +	netdev_unlock_ops(netdev);
>  	rtnl_unlock();
>  
>  	if (pool->fq) {
> -- 
> 2.48.1
>
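
A NULL-safe ordering along the lines of the comment above might look like
the sketch below; this is illustrative only, not a submitted fix, and
assumes pool->netdev can legitimately be NULL by the time the deferred
release runs:

static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);
	/* pool->netdev may already be NULL if the device went down and
	 * xsk_notifier detached the sockets before the socket was closed,
	 * so only take the ops lock when a device is still attached.
	 */
	struct net_device *netdev = pool->netdev;

	rtnl_lock();
	if (netdev)
		netdev_lock_ops(netdev);
	xp_clear_dev(pool);
	if (netdev)
		netdev_unlock_ops(netdev);
	rtnl_unlock();

	/* ... rest of xp_release_deferred() unchanged ... */
}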

Patch

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0fc79ae60ff5..7d802ef1c864 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -688,6 +688,7 @@ struct netdev_queue {
 	/* Subordinate device that the queue has been assigned to */
 	struct net_device	*sb_dev;
 #ifdef CONFIG_XDP_SOCKETS
+	/* "ops protected", see comment about net_device::lock */
 	struct xsk_buff_pool    *pool;
 #endif
 
diff --git a/include/net/netdev_rx_queue.h b/include/net/netdev_rx_queue.h
index b2238b551dce..8cdcd138b33f 100644
--- a/include/net/netdev_rx_queue.h
+++ b/include/net/netdev_rx_queue.h
@@ -20,12 +20,12 @@ struct netdev_rx_queue {
 	struct net_device		*dev;
 	netdevice_tracker		dev_tracker;
 
+	/* All fields below are "ops protected",
+	 * see comment about net_device::lock
+	 */
 #ifdef CONFIG_XDP_SOCKETS
 	struct xsk_buff_pool            *pool;
 #endif
-	/* NAPI instance for the queue
-	 * "ops protected", see comment about net_device::lock
-	 */
 	struct napi_struct		*napi;
 	struct pp_memory_provider_params mp_params;
 } ____cacheline_aligned_in_smp;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 14716ad3d7bc..60b3adb7b2d7 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -279,9 +279,12 @@ static void xp_release_deferred(struct work_struct *work)
 {
 	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
 						  work);
+	struct net_device *netdev = pool->netdev;
 
 	rtnl_lock();
+	netdev_lock_ops(netdev);
 	xp_clear_dev(pool);
+	netdev_unlock_ops(netdev);
 	rtnl_unlock();
 
 	if (pool->fq) {