@@ -833,6 +833,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
mxbuf.ring = ring;
mxbuf.dev = dev;
+ guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);
length = mxbuf.xdp.data_end - mxbuf.xdp.data;
@@ -269,6 +269,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
u32 act;
int err;
+ guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
act = bpf_prog_run_xdp(prog, xdp);
switch (act) {
case XDP_PASS:
@@ -1011,7 +1011,8 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
pkt_off - NFP_NET_RX_BUF_HEADROOM,
pkt_len, true);
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ scoped_guard(local_lock_nested_bh, &bpf_run_lock.redirect_lock)
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
pkt_len = xdp.data_end - xdp.data;
pkt_off += xdp.data - orig_data;
@@ -216,6 +216,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
}
}
+ guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
act = bpf_prog_run_xdp(xdp_prog, xrxbuf->xdp);
pkt_len = xrxbuf->xdp->data_end - xrxbuf->xdp->data;
@@ -1130,7 +1130,8 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
pkt_off - NFP_NET_RX_BUF_HEADROOM,
pkt_len, true);
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ scoped_guard(local_lock_nested_bh, &bpf_run_lock.redirect_lock)
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
pkt_len = xdp.data_end - xdp.data;
pkt_off += xdp.data - orig_data;
@@ -291,6 +291,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
rx_buf->len, false);
+ guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
offset = (u8 *)xdp.data - *ehp;
@@ -291,6 +291,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
rx_buf->len, false);
+ guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
offset = (u8 *)xdp.data - *ehp;
The per-CPU variables used during bpf_prog_run_xdp() invocation and later
during xdp_do_redirect() rely on disabled BH for their protection. Without
locking in local_bh_disable() on PREEMPT_RT these data structures require
explicit locking.

This is a follow-up on the previous change which introduced
bpf_run_lock.redirect_lock and now uses it within drivers.

The simple way is to acquire the lock before bpf_prog_run_xdp() is invoked
and hold it until the end of the function. This does not always work
because some drivers (cpsw, atlantic) invoke xdp_do_flush() in the same
context. Acquiring the lock in bpf_prog_run_xdp() and dropping it in
xdp_do_redirect() (without touching drivers) does not work because not all
drivers which use bpf_prog_run_xdp() support XDP_REDIRECT (and invoke
xdp_do_redirect()).

Ideally the minimal locking scope would be bpf_prog_run_xdp() +
xdp_do_redirect() and everything else (error recovery, DMA unmapping,
free/alloc of memory, …) would happen outside of the locked section.

Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Edward Cree <ecree.xilinx@gmail.com>
Cc: Jesper Dangaard Brouer <hawk@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Louis Peens <louis.peens@corigine.com>
Cc: Martin Habets <habetsm.xilinx@gmail.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: Tariq Toukan <tariqt@nvidia.com>
Cc: bpf@vger.kernel.org
Cc: linux-net-drivers@amd.com
Cc: linux-rdma@vger.kernel.org
Cc: oss-drivers@corigine.com
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/net/ethernet/mellanox/mlx4/en_rx.c       | 1 +
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 1 +
 drivers/net/ethernet/netronome/nfp/nfd3/dp.c     | 3 ++-
 drivers/net/ethernet/netronome/nfp/nfd3/xsk.c    | 1 +
 drivers/net/ethernet/netronome/nfp/nfdk/dp.c     | 3 ++-
 drivers/net/ethernet/sfc/rx.c                    | 1 +
 drivers/net/ethernet/sfc/siena/rx.c              | 1 +
 7 files changed, 9 insertions(+), 2 deletions(-)
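
Note (after the ---, so not part of the commit): below is a minimal,
illustrative sketch of the two locking styles used in the hunks above. It
assumes the bpf_run_lock.redirect_lock introduced by the previous change in
this series; the example_rx_*() helpers are made-up names, not code from any
driver touched here.

/* Illustrative only; example_rx_*() are hypothetical driver helpers. */
#include <linux/cleanup.h>
#include <linux/filter.h>
#include <linux/local_lock.h>

/* Style 1: take the lock and hold it until the end of the scope. */
static u32 example_rx_guard(struct bpf_prog *xdp_prog, struct xdp_buff *xdp)
{
	guard(local_lock_nested_bh)(&bpf_run_lock.redirect_lock);
	return bpf_prog_run_xdp(xdp_prog, xdp);
	/* redirect_lock is released automatically when the scope is left. */
}

/* Style 2: limit the locked section to the bpf_prog_run_xdp() call only. */
static u32 example_rx_scoped(struct bpf_prog *xdp_prog, struct xdp_buff *xdp)
{
	u32 act;

	scoped_guard(local_lock_nested_bh, &bpf_run_lock.redirect_lock)
		act = bpf_prog_run_xdp(xdp_prog, xdp);
	return act;
}

Style 1 matches the mlx4/mlx5/nfp-xsk/sfc hunks, where the lock can simply
be held to the end of the function; style 2 matches the nfp nfd3/nfdk hunks,
where later work in the same function must stay outside the locked section.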