--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -291,6 +291,12 @@ struct bpf_map {
s64 __percpu *elem_count;
};
+struct bpf_run_lock {
+ local_lock_t redirect_lock;
+};
+
+DECLARE_PER_CPU(struct bpf_run_lock, bpf_run_lock);
+
static inline const char *btf_field_type_name(enum btf_field_type type)
{
switch (type) {
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -89,6 +89,11 @@
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);
+DEFINE_PER_CPU(struct bpf_run_lock, bpf_run_lock) = {
+ .redirect_lock = INIT_LOCAL_LOCK(redirect_lock),
+};
+EXPORT_PER_CPU_SYMBOL_GPL(bpf_run_lock);
+
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
if (in_compat_syscall()) {
The XDP redirect process is two-staged:

- bpf_prog_run_xdp() is invoked to run an eBPF program which inspects
  the packet and makes decisions. While doing that, the per-CPU
  variable bpf_redirect_info is used.

- Afterwards xdp_do_redirect() is invoked; it accesses
  bpf_redirect_info and may also access other per-CPU variables like
  xskmap_flush_list.

At the very end of the NAPI callback, xdp_do_flush() is invoked. It
does not access bpf_redirect_info but touches the individual per-CPU
flush lists.

The per-CPU variables are only used in the NAPI callback, hence
disabling bottom halves is the only protection mechanism. Users in
preemptible context (like cpu_map_kthread_run()) explicitly disable
bottom halves for protection reasons.

Since local_bh_disable() on PREEMPT_RT does not imply locking, this
data structure requires explicit locking there.

Introduce redirect_lock as the lock to be acquired while these per-CPU
variables are accessed. Usually the lock would be made part of the
per-CPU variable it protects, but since a few different per-CPU
variables need to be protected at the same time (and some of them
depend on a CONFIG setting), a new per-CPU data structure with the
variable bpf_run_lock is used instead.

The lock is a nested-BH lock, meaning that on non-PREEMPT_RT kernels
it merely results in a lockdep check and an assertion that bottom
halves are disabled. On PREEMPT_RT kernels it provides the needed
synchronisation once local_bh_disable() no longer acts as a per-CPU
lock.

This patch introduces the bpf_run_lock.redirect_lock lock. It will be
used by drivers in the following patches; a sketch of that usage
follows below.

A follow-up step could be to keep bpf_prog_run_xdp() and the
XDP_REDIRECT switch case (with xdp_do_redirect()) close together. That
would allow a single scoped_guard() to cover just the two calls that
require locking instead of the whole switch case.

Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Hao Luo <haoluo@google.com>
Cc: Jesper Dangaard Brouer <hawk@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Song Liu <song@kernel.org>
Cc: Stanislav Fomichev <sdf@google.com>
Cc: Yonghong Song <yonghong.song@linux.dev>
Cc: bpf@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/bpf.h | 6 ++++++
 net/core/filter.c   | 5 +++++
 2 files changed, 11 insertions(+)
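
For illustration, here is a minimal sketch of how a driver's NAPI
receive path might take the new lock around the two stages. Everything
named mydrv_* is hypothetical; only bpf_run_lock.redirect_lock and the
local_lock_nested_bh()/local_unlock_nested_bh() primitives (introduced
earlier in this series) come from the actual patches, and the real
driver conversions follow later in the series:

  /* Hypothetical driver fragment, not part of this patch. Assumes the
   * driver already includes <linux/bpf.h> (for bpf_run_lock) and
   * <linux/filter.h> (for bpf_prog_run_xdp()/xdp_do_redirect()).
   */
  static u32 mydrv_run_xdp(struct bpf_prog *xdp_prog, struct xdp_buff *xdp)
  {
  	u32 act;

  	/* Both stages read/write bpf_redirect_info, so both run under
  	 * redirect_lock. On !PREEMPT_RT this is a lockdep check plus an
  	 * assertion that BHs are disabled; on PREEMPT_RT it is a real
  	 * per-CPU lock.
  	 */
  	local_lock_nested_bh(&bpf_run_lock.redirect_lock);
  	act = bpf_prog_run_xdp(xdp_prog, xdp);
  	if (act == XDP_REDIRECT &&
  	    xdp_do_redirect(xdp->rxq->dev, xdp, xdp_prog) < 0)
  		act = XDP_DROP;
  	local_unlock_nested_bh(&bpf_run_lock.redirect_lock);

  	return act;
  }

With the follow-up step mentioned above, the explicit lock/unlock pair
could become a single scoped_guard(local_lock_nested_bh,
&bpf_run_lock.redirect_lock) block covering just these two calls,
assuming the corresponding guard definition from the locking patches of
this series. xdp_do_flush() stays outside the locked region, as it does
not access bpf_redirect_info.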