
[bpf-next,v1,1/2] bpf: Summarize sleepable global subprogs

Message ID 20250228162858.1073529-2-memxor@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series Global subprogs in RCU/{preempt,irq}-disabled sections

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success Errors and warnings before: 26 (+1) this patch: 26 (+1)
netdev/cc_maintainers fail 2 blamed authors not CCed: martin.lau@linux.dev yonghong.song@linux.dev; 8 maintainers not CCed: song@kernel.org kpsingh@kernel.org john.fastabend@gmail.com jolsa@kernel.org yonghong.song@linux.dev haoluo@google.com sdf@fomichev.me martin.lau@linux.dev
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 70 this patch: 70
netdev/checkpatch warning WARNING: line length of 114 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Kumar Kartikeya Dwivedi Feb. 28, 2025, 4:28 p.m. UTC
The verifier currently does not permit global subprog calls when a lock
is held, preemption is disabled, or when IRQs are disabled. This is
because we don't know whether the global subprog calls sleepable
functions or not.

In the case of locks, there's an additional reason: functions called by
the global subprog may hold additional locks, etc. The verifier won't
know while verifying the global subprog whether it was called in a
context where a spin lock is already held by the program.

Summarize whether a global subprog may sleep, just like
changes_pkt_data, and then allow calls to non-sleepable global subprogs
from atomic context.

While making this change, I noticed that RCU read sections had no
protection against sleepable global subprog calls; include them in the
checks and fix this while we're at it.

Care needs to be taken to not allow global subprog calls when a regular
bpf_spin_lock is held. When a resilient spin lock is held, we may want
to relax this check, but not for now.

Tests covering all the special conditions are included in the next
patch.
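
For illustration, a minimal sketch of what this enables and what it
still rejects (names below are made up; the usual vmlinux.h and libbpf
includes are assumed):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

/* No sleepable helpers/kfuncs inside: summarized as non-sleepable, so
 * calling it from atomic context becomes legal with this patch. */
__noinline int subprog_plain(int x)
{
	return x + 1;
}

/* Calls a sleepable helper: summarized as sleepable, so calling it from
 * an RCU/preempt/IRQ-disabled section is rejected. */
__noinline int subprog_sleeps(unsigned long uptr)
{
	char buf[8];

	return bpf_copy_from_user(buf, sizeof(buf), (void *)uptr);
}

SEC("iter.s/task")
int dump_task(struct bpf_iter__task *ctx)
{
	bpf_rcu_read_lock();
	subprog_plain(1);      /* now allowed */
	/* subprog_sleeps(0);     would now be rejected here */
	bpf_rcu_read_unlock();
	return 0;
}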

Fixes: 9bb00b2895cb ("bpf: Add kfunc bpf_rcu_read_lock/unlock()")
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 include/linux/bpf_verifier.h |  1 +
 kernel/bpf/verifier.c        | 50 ++++++++++++++++++++++++++----------
 2 files changed, 37 insertions(+), 14 deletions(-)

Comments

Eduard Zingerman Feb. 28, 2025, 8:42 p.m. UTC | #1
On Fri, 2025-02-28 at 08:28 -0800, Kumar Kartikeya Dwivedi wrote:
> The verifier currently does not permit global subprog calls when a lock
> is held, preemption is disabled, or when IRQs are disabled. This is
> because we don't know whether the global subprog calls sleepable
> functions or not.
>
> In the case of locks, there's an additional reason: functions called by
> the global subprog may hold additional locks, etc. The verifier won't
> know while verifying the global subprog whether it was called in a
> context where a spin lock is already held by the program.
>
> Summarize whether a global subprog may sleep, just like
> changes_pkt_data, and then allow calls to non-sleepable global subprogs
> from atomic context.
>
> While making this change, I noticed that RCU read sections had no
> protection against sleepable global subprog calls; include them in the
> checks and fix this while we're at it.
>
> Care needs to be taken to not allow global subprog calls when a regular
> bpf_spin_lock is held. When a resilient spin lock is held, we may want
> to relax this check, but not for now.
>
> Tests covering all the special conditions are included in the next
> patch.
> 
> Fixes: 9bb00b2895cb ("bpf: Add kfunc bpf_rcu_read_lock/unlock()")
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> ---

I think this change also has to deal with freplace for sleepable
sub-programs, e.g. see verifier.c:bpf_check_attach_target(),
part dealing with `tgt_changes_pkt_data`.
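
Presumably a check mirroring how tgt_changes_pkt_data is threaded
through there. A rough sketch of the shape such a check might take
(fragment only; prog_sleepable/tgt_sleepable are guessed names, not
the actual fix):

/* Hypothetical fragment for bpf_check_attach_target(): an freplace
 * program that may sleep must not replace a target subprog that was
 * summarized as non-sleepable, since callers may invoke it from
 * atomic context. */
if (prog_extension && prog_sleepable && !tgt_sleepable) {
	bpf_log(log, "Extension program may sleep, but replaced subprog must not\n");
	return -EINVAL;
}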

Other than that the logic seems ok.

[...]
Kumar Kartikeya Dwivedi Feb. 28, 2025, 8:47 p.m. UTC | #2
On Fri, 28 Feb 2025 at 21:42, Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Fri, 2025-02-28 at 08:28 -0800, Kumar Kartikeya Dwivedi wrote:
> > The verifier currently does not permit global subprog calls when a lock
> > is held, preemption is disabled, or when IRQs are disabled. This is
> > because we don't know whether the global subprog calls sleepable
> > functions or not.
> >
> > In the case of locks, there's an additional reason: functions called by
> > the global subprog may hold additional locks, etc. The verifier won't
> > know while verifying the global subprog whether it was called in a
> > context where a spin lock is already held by the program.
> >
> > Summarize whether a global subprog may sleep, just like
> > changes_pkt_data, and then allow calls to non-sleepable global subprogs
> > from atomic context.
> >
> > While making this change, I noticed that RCU read sections had no
> > protection against sleepable global subprog calls; include them in the
> > checks and fix this while we're at it.
> >
> > Care needs to be taken to not allow global subprog calls when a regular
> > bpf_spin_lock is held. When a resilient spin lock is held, we may want
> > to relax this check, but not for now.
> >
> > Tests covering all the special conditions are included in the next
> > patch.
> >
> > Fixes: 9bb00b2895cb ("bpf: Add kfunc bpf_rcu_read_lock/unlock()")
> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > ---
>
> I think this change also has to deal with freplace for sleepable
> sub-programs, e.g. see verifier.c:bpf_check_attach_target(),
> part dealing with `tgt_changes_pkt_data`.
>
> Other than that the logic seems ok.

Ah, good catch. Let me fix that and add a test to check it.

>
> [...]
>
Andrii Nakryiko Feb. 28, 2025, 11:18 p.m. UTC | #3
On Fri, Feb 28, 2025 at 8:29 AM Kumar Kartikeya Dwivedi
<memxor@gmail.com> wrote:
>
> The verifier currently does not permit global subprog calls when a lock
> is held, preemption is disabled, or when IRQs are disabled. This is
> because we don't know whether the global subprog calls sleepable
> functions or not.
>
> In the case of locks, there's an additional reason: functions called by
> the global subprog may hold additional locks, etc. The verifier won't
> know while verifying the global subprog whether it was called in a
> context where a spin lock is already held by the program.
>
> Summarize whether a global subprog may sleep, just like
> changes_pkt_data, and then allow calls to non-sleepable global subprogs
> from atomic context.
>
> While making this change, I noticed that RCU read sections had no
> protection against sleepable global subprog calls; include them in the
> checks and fix this while we're at it.
>
> Care needs to be taken to not allow global subprog calls when a regular
> bpf_spin_lock is held. When a resilient spin lock is held, we may want
> to relax this check, but not for now.
>
> Tests covering all the special conditions are included in the next
> patch.
>
> Fixes: 9bb00b2895cb ("bpf: Add kfunc bpf_rcu_read_lock/unlock()")
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> ---
>  include/linux/bpf_verifier.h |  1 +
>  kernel/bpf/verifier.c        | 50 ++++++++++++++++++++++++++----------
>  2 files changed, 37 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> index bbd013c38ff9..1b3cfa6cb720 100644
> --- a/include/linux/bpf_verifier.h
> +++ b/include/linux/bpf_verifier.h
> @@ -667,6 +667,7 @@ struct bpf_subprog_info {
>         /* true if bpf_fastcall stack region is used by functions that can't be inlined */
>         bool keep_fastcall_stack: 1;
>         bool changes_pkt_data: 1;
> +       bool sleepable: 1;
>
>         enum priv_stack_mode priv_stack_mode;
>         u8 arg_cnt;
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index dcd0da4e62fc..e3560d19d513 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -10317,23 +10317,18 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>         if (subprog_is_global(env, subprog)) {
>                 const char *sub_name = subprog_name(env, subprog);
>
> -               /* Only global subprogs cannot be called with a lock held. */
>                 if (env->cur_state->active_locks) {
>                         verbose(env, "global function calls are not allowed while holding a lock,\n"
>                                      "use static function instead\n");
>                         return -EINVAL;
>                 }
>
> -               /* Only global subprogs cannot be called with preemption disabled. */
> -               if (env->cur_state->active_preempt_locks) {
> -                       verbose(env, "global function calls are not allowed with preemption disabled,\n"
> -                                    "use static function instead\n");
> -                       return -EINVAL;
> -               }
> -
> -               if (env->cur_state->active_irq_id) {
> -                       verbose(env, "global function calls are not allowed with IRQs disabled,\n"
> -                                    "use static function instead\n");
> +               if (env->subprog_info[subprog].sleepable &&
> +                   (env->cur_state->active_rcu_lock || env->cur_state->active_preempt_locks ||
> +                    env->cur_state->active_irq_id || !in_sleepable(env))) {
> +                       verbose(env, "global functions that may sleep are not allowed in non-sleepable context,\n"
> +                                    "i.e., in a RCU/IRQ/preempt-disabled section, or in\n"
> +                                    "a non-sleepable BPF program context\n");
>                         return -EINVAL;
>                 }
>
> @@ -16703,6 +16698,14 @@ static void mark_subprog_changes_pkt_data(struct bpf_verifier_env *env, int off)
>         subprog->changes_pkt_data = true;
>  }
>
> +static void mark_subprog_sleepable(struct bpf_verifier_env *env, int off)
> +{
> +       struct bpf_subprog_info *subprog;
> +
> +       subprog = find_containing_subprog(env, off);
> +       subprog->sleepable = true;
> +}
> +
>  /* 't' is an index of a call-site.
>   * 'w' is a callee entry point.
>   * Eventually this function would be called when env->cfg.insn_state[w] == EXPLORED.
> @@ -16716,6 +16719,7 @@ static void merge_callee_effects(struct bpf_verifier_env *env, int t, int w)
>         caller = find_containing_subprog(env, t);
>         callee = find_containing_subprog(env, w);
>         caller->changes_pkt_data |= callee->changes_pkt_data;
> +       caller->sleepable |= callee->sleepable;
>  }
>
>  /* non-recursive DFS pseudo code
> @@ -17183,9 +17187,20 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
>                         mark_prune_point(env, t);
>                         mark_jmp_point(env, t);
>                 }
> -               if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm))
> -                       mark_subprog_changes_pkt_data(env, t);
> -               if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
> +               if (bpf_helper_call(insn)) {
> +                       const struct bpf_func_proto *fp;
> +
> +                       ret = get_helper_proto(env, insn->imm, &fp);
> +                       /* If called in a non-sleepable context program will be
> +                        * rejected anyway, so we should end up with precise
> +                        * sleepable marks on subprogs, except for dead code
> +                        * elimination.

TBH, I'm worried that we are regressing to doing all these side effect
analyses disregarding dead code elimination. It's not something
hypothetical to have an .rodata variable controlling whether, say, to
do bpf_probe_read_user() (non-sleepable) vs bpf_copy_from_user()
(sleepable) inside a global subprog, depending on some outside
configuration (e.g., whether we'll be doing SEC("iter.s/task") or it's
actually profiler logic called inside SEC("perf_event"), all
controlled by user-space). We do have use cases like this in
production already, and this dead code elimination is important in
such cases. It can probably be worked around with more global functions
and stuff like that, but still, it's worrying we are giving up on such
an important part of the BPF CO-RE approach - disabling parts of code
"dynamically" before loading BPF programs.

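For example, a minimal sketch of that pattern (illustrative names;
usual vmlinux.h/libbpf includes assumed):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* Set by user space before load; as a const .rodata flag, the verifier
 * can dead-code-eliminate the branch not taken. */
const volatile bool use_sleepable;

__noinline int read_user(unsigned long uptr)
{
	char buf[16];

	if (use_sleepable)
		return bpf_copy_from_user(buf, sizeof(buf), (void *)uptr);
	return bpf_probe_read_user(buf, sizeof(buf), (void *)uptr);
}

/* With the insn-level summarization in this patch, which runs before
 * dead code elimination, read_user() is marked sleepable even when
 * use_sleepable is false. */
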
> +                        */
> +                       if (ret == 0 && fp->might_sleep)
> +                               mark_subprog_sleepable(env, t);
> +                       if (bpf_helper_changes_pkt_data(insn->imm))
> +                               mark_subprog_changes_pkt_data(env, t);
> +               } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
>                         struct bpf_kfunc_call_arg_meta meta;
>
>                         ret = fetch_kfunc_meta(env, insn, &meta, NULL);
> @@ -17204,6 +17219,13 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
>                                  */
>                                 mark_force_checkpoint(env, t);
>                         }
> +                       /* Same as helpers, if called in a non-sleepable context
> +                        * program will be rejected anyway, so we should end up
> +                        * with precise sleepable marks on subprogs, except for
> +                        * dead code elimination.
> +                        */
> +                       if (ret == 0 && is_kfunc_sleepable(&meta))
> +                               mark_subprog_sleepable(env, t);
>                 }
>                 return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);
>
> --
> 2.43.5
>
Eduard Zingerman Feb. 28, 2025, 11:23 p.m. UTC | #4
On Fri, 2025-02-28 at 15:18 -0800, Andrii Nakryiko wrote:

[...]

> >  /* non-recursive DFS pseudo code
> > @@ -17183,9 +17187,20 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
> >                         mark_prune_point(env, t);
> >                         mark_jmp_point(env, t);
> >                 }
> > -               if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm))
> > -                       mark_subprog_changes_pkt_data(env, t);
> > -               if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
> > +               if (bpf_helper_call(insn)) {
> > +                       const struct bpf_func_proto *fp;
> > +
> > +                       ret = get_helper_proto(env, insn->imm, &fp);
> > +                       /* If called in a non-sleepable context program will be
> > +                        * rejected anyway, so we should end up with precise
> > +                        * sleepable marks on subprogs, except for dead code
> > +                        * elimination.
> 
> TBH, I'm worried that we are regressing to doing all these side effect
> analyses disregarding dead code elimination. It's not something
> hypothetical to have an .rodata variable controlling whether, say, to
> do bpf_probe_read_user() (non-sleepable) vs bpf_copy_from_user()
> (sleepable) inside global subprog, depending on some outside
> configuration (e.g., whether we'll be doing SEC("iter.s/task") or it's
> actually profiler logic called inside SEC("perf_event"), all
> controlled by user-space). We do have use cases like this in
> production already, and this dead code elimination is important in
> such cases. Probably can be worked around with more global functions
> and stuff like that, but still, it's worrying we are giving up on such
> an important part of the BPF CO-RE approach - disabling parts of code
> "dynamically" before loading BPF programs.

There were two alternatives on the table last time:
- add support for tags on global functions;
- verify global subprogram call tree in post-order,
  in order to have the flags ready when needed.

Both were rejected back then.
But we still can reconsider :)

> > +                        */
> > +                       if (ret == 0 && fp->might_sleep)
> > +                               mark_subprog_sleepable(env, t);
> > +                       if (bpf_helper_changes_pkt_data(insn->imm))
> > +                               mark_subprog_changes_pkt_data(env, t);
> > +               } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
> >                         struct bpf_kfunc_call_arg_meta meta;
> > 
> >                         ret = fetch_kfunc_meta(env, insn, &meta, NULL);

[...]
Andrii Nakryiko Feb. 28, 2025, 11:34 p.m. UTC | #5
On Fri, Feb 28, 2025 at 3:23 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Fri, 2025-02-28 at 15:18 -0800, Andrii Nakryiko wrote:
>
> [...]
>
> > >  /* non-recursive DFS pseudo code
> > > @@ -17183,9 +17187,20 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
> > >                         mark_prune_point(env, t);
> > >                         mark_jmp_point(env, t);
> > >                 }
> > > -               if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm))
> > > -                       mark_subprog_changes_pkt_data(env, t);
> > > -               if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
> > > +               if (bpf_helper_call(insn)) {
> > > +                       const struct bpf_func_proto *fp;
> > > +
> > > +                       ret = get_helper_proto(env, insn->imm, &fp);
> > > +                       /* If called in a non-sleepable context program will be
> > > +                        * rejected anyway, so we should end up with precise
> > > +                        * sleepable marks on subprogs, except for dead code
> > > +                        * elimination.
> >
> > TBH, I'm worried that we are regressing to doing all these side effect
> > analyses disregarding dead code elimination. It's not something
> > hypothetical to have an .rodata variable controlling whether, say, to
> > do bpf_probe_read_user() (non-sleepable) vs bpf_copy_from_user()
> > (sleepable) inside global subprog, depending on some outside
> > configuration (e.g., whether we'll be doing SEC("iter.s/task") or it's
> > actually profiler logic called inside SEC("perf_event"), all
> > controlled by user-space). We do have use cases like this in
> > production already, and this dead code elimination is important in
> > such cases. Probably can be worked around with more global functions
> > and stuff like that, but still, it's worrying we are giving up on such
> > an important part of the BPF CO-RE approach - disabling parts of code
> > "dynamically" before loading BPF programs.
>
> There were two alternatives on the table last time:
> - add support for tags on global functions;

I was supportive of this, I believe

> - verify global subprogram call tree in post-order,
>   in order to have the flags ready when needed.

Remind me of the details here? We'd start validating the main prog,
suspend that process when encountering global func, go validate global
func, once done, come back to main prog, right?

Alternatively, we could mark expected properties (restrictions) of
global subprogs as we encounter them, right? E.g., if we come to a
global func call inside an rcu_read_{lock,unlock}() region, we'd mark it
internally as "needs to be non-sleepable".
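
Roughly, on the verifier side (a purely hypothetical sketch; none of
these fields exist today):

/* Record restrictions for a global subprog at its call sites, then
 * enforce them once the subprog's own summary is known. */
struct subprog_call_constraints {
	bool must_not_sleep;	/* seen called in RCU/preempt/IRQ-off section */
};

static void constrain_global_callee(struct subprog_call_constraints *c)
{
	c->must_not_sleep = true;	/* call site is in atomic context */
}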

>
> Both were rejected back then.
> But we still can reconsider :)
>

yep, though I'm not really feeling hopeful

> > > +                        */
> > > +                       if (ret == 0 && fp->might_sleep)
> > > +                               mark_subprog_sleepable(env, t);
> > > +                       if (bpf_helper_changes_pkt_data(insn->imm))
> > > +                               mark_subprog_changes_pkt_data(env, t);
> > > +               } else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
> > >                         struct bpf_kfunc_call_arg_meta meta;
> > >
> > >                         ret = fetch_kfunc_meta(env, insn, &meta, NULL);
>
> [...]
>
Eduard Zingerman Feb. 28, 2025, 11:57 p.m. UTC | #6
On Fri, 2025-02-28 at 15:34 -0800, Andrii Nakryiko wrote:

[...]

> > There were two alternatives on the table last time:
> > - add support for tags on global functions;
> 
> I was supportive of this, I believe
> 
> > - verify global subprogram call tree in post-order,
> >   in order to have the flags ready when needed.
> 
> Remind me of the details here? We'd start validating the main prog,
> suspend that process when encountering global func, go validate global
> func, once done, come back to main prog, right?

Yes.
The tree can't be built statically if we account for dead code
elimination, as post-order might change.

> Alternatively, we could mark expected properties (restrictions) of
> global subprogs as we encounter them, right? E.g., if we come to a
> global func call inside an rcu_read_{lock,unlock}() region, we'd mark it
> internally as "needs to be non-sleepable".

For a situation like the one below, suppose the verification order is
main, foo, bar, buz:
- main() sleepable
  - foo()
  - bar()
- foo():
  - buz()
- bar():
  - foo() while holding lock
- buz():
  - calls something sleepable

I think, to handle this, the call tree needs to be built during the
main verification pass and then checked for sleepable calls.
But that won't work for changes_pkt_data, as the verdict has to be
known right away to decide whether to invalidate packet pointers.
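
In BPF C, the same shape would look roughly like this (hypothetical
sketch, usual includes assumed; names match the outline above):

void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

__noinline int buz(unsigned long uptr)
{
	char b[4];

	/* sleepable helper: buz(), and transitively foo(), may sleep */
	return bpf_copy_from_user(b, sizeof(b), (void *)uptr);
}

__noinline int foo(unsigned long uptr)
{
	return buz(uptr);
}

__noinline int bar(unsigned long uptr)
{
	int ret;

	bpf_rcu_read_lock();	/* stand-in for "while holding lock" */
	ret = foo(uptr);	/* unsafe: foo() may sleep, but that is only
				 * known once buz() has been verified */
	bpf_rcu_read_unlock();
	return ret;
}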

[...]
Kumar Kartikeya Dwivedi March 1, 2025, 1:43 a.m. UTC | #7
On Sat, 1 Mar 2025 at 00:57, Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Fri, 2025-02-28 at 15:34 -0800, Andrii Nakryiko wrote:
>
> [...]
>
> > > There were two alternatives on the table last time:
> > > - add support for tags on global functions;
> >
> > I was supportive of this, I believe
> >
> > > - verify global subprogram call tree in post-order,
> > >   in order to have the flags ready when needed.
> >
> > Remind me of the details here? We'd start validating the main prog,
> > suspend that process when encountering global func, go validate global
> > func, once done, come back to main prog, right?
>
> Yes.
> The tree can't be built statically if we account for dead code
> elimination, as post-order might change.
>
> > Alternatively, we could mark expected properties (restrictions) of
> > global subprogs as we encounter them, right? E.g., if we come to a
> > global func call inside an rcu_read_{lock,unlock}() region, we'd mark it
> > internally as "needs to be non-sleepable".
>
> For a situation like the one below, suppose the verification order is
> main, foo, bar, buz:
> - main() sleepable
>   - foo()
>   - bar()
> - foo():
>   - buz()
> - bar():
>   - foo() while holding lock
> - buz():
>   - calls something sleepable
>
> I think, to handle this, the call tree needs to be built during the
> main verification pass and then checked for sleepable calls.
> But that won't work for changes_pkt_data, as the verdict has to be
> known right away to decide whether to invalidate packet pointers.
>

I know over-conservative marking in the presence of possible DCE is
non-ideal (that's why I put in the comment, so we can revisit it
later), but I'm getting the sense from this thread that either option
is a lot more work/complexity, or insufficient.
Except for possibly tagging things properly, but that's been nipped in
the bud.

So I'm going to prepare a v2 addressing Eduard's comments, and if we
reach a consensus, I can follow up to address both changes_pkt_data
and sleepable global subprogs.

> [...]
>

Patch

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index bbd013c38ff9..1b3cfa6cb720 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -667,6 +667,7 @@ struct bpf_subprog_info {
 	/* true if bpf_fastcall stack region is used by functions that can't be inlined */
 	bool keep_fastcall_stack: 1;
 	bool changes_pkt_data: 1;
+	bool sleepable: 1;
 
 	enum priv_stack_mode priv_stack_mode;
 	u8 arg_cnt;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index dcd0da4e62fc..e3560d19d513 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -10317,23 +10317,18 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	if (subprog_is_global(env, subprog)) {
 		const char *sub_name = subprog_name(env, subprog);
 
-		/* Only global subprogs cannot be called with a lock held. */
 		if (env->cur_state->active_locks) {
 			verbose(env, "global function calls are not allowed while holding a lock,\n"
 				     "use static function instead\n");
 			return -EINVAL;
 		}
 
-		/* Only global subprogs cannot be called with preemption disabled. */
-		if (env->cur_state->active_preempt_locks) {
-			verbose(env, "global function calls are not allowed with preemption disabled,\n"
-				     "use static function instead\n");
-			return -EINVAL;
-		}
-
-		if (env->cur_state->active_irq_id) {
-			verbose(env, "global function calls are not allowed with IRQs disabled,\n"
-				     "use static function instead\n");
+		if (env->subprog_info[subprog].sleepable &&
+		    (env->cur_state->active_rcu_lock || env->cur_state->active_preempt_locks ||
+		     env->cur_state->active_irq_id || !in_sleepable(env))) {
+			verbose(env, "global functions that may sleep are not allowed in non-sleepable context,\n"
+				     "i.e., in a RCU/IRQ/preempt-disabled section, or in\n"
+				     "a non-sleepable BPF program context\n");
 			return -EINVAL;
 		}
 
@@ -16703,6 +16698,14 @@ static void mark_subprog_changes_pkt_data(struct bpf_verifier_env *env, int off)
 	subprog->changes_pkt_data = true;
 }
 
+static void mark_subprog_sleepable(struct bpf_verifier_env *env, int off)
+{
+	struct bpf_subprog_info *subprog;
+
+	subprog = find_containing_subprog(env, off);
+	subprog->sleepable = true;
+}
+
 /* 't' is an index of a call-site.
  * 'w' is a callee entry point.
  * Eventually this function would be called when env->cfg.insn_state[w] == EXPLORED.
@@ -16716,6 +16719,7 @@ static void merge_callee_effects(struct bpf_verifier_env *env, int t, int w)
 	caller = find_containing_subprog(env, t);
 	callee = find_containing_subprog(env, w);
 	caller->changes_pkt_data |= callee->changes_pkt_data;
+	caller->sleepable |= callee->sleepable;
 }
 
 /* non-recursive DFS pseudo code
@@ -17183,9 +17187,20 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
 			mark_prune_point(env, t);
 			mark_jmp_point(env, t);
 		}
-		if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm))
-			mark_subprog_changes_pkt_data(env, t);
-		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+		if (bpf_helper_call(insn)) {
+			const struct bpf_func_proto *fp;
+
+			ret = get_helper_proto(env, insn->imm, &fp);
+			/* If called in a non-sleepable context program will be
+			 * rejected anyway, so we should end up with precise
+			 * sleepable marks on subprogs, except for dead code
+			 * elimination.
+			 */
+			if (ret == 0 && fp->might_sleep)
+				mark_subprog_sleepable(env, t);
+			if (bpf_helper_changes_pkt_data(insn->imm))
+				mark_subprog_changes_pkt_data(env, t);
+		} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
 			struct bpf_kfunc_call_arg_meta meta;
 
 			ret = fetch_kfunc_meta(env, insn, &meta, NULL);
@@ -17204,6 +17219,13 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
 				 */
 				mark_force_checkpoint(env, t);
 			}
+			/* Same as helpers, if called in a non-sleepable context
+			 * program will be rejected anyway, so we should end up
+			 * with precise sleepable marks on subprogs, except for
+			 * dead code elimination.
+			 */
+			if (ret == 0 && is_kfunc_sleepable(&meta))
+				mark_subprog_sleepable(env, t);
 		}
 		return visit_func_call_insn(t, insns, env, insn->src_reg == BPF_PSEUDO_CALL);