
[v2,bpf-next,3/7] bpf: Add per-program recursion prevention mechanism

Message ID 20210206170344.78399-4-alexei.starovoitov@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Series bpf: Misc improvements

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count success Link
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success Link
netdev/cc_maintainers warning 16 maintainers not CCed: songliubraving@fb.com kafai@fb.com john.fastabend@gmail.com yhs@fb.com linux-kselftest@vger.kernel.org ast@kernel.org mingo@redhat.com x86@kernel.org netdev@vger.kernel.org kpsingh@kernel.org andrii@kernel.org yoshfuji@linux-ipv6.org tglx@linutronix.de bp@alien8.de hpa@zytor.com shuah@kernel.org
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 12175 this patch: 12175
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch warning CHECK: No space is necessary after a cast; CHECK: Unnecessary parentheses around 'prog->active'; CHECK: Unnecessary parentheses around prog->active; WARNING: line length of 81 exceeds 80 columns
netdev/build_allmodconfig_warn success Errors and warnings before: 12823 this patch: 12823
netdev/header_inline success Link
netdev/stable success Stable not CCed

Commit Message

Alexei Starovoitov Feb. 6, 2021, 5:03 p.m. UTC
From: Alexei Starovoitov <ast@kernel.org>

Since both sleepable and non-sleepable programs execute under migrate_disable,
add a recursion prevention mechanism to both types of programs when they're
executed via the bpf trampoline.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 arch/x86/net/bpf_jit_comp.c                   | 15 +++++++++++++
 include/linux/bpf.h                           |  6 ++---
 include/linux/filter.h                        |  1 +
 kernel/bpf/core.c                             |  8 +++++++
 kernel/bpf/trampoline.c                       | 22 ++++++++++++++-----
 .../selftests/bpf/prog_tests/fexit_stress.c   |  2 +-
 .../bpf/prog_tests/trampoline_count.c         |  4 ++--
 7 files changed, 47 insertions(+), 11 deletions(-)
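
The guard itself is a per-CPU int counter attached to each bpf_prog ("active"
in the hunks below): the trampoline-generated code bumps it on entry, runs the
program body only when the counter transitions to 1 on this CPU, and drops it
again on exit. A condensed sketch of the control flow the trampoline emits for
each prog (illustrative only, distilled from the x86 JIT hunk; "ctx" and "start"
are just names for this sketch, not literal kernel code):

	start = __bpf_prog_enter(prog);	/* __this_cpu_inc_return(*prog->active) */
	if (start)			/* 0 means recursion on this CPU: skip the body */
		prog->bpf_func(ctx, prog->insnsi);
	__bpf_prog_exit(prog, start);	/* counts stats only if start > NO_START_TIME,
					 * then __this_cpu_dec(*prog->active) */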

Comments

Andrii Nakryiko Feb. 8, 2021, 8:51 p.m. UTC | #1
On Sat, Feb 6, 2021 at 9:05 AM Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> From: Alexei Starovoitov <ast@kernel.org>
>
> Since both sleepable and non-sleepable programs execute under migrate_disable
> add recursion prevention mechanism to both types of programs when they're
> executed via bpf trampoline.
>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---
>  arch/x86/net/bpf_jit_comp.c                   | 15 +++++++++++++
>  include/linux/bpf.h                           |  6 ++---
>  include/linux/filter.h                        |  1 +
>  kernel/bpf/core.c                             |  8 +++++++
>  kernel/bpf/trampoline.c                       | 22 ++++++++++++++-----
>  .../selftests/bpf/prog_tests/fexit_stress.c   |  2 +-
>  .../bpf/prog_tests/trampoline_count.c         |  4 ++--
>  7 files changed, 47 insertions(+), 11 deletions(-)
>

[...]

> diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
> index b1f567514b7e..226f613ab289 100644
> --- a/kernel/bpf/trampoline.c
> +++ b/kernel/bpf/trampoline.c
> @@ -388,16 +388,21 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
>   * call prog->bpf_func
>   * call __bpf_prog_exit
>   */
> -#define NO_START_TIME 0
> -u64 notrace __bpf_prog_enter(void)
> +#define NO_START_TIME 1
> +u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
>         __acquires(RCU)
>  {
>         u64 start = NO_START_TIME;
>
>         rcu_read_lock();
>         migrate_disable();
> -       if (static_branch_unlikely(&bpf_stats_enabled_key))
> +       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
> +               return 0;
> +       if (static_branch_unlikely(&bpf_stats_enabled_key)) {
>                 start = sched_clock();
> +               if (unlikely(!start))
> +                       start = NO_START_TIME;
> +       }
>         return start;
>  }
>
> @@ -425,25 +430,32 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
>         __releases(RCU)
>  {
>         update_prog_stats(prog, start);
> +       __this_cpu_dec(*(prog->active));
>         migrate_enable();
>         rcu_read_unlock();
>  }
>
> -u64 notrace __bpf_prog_enter_sleepable(void)
> +u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
>  {
>         u64 start = NO_START_TIME;
>
>         rcu_read_lock_trace();
>         migrate_disable();
>         might_fault();
> -       if (static_branch_unlikely(&bpf_stats_enabled_key))
> +       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
> +               return 0;
> +       if (static_branch_unlikely(&bpf_stats_enabled_key)) {
>                 start = sched_clock();
> +               if (unlikely(!start))
> +                       start = NO_START_TIME;
> +       }
>         return start;


maybe extract this piece into a function, so that enter functions
would look like:

...
if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
         return 0;
return bpf_prog_start_time();

no need for u64 start initialization, more linear code, and no
duplication of logic?

Oh, and actually, given you have `start > NO_START_TIME` condition in
exit function, you don't need this `if (unlikely(!start))` bit at all,
because you are going to ignore both 0 and 1. So maybe no need for a
new function, but no need for extra if as well.
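
For concreteness, a sketch of the helper being suggested (the name
bpf_prog_start_time() comes from the snippet above; the body is just the stats
branch factored out of the two enter functions as they stand in this patch):

static u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}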

>  }
>
>  void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
>  {
>         update_prog_stats(prog, start);
> +       __this_cpu_dec(*(prog->active));
>         migrate_enable();
>         rcu_read_unlock_trace();
>  }
> diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
> index 3b9dbf7433f0..4698b0d2de36 100644
> --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
> +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
> @@ -3,7 +3,7 @@
>  #include <test_progs.h>
>
>  /* x86-64 fits 55 JITed and 43 interpreted progs into half page */

Probably the comment is a bit outdated now forcing you to decrease CNT?

> -#define CNT 40
> +#define CNT 38
>
>  void test_fexit_stress(void)
>  {
> diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
> index 781c8d11604b..f3022d934e2d 100644
> --- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
> +++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
> @@ -4,7 +4,7 @@
>  #include <sys/prctl.h>
>  #include <test_progs.h>
>
> -#define MAX_TRAMP_PROGS 40
> +#define MAX_TRAMP_PROGS 38
>
>  struct inst {
>         struct bpf_object *obj;
> @@ -52,7 +52,7 @@ void test_trampoline_count(void)
>         struct bpf_link *link;
>         char comm[16] = {};
>
> -       /* attach 'allowed' 40 trampoline programs */
> +       /* attach 'allowed' trampoline programs */
>         for (i = 0; i < MAX_TRAMP_PROGS; i++) {
>                 obj = bpf_object__open_file(object, NULL);
>                 if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {
> --
> 2.24.1
>
Alexei Starovoitov Feb. 9, 2021, 7:06 p.m. UTC | #2
On 2/8/21 12:51 PM, Andrii Nakryiko wrote:
>>                  start = sched_clock();
>> +               if (unlikely(!start))
>> +                       start = NO_START_TIME;
>> +       }
>>          return start;
> 
> 
> Oh, and actually, given you have `start > NO_START_TIME` condition in
> exit function, you don't need this `if (unlikely(!start))` bit at all,
> because you are going to ignore both 0 and 1. So maybe no need for a
> new function, but no need for extra if as well.

This unlikely(!start) is needed for the very unlikely case when
sched_clock() returns 0. In that case the prog should still be executed.
Andrii Nakryiko Feb. 9, 2021, 7:15 p.m. UTC | #3
On Tue, Feb 9, 2021 at 11:06 AM Alexei Starovoitov <ast@fb.com> wrote:
>
> On 2/8/21 12:51 PM, Andrii Nakryiko wrote:
> >>                  start = sched_clock();
> >> +               if (unlikely(!start))
> >> +                       start = NO_START_TIME;
> >> +       }
> >>          return start;
> >
> >
> > Oh, and actually, given you have `start > NO_START_TIME` condition in
> > exit function, you don't need this `if (unlikely(!start))` bit at all,
> > because you are going to ignore both 0 and 1. So maybe no need for a
> > new function, but no need for extra if as well.
>
> This unlikely(!start) is needed for very unlikely case when
> sched_clock() returns 0. In such case the prog should still be executed.
>

oh, right, I forgot we now skip execution when start == 0. Then I
guess the point of a helper function stands.
>
>
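
For reference, the exit-side check both comments above refer to lives in
update_prog_stats(), which was introduced earlier in this series and is not
part of this diff; it presumably looks roughly like the following sketch:

static void notrace update_prog_stats(struct bpf_prog *prog, u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* start == 0: the prog body was skipped due to recursion.
	     * start == NO_START_TIME: stats were off at entry, or
	     * sched_clock() happened to return 0 and was bumped to 1 so
	     * the trampoline would not treat it as the "skip" signal.
	     * Either way there is nothing to account here.
	     */
	    start > NO_START_TIME) {
		stats = this_cpu_ptr(prog->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
}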

Patch

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d11b9bcebbea..79e7a0ec1da5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1740,8 +1740,11 @@  static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			   struct bpf_prog *p, int stack_size, bool mod_ret)
 {
 	u8 *prog = *pprog;
+	u8 *jmp_insn;
 	int cnt = 0;
 
+	/* arg1: mov rdi, progs[i] */
+	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
 	if (emit_call(&prog,
 		      p->aux->sleepable ? __bpf_prog_enter_sleepable :
 		      __bpf_prog_enter, prog))
@@ -1749,6 +1752,14 @@  static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	/* remember prog start time returned by __bpf_prog_enter */
 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
 
+	/* if (__bpf_prog_enter*(prog) == 0)
+	 *	goto skip_exec_of_prog;
+	 */
+	EMIT3(0x48, 0x85, 0xC0);  /* test rax,rax */
+	/* emit 2 nops that will be replaced with JE insn */
+	jmp_insn = prog;
+	emit_nops(&prog, 2);
+
 	/* arg1: lea rdi, [rbp - stack_size] */
 	EMIT4(0x48, 0x8D, 0x7D, -stack_size);
 	/* arg2: progs[i]->insnsi for interpreter */
@@ -1767,6 +1778,10 @@  static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	if (mod_ret)
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
 
+	/* replace 2 nops with JE insn, since jmp target is known */
+	jmp_insn[0] = X86_JE;
+	jmp_insn[1] = prog - jmp_insn - 2;
+
 	/* arg1: mov rdi, progs[i] */
 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
 	/* arg2: mov rsi, rbx <- start time in nsec */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 2fa48439ef31..6f019b06a2fd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -529,7 +529,7 @@  struct btf_func_model {
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
  */
-#define BPF_MAX_TRAMP_PROGS 40
+#define BPF_MAX_TRAMP_PROGS 38
 
 struct bpf_tramp_progs {
 	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
@@ -561,9 +561,9 @@  int arch_prepare_bpf_trampoline(void *image, void *image_end,
 				struct bpf_tramp_progs *tprogs,
 				void *orig_call);
 /* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(void);
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
-u64 notrace __bpf_prog_enter_sleepable(void);
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
 
 struct bpf_ksym {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c6592590a0b7..d6c740eac056 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -567,6 +567,7 @@  struct bpf_prog {
 	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
 	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
 	struct bpf_prog_stats __percpu *stats;
+	int __percpu		*active;
 	unsigned int		(*bpf_func)(const void *ctx,
 					    const struct bpf_insn *insn);
 	/* Instructions for interpreter */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index fa3da4cda476..f4560dbe7f31 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -91,6 +91,12 @@  struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 		vfree(fp);
 		return NULL;
 	}
+	fp->active = alloc_percpu_gfp(int, GFP_KERNEL_ACCOUNT | gfp_extra_flags);
+	if (!fp->active) {
+		vfree(fp);
+		kfree(aux);
+		return NULL;
+	}
 
 	fp->pages = size / PAGE_SIZE;
 	fp->aux = aux;
@@ -116,6 +122,7 @@  struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 
 	prog->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
 	if (!prog->stats) {
+		free_percpu(prog->active);
 		kfree(prog->aux);
 		vfree(prog);
 		return NULL;
@@ -250,6 +257,7 @@  void __bpf_prog_free(struct bpf_prog *fp)
 		mutex_destroy(&fp->aux->used_maps_mutex);
 		mutex_destroy(&fp->aux->dst_mutex);
 		free_percpu(fp->stats);
+		free_percpu(fp->active);
 		kfree(fp->aux->poke_tab);
 		kfree(fp->aux);
 	}
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index b1f567514b7e..226f613ab289 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -388,16 +388,21 @@  void bpf_trampoline_put(struct bpf_trampoline *tr)
  * call prog->bpf_func
  * call __bpf_prog_exit
  */
-#define NO_START_TIME 0
-u64 notrace __bpf_prog_enter(void)
+#define NO_START_TIME 1
+u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
 	__acquires(RCU)
 {
 	u64 start = NO_START_TIME;
 
 	rcu_read_lock();
 	migrate_disable();
-	if (static_branch_unlikely(&bpf_stats_enabled_key))
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+		return 0;
+	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
 		start = sched_clock();
+		if (unlikely(!start))
+			start = NO_START_TIME;
+	}
 	return start;
 }
 
@@ -425,25 +430,32 @@  void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
 	__releases(RCU)
 {
 	update_prog_stats(prog, start);
+	__this_cpu_dec(*(prog->active));
 	migrate_enable();
 	rcu_read_unlock();
 }
 
-u64 notrace __bpf_prog_enter_sleepable(void)
+u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
 {
 	u64 start = NO_START_TIME;
 
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
-	if (static_branch_unlikely(&bpf_stats_enabled_key))
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+		return 0;
+	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
 		start = sched_clock();
+		if (unlikely(!start))
+			start = NO_START_TIME;
+	}
 	return start;
 }
 
 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
 {
 	update_prog_stats(prog, start);
+	__this_cpu_dec(*(prog->active));
 	migrate_enable();
 	rcu_read_unlock_trace();
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 3b9dbf7433f0..4698b0d2de36 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -3,7 +3,7 @@ 
 #include <test_progs.h>
 
 /* x86-64 fits 55 JITed and 43 interpreted progs into half page */
-#define CNT 40
+#define CNT 38
 
 void test_fexit_stress(void)
 {
diff --git a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
index 781c8d11604b..f3022d934e2d 100644
--- a/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
+++ b/tools/testing/selftests/bpf/prog_tests/trampoline_count.c
@@ -4,7 +4,7 @@ 
 #include <sys/prctl.h>
 #include <test_progs.h>
 
-#define MAX_TRAMP_PROGS 40
+#define MAX_TRAMP_PROGS 38
 
 struct inst {
 	struct bpf_object *obj;
@@ -52,7 +52,7 @@  void test_trampoline_count(void)
 	struct bpf_link *link;
 	char comm[16] = {};
 
-	/* attach 'allowed' 40 trampoline programs */
+	/* attach 'allowed' trampoline programs */
 	for (i = 0; i < MAX_TRAMP_PROGS; i++) {
 		obj = bpf_object__open_file(object, NULL);
 		if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) {