
[v2,bpf-next,01/14] bpf: refactor BPF_PROG_RUN into a function

Message ID 20210726161211.925206-2-andrii@kernel.org (mailing list archive)
State Changes Requested
Delegated to: BPF
Series BPF perf link and user-provided context value

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 9 maintainers not CCed: netdev@vger.kernel.org yhs@fb.com hawk@kernel.org kpsingh@kernel.org kafai@fb.com john.fastabend@gmail.com songliubraving@fb.com davem@davemloft.net kuba@kernel.org
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 3232 this patch: 3232
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch warning WARNING: line length of 85 exceeds 80 columns WARNING: line length of 93 exceeds 80 columns
netdev/build_allmodconfig_warn success Errors and warnings before: 3319 this patch: 3319
netdev/header_inline success

Commit Message

Andrii Nakryiko July 26, 2021, 4:11 p.m. UTC
Turn BPF_PROG_RUN into a proper always-inlined function. No functional or
performance changes are intended, but it makes it much easier to understand
how BPF programs actually get executed. It is more obvious what types and
callbacks are expected. Also, the extra parentheses around input parameters
can be dropped, as well as the `__` variable prefixes used to avoid naming
collisions, all of which makes the code simpler to read and write.

This refactoring also highlighted one possible issue: BPF_PROG_RUN is both
a macro and an enum value (BPF_PROG_RUN == BPF_PROG_TEST_RUN), so turning
BPF_PROG_RUN into a function causes a compilation error due to the naming
conflict. Therefore, rename BPF_PROG_RUN to the lower-case bpf_prog_run(),
similar to bpf_prog_run_xdp(), bpf_prog_run_pin_on_cpu(), etc. To avoid
unnecessary code churn across the many networking call sites that use
BPF_PROG_RUN, keep a #define of BPF_PROG_RUN as an alias for bpf_prog_run.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 include/linux/filter.h | 58 +++++++++++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 21 deletions(-)
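
For illustration, a minimal hypothetical call site (not part of this patch; the function name and its skb argument are made up) shows why the alias avoids churn: existing users of the upper-case macro keep compiling unchanged, while new code can call the function directly. As before, callers are expected to run with migration disabled, since cant_migrate() is checked inside.

/* Hypothetical callers, for illustration only. */
static u32 example_run_prog_old_style(const struct bpf_prog *prog,
				      struct sk_buff *skb)
{
	/* Legacy spelling: expands to bpf_prog_run(prog, skb) via the alias. */
	return BPF_PROG_RUN(prog, skb);
}

static u32 example_run_prog_new_style(const struct bpf_prog *prog,
				      struct sk_buff *skb)
{
	/* New spelling: call the always-inlined function directly. */
	return bpf_prog_run(prog, skb);
}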

Comments

Yonghong Song July 29, 2021, 4:49 p.m. UTC | #1
On 7/26/21 9:11 AM, Andrii Nakryiko wrote:
> Turn BPF_PROG_RUN into a proper always-inlined function. No functional or
> performance changes are intended, but it makes it much easier to understand
> how BPF programs actually get executed. It is more obvious what types and
> callbacks are expected. Also, the extra parentheses around input parameters
> can be dropped, as well as the `__` variable prefixes used to avoid naming
> collisions, all of which makes the code simpler to read and write.
> 
> This refactoring also highlighted one possible issue: BPF_PROG_RUN is both
> a macro and an enum value (BPF_PROG_RUN == BPF_PROG_TEST_RUN), so turning
> BPF_PROG_RUN into a function causes a compilation error due to the naming
> conflict. Therefore, rename BPF_PROG_RUN to the lower-case bpf_prog_run(),
> similar to bpf_prog_run_xdp(), bpf_prog_run_pin_on_cpu(), etc. To avoid
> unnecessary code churn across the many networking call sites that use
> BPF_PROG_RUN, keep a #define of BPF_PROG_RUN as an alias for bpf_prog_run.
> 
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
>   include/linux/filter.h | 58 +++++++++++++++++++++++++++---------------
>   1 file changed, 37 insertions(+), 21 deletions(-)
> 
> diff --git a/include/linux/filter.h b/include/linux/filter.h
> index ba36989f711a..e59c97c72233 100644
> --- a/include/linux/filter.h
> +++ b/include/linux/filter.h
> @@ -585,25 +585,41 @@ struct sk_filter {
>   
>   DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
>   
> -#define __BPF_PROG_RUN(prog, ctx, dfunc)	({			\
> -	u32 __ret;							\
> -	cant_migrate();							\
> -	if (static_branch_unlikely(&bpf_stats_enabled_key)) {		\
> -		struct bpf_prog_stats *__stats;				\
> -		u64 __start = sched_clock();				\
> -		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
> -		__stats = this_cpu_ptr(prog->stats);			\
> -		u64_stats_update_begin(&__stats->syncp);		\
> -		__stats->cnt++;						\
> -		__stats->nsecs += sched_clock() - __start;		\
> -		u64_stats_update_end(&__stats->syncp);			\
> -	} else {							\
> -		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
> -	}								\
> -	__ret; })
> -
> -#define BPF_PROG_RUN(prog, ctx)						\
> -	__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
> +typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
> +					  const struct bpf_insn *insnsi,
> +					  unsigned int (*bpf_func)(const void *,
> +								   const struct bpf_insn *));
> +
> +static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
> +					  const void *ctx,
> +					  bpf_dispatcher_fn dfunc)
> +{
> +	u32 ret;
> +
> +	cant_migrate();
> +	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
> +		struct bpf_prog_stats *stats;
> +		u64 start = sched_clock();
> +
> +		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
> +		stats = this_cpu_ptr(prog->stats);
> +		u64_stats_update_begin(&stats->syncp);
> +		stats->cnt++;
> +		stats->nsecs += sched_clock() - start;
> +		u64_stats_update_end(&stats->syncp);
> +	} else {
> +		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
> +	}
> +	return ret;
> +}
> +
> +static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
> +{
> +	return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
> +}
> +
> +/* avoids name conflict with BPF_PROG_RUN enum defined in uapi/linux/bpf.h */
> +#define BPF_PROG_RUN bpf_prog_run
>   
>   /*
>    * Use in preemptible and therefore migratable context to make sure that
> @@ -622,7 +638,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
>   	u32 ret;
>   
>   	migrate_disable();
> -	ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
> +	ret = __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);

This can be replaced with bpf_prog_run(prog, ctx).

>   	migrate_enable();
>   	return ret;
>   }
> @@ -768,7 +784,7 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
>   	 * under local_bh_disable(), which provides the needed RCU protection
>   	 * for accessing map entries.
>   	 */
> -	return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
> +	return __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
>   }
>   
>   void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
>
Andrii Nakryiko July 30, 2021, 4:05 a.m. UTC | #2
On Thu, Jul 29, 2021 at 9:50 AM Yonghong Song <yhs@fb.com> wrote:
>
>
>
> On 7/26/21 9:11 AM, Andrii Nakryiko wrote:
> > Turn BPF_PROG_RUN into a proper always-inlined function. No functional or
> > performance changes are intended, but it makes it much easier to understand
> > how BPF programs actually get executed. It is more obvious what types and
> > callbacks are expected. Also, the extra parentheses around input parameters
> > can be dropped, as well as the `__` variable prefixes used to avoid naming
> > collisions, all of which makes the code simpler to read and write.
> >
> > This refactoring also highlighted one possible issue: BPF_PROG_RUN is both
> > a macro and an enum value (BPF_PROG_RUN == BPF_PROG_TEST_RUN), so turning
> > BPF_PROG_RUN into a function causes a compilation error due to the naming
> > conflict. Therefore, rename BPF_PROG_RUN to the lower-case bpf_prog_run(),
> > similar to bpf_prog_run_xdp(), bpf_prog_run_pin_on_cpu(), etc. To avoid
> > unnecessary code churn across the many networking call sites that use
> > BPF_PROG_RUN, keep a #define of BPF_PROG_RUN as an alias for bpf_prog_run.
> >
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > ---
> >   include/linux/filter.h | 58 +++++++++++++++++++++++++++---------------
> >   1 file changed, 37 insertions(+), 21 deletions(-)
> >
> > diff --git a/include/linux/filter.h b/include/linux/filter.h
> > index ba36989f711a..e59c97c72233 100644
> > --- a/include/linux/filter.h
> > +++ b/include/linux/filter.h
> > @@ -585,25 +585,41 @@ struct sk_filter {
> >
> >   DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
> >
> > -#define __BPF_PROG_RUN(prog, ctx, dfunc)     ({                      \
> > -     u32 __ret;                                                      \
> > -     cant_migrate();                                                 \
> > -     if (static_branch_unlikely(&bpf_stats_enabled_key)) {           \
> > -             struct bpf_prog_stats *__stats;                         \
> > -             u64 __start = sched_clock();                            \
> > -             __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);   \
> > -             __stats = this_cpu_ptr(prog->stats);                    \
> > -             u64_stats_update_begin(&__stats->syncp);                \
> > -             __stats->cnt++;                                         \
> > -             __stats->nsecs += sched_clock() - __start;              \
> > -             u64_stats_update_end(&__stats->syncp);                  \
> > -     } else {                                                        \
> > -             __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);   \
> > -     }                                                               \
> > -     __ret; })
> > -
> > -#define BPF_PROG_RUN(prog, ctx)                                              \
> > -     __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
> > +typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
> > +                                       const struct bpf_insn *insnsi,
> > +                                       unsigned int (*bpf_func)(const void *,
> > +                                                                const struct bpf_insn *));
> > +
> > +static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
> > +                                       const void *ctx,
> > +                                       bpf_dispatcher_fn dfunc)
> > +{
> > +     u32 ret;
> > +
> > +     cant_migrate();
> > +     if (static_branch_unlikely(&bpf_stats_enabled_key)) {
> > +             struct bpf_prog_stats *stats;
> > +             u64 start = sched_clock();
> > +
> > +             ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
> > +             stats = this_cpu_ptr(prog->stats);
> > +             u64_stats_update_begin(&stats->syncp);
> > +             stats->cnt++;
> > +             stats->nsecs += sched_clock() - start;
> > +             u64_stats_update_end(&stats->syncp);
> > +     } else {
> > +             ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
> > +     }
> > +     return ret;
> > +}
> > +
> > +static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
> > +{
> > +     return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
> > +}
> > +
> > +/* avoids name conflict with BPF_PROG_RUN enum defined in uapi/linux/bpf.h */
> > +#define BPF_PROG_RUN bpf_prog_run
> >
> >   /*
> >    * Use in preemptible and therefore migratable context to make sure that
> > @@ -622,7 +638,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
> >       u32 ret;
> >
> >       migrate_disable();
> > -     ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
> > +     ret = __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
>
> This can be replaced with bpf_prog_run(prog, ctx).
>

ok, sure

> >       migrate_enable();
> >       return ret;
> >   }
> > @@ -768,7 +784,7 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
> >        * under local_bh_disable(), which provides the needed RCU protection
> >        * for accessing map entries.
> >        */
> > -     return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
> > +     return __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
> >   }
> >
> >   void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
> >

Patch

diff --git a/include/linux/filter.h b/include/linux/filter.h
index ba36989f711a..e59c97c72233 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -585,25 +585,41 @@  struct sk_filter {
 
 DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 
-#define __BPF_PROG_RUN(prog, ctx, dfunc)	({			\
-	u32 __ret;							\
-	cant_migrate();							\
-	if (static_branch_unlikely(&bpf_stats_enabled_key)) {		\
-		struct bpf_prog_stats *__stats;				\
-		u64 __start = sched_clock();				\
-		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
-		__stats = this_cpu_ptr(prog->stats);			\
-		u64_stats_update_begin(&__stats->syncp);		\
-		__stats->cnt++;						\
-		__stats->nsecs += sched_clock() - __start;		\
-		u64_stats_update_end(&__stats->syncp);			\
-	} else {							\
-		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
-	}								\
-	__ret; })
-
-#define BPF_PROG_RUN(prog, ctx)						\
-	__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
+typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
+					  const struct bpf_insn *insnsi,
+					  unsigned int (*bpf_func)(const void *,
+								   const struct bpf_insn *));
+
+static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
+					  const void *ctx,
+					  bpf_dispatcher_fn dfunc)
+{
+	u32 ret;
+
+	cant_migrate();
+	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
+		struct bpf_prog_stats *stats;
+		u64 start = sched_clock();
+
+		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+		stats = this_cpu_ptr(prog->stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->cnt++;
+		stats->nsecs += sched_clock() - start;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
+	}
+	return ret;
+}
+
+static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
+{
+	return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
+}
+
+/* avoids name conflict with BPF_PROG_RUN enum defined in uapi/linux/bpf.h */
+#define BPF_PROG_RUN bpf_prog_run
 
 /*
  * Use in preemptible and therefore migratable context to make sure that
@@ -622,7 +638,7 @@  static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
 	u32 ret;
 
 	migrate_disable();
-	ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func);
+	ret = __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
 	migrate_enable();
 	return ret;
 }
@@ -768,7 +784,7 @@  static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
 	 * under local_bh_disable(), which provides the needed RCU protection
 	 * for accessing map entries.
 	 */
-	return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
+	return __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));
 }
 
 void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
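
As a usage note on the new bpf_dispatcher_fn typedef: any function with a matching signature can be passed to __bpf_prog_run(). A hypothetical dispatcher (illustration only, not part of the kernel) could look like this:

/* Hypothetical dispatcher matching the bpf_dispatcher_fn typedef above;
 * it simply forwards to the program's entry point, which is what a no-op
 * dispatcher amounts to.
 */
static __always_inline unsigned int example_dispatcher(const void *ctx,
				const struct bpf_insn *insnsi,
				unsigned int (*bpf_func)(const void *,
							 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}

Calling __bpf_prog_run(prog, ctx, example_dispatcher) would then run the program through it, much like bpf_prog_run() does with bpf_dispatcher_nop_func.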