Message ID | 20210726161211.925206-7-andrii@kernel.org (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | BPF |
Series | BPF perf link and user-provided context value |
On 7/26/21 9:12 AM, Andrii Nakryiko wrote:
> Add new BPF helper, bpf_get_user_ctx(), which can be used by BPF programs to
> get access to the user_ctx value, specified during BPF program attachment (BPF
> link creation) time.
>
> Currently all perf_event-backed BPF program types support bpf_get_user_ctx()
> helper. Follow-up patches will add support for fentry/fexit programs as well.
>
> While at it, mark bpf_tracing_func_proto() as static to make it obvious that
> it's only used from within the kernel/trace/bpf_trace.c.
>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---
>  include/linux/bpf.h            |  3 ---
>  include/uapi/linux/bpf.h       | 16 ++++++++++++++++
>  kernel/trace/bpf_trace.c       | 35 +++++++++++++++++++++++++++++++++-
>  tools/include/uapi/linux/bpf.h | 16 ++++++++++++++++
>  4 files changed, 66 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index 74b35faf0b73..94ebedc1e13a 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -2110,9 +2110,6 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
>   extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
>   extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
>
> -const struct bpf_func_proto *bpf_tracing_func_proto(
> -	enum bpf_func_id func_id, const struct bpf_prog *prog);
> -
>   const struct bpf_func_proto *tracing_prog_func_proto(
>   	enum bpf_func_id func_id, const struct bpf_prog *prog);
>
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index bc1fd54a8f58..96afeced3467 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -4856,6 +4856,21 @@ union bpf_attr {
>   *		Get address of the traced function (for tracing and kprobe programs).
>   *	Return
>   *		Address of the traced function.
> + *
> + * u64 bpf_get_user_ctx(void *ctx)
> + *	Description
> + *		Get user_ctx value provided (optionally) during the program
> + *		attachment. It might be different for each individual
> + *		attachment, even if BPF program itself is the same.
> + *		Expects BPF program context *ctx* as a first argument.
> + *
> + *		Supported for the following program types:
> + *			- kprobe/uprobe;
> + *			- tracepoint;
> + *			- perf_event.

I think it is possible that in the future we may need to support more
program types with user_ctx, and not just a u64 but a value larger than
64 bits. Should we make this helper extensible instead, e.g.

long bpf_get_user_ctx(void *ctx, void *user_ctx, u32 user_ctx_len)

where the return value is 0 on success and a negative error code
otherwise? What do you think?

> + *	Return
> + *		Value specified by user at BPF link creation/attachment time
> + *		or 0, if it was not specified.
>   */
>   #define __BPF_FUNC_MAPPER(FN)		\
>   	FN(unspec),			\
> @@ -5032,6 +5047,7 @@ union bpf_attr {
>   	FN(timer_start),		\
>   	FN(timer_cancel),		\
>   	FN(get_func_ip),		\
> +	FN(get_user_ctx),		\
>   	/* */
>
>   /* integer value in 'imm' field of BPF_CALL instruction selects which helper
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index c9cf6a0d0fb3..b14978b3f6fb 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -975,7 +975,34 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
>   	.arg1_type	= ARG_PTR_TO_CTX,
>   };
>
> -const struct bpf_func_proto *
> +BPF_CALL_1(bpf_get_user_ctx_trace, void *, ctx)
> +{
> +	struct bpf_trace_run_ctx *run_ctx;
> +
> +	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
> +	return run_ctx->user_ctx;
> +}
> +
> +static const struct bpf_func_proto bpf_get_user_ctx_proto_trace = {
> +	.func		= bpf_get_user_ctx_trace,
> +	.gpl_only	= false,
> +	.ret_type	= RET_INTEGER,
> +	.arg1_type	= ARG_PTR_TO_CTX,
> +};
> +
[...]
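For readers unfamiliar with how the helper under discussion would be consumed, here is a minimal sketch of the BPF-program side: a kprobe program reading its attach-time value. It assumes kernel and UAPI headers with this series applied; because bpf_get_user_ctx() is brand new here, its prototype is declared by hand from BPF_FUNC_get_user_ctx instead of coming from bpf_helper_defs.h, and the probed function name is only an example.

/* kprobe_user_ctx.bpf.c - illustrative sketch, assumes this series is applied */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* Helper is introduced by this patch, so declare it manually by its func id. */
static __u64 (*bpf_get_user_ctx)(void *ctx) = (void *) BPF_FUNC_get_user_ctx;

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(handle_openat2)
{
	/* Same program object, potentially different value per attachment. */
	__u64 cookie = bpf_get_user_ctx(ctx);

	bpf_printk("openat2 hit, user_ctx=%llu", cookie);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";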
On Thu, Jul 29, 2021 at 11:17 AM Yonghong Song <yhs@fb.com> wrote:
>
>
>
> On 7/26/21 9:12 AM, Andrii Nakryiko wrote:
> > Add new BPF helper, bpf_get_user_ctx(), which can be used by BPF programs to
> > get access to the user_ctx value, specified during BPF program attachment (BPF
> > link creation) time.
> >
> > Currently all perf_event-backed BPF program types support bpf_get_user_ctx()
> > helper. Follow-up patches will add support for fentry/fexit programs as well.
> >
> > While at it, mark bpf_tracing_func_proto() as static to make it obvious that
> > it's only used from within the kernel/trace/bpf_trace.c.
> >
> > Cc: Peter Zijlstra <peterz@infradead.org>
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > ---
> >  include/linux/bpf.h            |  3 ---
> >  include/uapi/linux/bpf.h       | 16 ++++++++++++++++
> >  kernel/trace/bpf_trace.c       | 35 +++++++++++++++++++++++++++++++++-
> >  tools/include/uapi/linux/bpf.h | 16 ++++++++++++++++
> >  4 files changed, 66 insertions(+), 4 deletions(-)
> >
> > diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> > index 74b35faf0b73..94ebedc1e13a 100644
> > --- a/include/linux/bpf.h
> > +++ b/include/linux/bpf.h
> > @@ -2110,9 +2110,6 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
> >   extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
> >   extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
> >
> > -const struct bpf_func_proto *bpf_tracing_func_proto(
> > -	enum bpf_func_id func_id, const struct bpf_prog *prog);
> > -
> >   const struct bpf_func_proto *tracing_prog_func_proto(
> >   	enum bpf_func_id func_id, const struct bpf_prog *prog);
> >
> > diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> > index bc1fd54a8f58..96afeced3467 100644
> > --- a/include/uapi/linux/bpf.h
> > +++ b/include/uapi/linux/bpf.h
> > @@ -4856,6 +4856,21 @@ union bpf_attr {
> >   *		Get address of the traced function (for tracing and kprobe programs).
> >   *	Return
> >   *		Address of the traced function.
> > + *
> > + * u64 bpf_get_user_ctx(void *ctx)
> > + *	Description
> > + *		Get user_ctx value provided (optionally) during the program
> > + *		attachment. It might be different for each individual
> > + *		attachment, even if BPF program itself is the same.
> > + *		Expects BPF program context *ctx* as a first argument.
> > + *
> > + *		Supported for the following program types:
> > + *			- kprobe/uprobe;
> > + *			- tracepoint;
> > + *			- perf_event.
>
> I think it is possible in the future we may need to support more
> program types with user_ctx, not just u64 but more than 64bit value.
> Should we may make this helper extensible like
> long bpf_get_user_ctx(void *ctx, void *user_ctx, u32 user_ctx_len)
>
> The return value will 0 to be good and a negative indicating an error.
> What do you think?

I explicitly wanted to keep this user_ctx/bpf_cookie to a small fixed
size. __u64 is perfect because it's small enough to not require
dynamic memory allocation, but big enough to store any kind of index
into an array *or* user-space pointer. So if user needs more storage
than 8 bytes, they will be able to have a bigger array where
user_ctx/bpf_cookie is just an integer index or some sort of key into
hashmap, whichever is more convenient.

So I'd like to keep it lean and simple. It is already powerful enough
to support any scenario, IMO.

>
> > + *	Return
> > + *		Value specified by user at BPF link creation/attachment time
> > + *		or 0, if it was not specified.
> >   */
> >   #define __BPF_FUNC_MAPPER(FN)		\
> >   	FN(unspec),			\
> > @@ -5032,6 +5047,7 @@ union bpf_attr {
> >   	FN(timer_start),		\
> >   	FN(timer_cancel),		\
> >   	FN(get_func_ip),		\
> > +	FN(get_user_ctx),		\
> >   	/* */
> >
> >   /* integer value in 'imm' field of BPF_CALL instruction selects which helper
> > diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> > index c9cf6a0d0fb3..b14978b3f6fb 100644
> > --- a/kernel/trace/bpf_trace.c
> > +++ b/kernel/trace/bpf_trace.c
> > @@ -975,7 +975,34 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
> >   	.arg1_type	= ARG_PTR_TO_CTX,
> >   };
> >
> > -const struct bpf_func_proto *
> > +BPF_CALL_1(bpf_get_user_ctx_trace, void *, ctx)
> > +{
> > +	struct bpf_trace_run_ctx *run_ctx;
> > +
> > +	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
> > +	return run_ctx->user_ctx;
> > +}
> > +
> > +static const struct bpf_func_proto bpf_get_user_ctx_proto_trace = {
> > +	.func		= bpf_get_user_ctx_trace,
> > +	.gpl_only	= false,
> > +	.ret_type	= RET_INTEGER,
> > +	.arg1_type	= ARG_PTR_TO_CTX,
> > +};
> > +
> [...]
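To make the pattern Andrii describes concrete, here is a sketch (reusing the includes and the manual bpf_get_user_ctx() declaration from the earlier snippet) that keeps larger per-attachment state in an ARRAY map and passes only the index as user_ctx. The struct fields, map size and tracepoint name are made-up examples.

struct attach_cfg {
	__u64 sample_period;
	__u32 flags;
	char label[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, struct attach_cfg);
} cfg_map SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int handle_write(void *ctx)
{
	/* User space stored the real config in cfg_map[idx] and attached
	 * this program with user_ctx == idx.
	 */
	__u32 idx = (__u32)bpf_get_user_ctx(ctx);
	struct attach_cfg *cfg = bpf_map_lookup_elem(&cfg_map, &idx);

	if (!cfg)
		return 0;
	/* ... act on cfg->sample_period, cfg->flags, cfg->label ... */
	return 0;
}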
On 7/29/21 9:49 PM, Andrii Nakryiko wrote:
> On Thu, Jul 29, 2021 at 11:17 AM Yonghong Song <yhs@fb.com> wrote:
>>
>>
>>
>> On 7/26/21 9:12 AM, Andrii Nakryiko wrote:
>>> Add new BPF helper, bpf_get_user_ctx(), which can be used by BPF programs to
>>> get access to the user_ctx value, specified during BPF program attachment (BPF
>>> link creation) time.
>>>
>>> Currently all perf_event-backed BPF program types support bpf_get_user_ctx()
>>> helper. Follow-up patches will add support for fentry/fexit programs as well.
>>>
>>> While at it, mark bpf_tracing_func_proto() as static to make it obvious that
>>> it's only used from within the kernel/trace/bpf_trace.c.
>>>
>>> Cc: Peter Zijlstra <peterz@infradead.org>
>>> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
>>> ---
>>>   include/linux/bpf.h            |  3 ---
>>>   include/uapi/linux/bpf.h       | 16 ++++++++++++++++
>>>   kernel/trace/bpf_trace.c       | 35 +++++++++++++++++++++++++++++++++-
>>>   tools/include/uapi/linux/bpf.h | 16 ++++++++++++++++
>>>   4 files changed, 66 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
>>> index 74b35faf0b73..94ebedc1e13a 100644
>>> --- a/include/linux/bpf.h
>>> +++ b/include/linux/bpf.h
>>> @@ -2110,9 +2110,6 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
>>>   extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
>>>   extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
>>>
>>> -const struct bpf_func_proto *bpf_tracing_func_proto(
>>> -	enum bpf_func_id func_id, const struct bpf_prog *prog);
>>> -
>>>   const struct bpf_func_proto *tracing_prog_func_proto(
>>>   	enum bpf_func_id func_id, const struct bpf_prog *prog);
>>>
>>> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
>>> index bc1fd54a8f58..96afeced3467 100644
>>> --- a/include/uapi/linux/bpf.h
>>> +++ b/include/uapi/linux/bpf.h
>>> @@ -4856,6 +4856,21 @@ union bpf_attr {
>>>   *		Get address of the traced function (for tracing and kprobe programs).
>>>   *	Return
>>>   *		Address of the traced function.
>>> + *
>>> + * u64 bpf_get_user_ctx(void *ctx)
>>> + *	Description
>>> + *		Get user_ctx value provided (optionally) during the program
>>> + *		attachment. It might be different for each individual
>>> + *		attachment, even if BPF program itself is the same.
>>> + *		Expects BPF program context *ctx* as a first argument.
>>> + *
>>> + *		Supported for the following program types:
>>> + *			- kprobe/uprobe;
>>> + *			- tracepoint;
>>> + *			- perf_event.
>>
>> I think it is possible in the future we may need to support more
>> program types with user_ctx, not just u64 but more than 64bit value.
>> Should we may make this helper extensible like
>> long bpf_get_user_ctx(void *ctx, void *user_ctx, u32 user_ctx_len)
>>
>> The return value will 0 to be good and a negative indicating an error.
>> What do you think?
>
> I explicitly wanted to keep this user_ctx/bpf_cookie to a small fixed
> size. __u64 is perfect because it's small enough to not require
> dynamic memory allocation, but big enough to store any kind of index
> into an array *or* user-space pointer. So if user needs more storage
> than 8 bytes, they will be able to have a bigger array where
> user_ctx/bpf_cookie is just an integer index or some sort of key into
> hashmap, whichever is more convenient.

Okay. Returning an index into a map is a good idea. This way a u64
return value is indeed enough.

>
> So I'd like to keep it lean and simple. It is already powerful enough
> to support any scenario, IMO.
>
>>
>>> + *	Return
>>> + *		Value specified by user at BPF link creation/attachment time
>>> + *		or 0, if it was not specified.
>>>   */
>>>   #define __BPF_FUNC_MAPPER(FN)		\
>>>   	FN(unspec),			\
>>> @@ -5032,6 +5047,7 @@ union bpf_attr {
>>>   	FN(timer_start),		\
>>>   	FN(timer_cancel),		\
>>>   	FN(get_func_ip),		\
>>> +	FN(get_user_ctx),		\
>>>   	/* */
>>>
>>>   /* integer value in 'imm' field of BPF_CALL instruction selects which helper
>>> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
>>> index c9cf6a0d0fb3..b14978b3f6fb 100644
>>> --- a/kernel/trace/bpf_trace.c
>>> +++ b/kernel/trace/bpf_trace.c
>>> @@ -975,7 +975,34 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
>>>   	.arg1_type	= ARG_PTR_TO_CTX,
>>>   };
>>>
>>> -const struct bpf_func_proto *
>>> +BPF_CALL_1(bpf_get_user_ctx_trace, void *, ctx)
>>> +{
>>> +	struct bpf_trace_run_ctx *run_ctx;
>>> +
>>> +	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
>>> +	return run_ctx->user_ctx;
>>> +}
>>> +
>>> +static const struct bpf_func_proto bpf_get_user_ctx_proto_trace = {
>>> +	.func		= bpf_get_user_ctx_trace,
>>> +	.gpl_only	= false,
>>> +	.ret_type	= RET_INTEGER,
>>> +	.arg1_type	= ARG_PTR_TO_CTX,
>>> +};
>>> +
>> [...]
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 74b35faf0b73..94ebedc1e13a 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2110,9 +2110,6 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
 extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
 extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
 
-const struct bpf_func_proto *bpf_tracing_func_proto(
-	enum bpf_func_id func_id, const struct bpf_prog *prog);
-
 const struct bpf_func_proto *tracing_prog_func_proto(
 	enum bpf_func_id func_id, const struct bpf_prog *prog);
 
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index bc1fd54a8f58..96afeced3467 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4856,6 +4856,21 @@ union bpf_attr {
  *		Get address of the traced function (for tracing and kprobe programs).
  *	Return
  *		Address of the traced function.
+ *
+ * u64 bpf_get_user_ctx(void *ctx)
+ *	Description
+ *		Get user_ctx value provided (optionally) during the program
+ *		attachment. It might be different for each individual
+ *		attachment, even if BPF program itself is the same.
+ *		Expects BPF program context *ctx* as a first argument.
+ *
+ *		Supported for the following program types:
+ *			- kprobe/uprobe;
+ *			- tracepoint;
+ *			- perf_event.
+ *	Return
+ *		Value specified by user at BPF link creation/attachment time
+ *		or 0, if it was not specified.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5032,6 +5047,7 @@ union bpf_attr {
 	FN(timer_start),		\
 	FN(timer_cancel),		\
 	FN(get_func_ip),		\
+	FN(get_user_ctx),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c9cf6a0d0fb3..b14978b3f6fb 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -975,7 +975,34 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
-const struct bpf_func_proto *
+BPF_CALL_1(bpf_get_user_ctx_trace, void *, ctx)
+{
+	struct bpf_trace_run_ctx *run_ctx;
+
+	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
+	return run_ctx->user_ctx;
+}
+
+static const struct bpf_func_proto bpf_get_user_ctx_proto_trace = {
+	.func		= bpf_get_user_ctx_trace,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+
+BPF_CALL_1(bpf_get_user_ctx_pe, struct bpf_perf_event_data_kern *, ctx)
+{
+	return ctx->event->user_ctx;
+}
+
+static const struct bpf_func_proto bpf_get_user_ctx_proto_pe = {
+	.func		= bpf_get_user_ctx_pe,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+};
+
+static const struct bpf_func_proto *
 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
 	switch (func_id) {
@@ -1108,6 +1135,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #endif
 	case BPF_FUNC_get_func_ip:
 		return &bpf_get_func_ip_proto_kprobe;
+	case BPF_FUNC_get_user_ctx:
+		return &bpf_get_user_ctx_proto_trace;
 	default:
 		return bpf_tracing_func_proto(func_id, prog);
 	}
@@ -1218,6 +1247,8 @@ tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_get_stackid_proto_tp;
 	case BPF_FUNC_get_stack:
 		return &bpf_get_stack_proto_tp;
+	case BPF_FUNC_get_user_ctx:
+		return &bpf_get_user_ctx_proto_trace;
 	default:
 		return bpf_tracing_func_proto(func_id, prog);
 	}
@@ -1325,6 +1356,8 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_perf_prog_read_value_proto;
 	case BPF_FUNC_read_branch_records:
 		return &bpf_read_branch_records_proto;
+	case BPF_FUNC_get_user_ctx:
+		return &bpf_get_user_ctx_proto_pe;
 	default:
 		return bpf_tracing_func_proto(func_id, prog);
 	}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index bc1fd54a8f58..96afeced3467 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4856,6 +4856,21 @@ union bpf_attr {
  *		Get address of the traced function (for tracing and kprobe programs).
  *	Return
  *		Address of the traced function.
+ *
+ * u64 bpf_get_user_ctx(void *ctx)
+ *	Description
+ *		Get user_ctx value provided (optionally) during the program
+ *		attachment. It might be different for each individual
+ *		attachment, even if BPF program itself is the same.
+ *		Expects BPF program context *ctx* as a first argument.
+ *
+ *		Supported for the following program types:
+ *			- kprobe/uprobe;
+ *			- tracepoint;
+ *			- perf_event.
+ *	Return
+ *		Value specified by user at BPF link creation/attachment time
+ *		or 0, if it was not specified.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5032,6 +5047,7 @@ union bpf_attr {
 	FN(timer_start),		\
 	FN(timer_cancel),		\
 	FN(get_func_ip),		\
+	FN(get_user_ctx),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
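The helper added above only reads current->bpf_ctx; the code that populates it before a kprobe/tracepoint program runs lives in earlier patches of this series (which introduce struct bpf_run_ctx and struct bpf_trace_run_ctx). As a rough conceptual sketch of that mechanism, with the structs abbreviated and the wrapper function name invented for illustration, the program-run path publishes the per-link value for the duration of the invocation:

/* Conceptual sketch only, not the series' actual code. */
struct bpf_run_ctx {};

struct bpf_trace_run_ctx {
	struct bpf_run_ctx run_ctx;
	u64 user_ctx;
};

static unsigned int run_prog_with_user_ctx(const struct bpf_prog *prog,
					   void *prog_ctx, u64 user_ctx)
{
	struct bpf_trace_run_ctx run_ctx = { .user_ctx = user_ctx };
	struct bpf_run_ctx *saved_ctx;
	unsigned int ret;

	/* Publish the per-attachment value; bpf_get_user_ctx_trace()
	 * recovers it via container_of() on current->bpf_ctx.
	 */
	saved_ctx = current->bpf_ctx;
	current->bpf_ctx = &run_ctx.run_ctx;
	ret = BPF_PROG_RUN(prog, prog_ctx);	/* from <linux/filter.h> */
	current->bpf_ctx = saved_ctx;

	return ret;
}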
Add new BPF helper, bpf_get_user_ctx(), which can be used by BPF programs to
get access to the user_ctx value, specified during BPF program attachment (BPF
link creation) time.

Currently all perf_event-backed BPF program types support bpf_get_user_ctx()
helper. Follow-up patches will add support for fentry/fexit programs as well.

While at it, mark bpf_tracing_func_proto() as static to make it obvious that
it's only used from within the kernel/trace/bpf_trace.c.

Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 include/linux/bpf.h            |  3 ---
 include/uapi/linux/bpf.h       | 16 ++++++++++++++++
 kernel/trace/bpf_trace.c       | 35 +++++++++++++++++++++++++++++++++-
 tools/include/uapi/linux/bpf.h | 16 ++++++++++++++++
 4 files changed, 66 insertions(+), 4 deletions(-)
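The attach-time side, i.e. how user_ctx actually reaches the link, is handled by other patches in this series (the BPF perf link). Purely as a hedged illustration of the intended flow: the BPF_PERF_EVENT attach type and the user_ctx attribute are added elsewhere in the series, and the exact bpf_attr field name used below is an assumption, not confirmed UAPI.

/* User-space sketch: attach prog_fd to a perf_event FD (obtained from
 * perf_event_open()) via a BPF link, passing a user_ctx value along.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_with_user_ctx(int prog_fd, int perf_fd, __u64 user_ctx)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = perf_fd;
	attr.link_create.attach_type = BPF_PERF_EVENT;	  /* added by this series */
	attr.link_create.perf_event.user_ctx = user_ctx; /* assumed field name */

	/* >= 0: link FD that owns the attachment; < 0: error */
	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}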