[bpf-next,2/3] bpf: Allow using bpf_sk_storage in FENTRY/FEXIT/RAW_TP

Message ID 20201106220803.3950648-1-kafai@fb.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Series bpf: Enable bpf_sk_storage for FENTRY/FEXIT/RAW_TP

Commit Message

Martin KaFai Lau Nov. 6, 2020, 10:08 p.m. UTC
This patch enables FENTRY/FEXIT/RAW_TP tracing programs to use
the bpf_sk_storage_(get|delete) helpers, so those tracing programs
can access the sk's bpf_local_storage.  The selftest in a later
patch will show some examples.

bpf_sk_storage is currently used in bpf-tcp-cc, tc,
cg sockops...etc, all of which run in either softirq or
task context.

This patch adds bpf_sk_storage_get_tracing_proto and
bpf_sk_storage_delete_tracing_proto.  They check at runtime
that the helpers are only called while serving softirq or
running in task context.  That should cover most common
tracing use cases on sk.

At load time, the new tracing_allowed() function
ensures that a tracing prog using the bpf_sk_storage_(get|delete)
helpers is not itself tracing any *sk_storage*() function.
The sk is passed as "void *" when calling into bpf_local_storage.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
---
 include/net/bpf_sk_storage.h |  2 +
 kernel/trace/bpf_trace.c     |  5 +++
 net/core/bpf_sk_storage.c    | 73 ++++++++++++++++++++++++++++++++++++
 3 files changed, 80 insertions(+)
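
To make the intended usage concrete, here is a minimal sketch of a FENTRY
program that could use the helper once this lands.  It is not the selftest
from patch 3/3; the traced function (tcp_retransmit_skb), map name, and
value layout are illustrative only.

/* Sketch only: count TCP retransmits per socket in sk local storage.
 * Needs vmlinux.h plus libbpf's bpf_helpers.h/bpf_tracing.h.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} sk_retrans SEC(".maps");

SEC("fentry/tcp_retransmit_skb")
int BPF_PROG(count_retransmit, struct sock *sk)
{
	__u64 *cnt;

	/* Runs in softirq or task context; the new *_tracing protos
	 * verify that at runtime before touching the storage.
	 */
	cnt = bpf_sk_storage_get(&sk_retrans, sk, 0,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (cnt)
		(*cnt)++;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";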

Comments

Song Liu Nov. 6, 2020, 10:59 p.m. UTC | #1
> On Nov 6, 2020, at 2:08 PM, Martin KaFai Lau <kafai@fb.com> wrote:
> 
> [...]
>
> diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
> index 3c516dd07caf..0e85713f56df 100644
> --- a/include/net/bpf_sk_storage.h
> +++ b/include/net/bpf_sk_storage.h
> @@ -20,6 +20,8 @@ void bpf_sk_storage_free(struct sock *sk);
> 
> extern const struct bpf_func_proto bpf_sk_storage_get_proto;
> extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
> +extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
> +extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
> 
> struct bpf_local_storage_elem;
> struct bpf_sk_storage_diag;
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index e4515b0f62a8..cfce60ad1cb5 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -16,6 +16,7 @@
> #include <linux/syscalls.h>
> #include <linux/error-injection.h>
> #include <linux/btf_ids.h>
> +#include <net/bpf_sk_storage.h>
> 
> #include <uapi/linux/bpf.h>
> #include <uapi/linux/btf.h>
> @@ -1735,6 +1736,10 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
> 		return &bpf_skc_to_tcp_request_sock_proto;
> 	case BPF_FUNC_skc_to_udp6_sock:
> 		return &bpf_skc_to_udp6_sock_proto;
> +	case BPF_FUNC_sk_storage_get:
> +		return &bpf_sk_storage_get_tracing_proto;
> +	case BPF_FUNC_sk_storage_delete:
> +		return &bpf_sk_storage_delete_tracing_proto;
> #endif
> 	case BPF_FUNC_seq_printf:
> 		return prog->expected_attach_type == BPF_TRACE_ITER ?
> diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
> index 001eac65e40f..1a41c917e08d 100644
> --- a/net/core/bpf_sk_storage.c
> +++ b/net/core/bpf_sk_storage.c
> @@ -6,6 +6,7 @@
> #include <linux/types.h>
> #include <linux/spinlock.h>
> #include <linux/bpf.h>
> +#include <linux/btf.h>
> #include <linux/btf_ids.h>
> #include <linux/bpf_local_storage.h>
> #include <net/bpf_sk_storage.h>
> @@ -378,6 +379,78 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
> 	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
> };
> 
> +static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
> +{
> +	const struct btf *btf_vmlinux;
> +	const struct btf_type *t;
> +	const char *tname;
> +	u32 btf_id;
> +
> +	if (prog->aux->dst_prog)
> +		return false;
> +
> +	/* Ensure the tracing program is not tracing
> +	 * any *sk_storage*() function and also
> +	 * use the bpf_sk_storage_(get|delete) helper.
> +	 */
> +	switch (prog->expected_attach_type) {
> +	case BPF_TRACE_RAW_TP:
> +		/* bpf_sk_storage has no trace point */
> +		return true;
> +	case BPF_TRACE_FENTRY:
> +	case BPF_TRACE_FEXIT:
> +		btf_vmlinux = bpf_get_btf_vmlinux();
> +		btf_id = prog->aux->attach_btf_id;
> +		t = btf_type_by_id(btf_vmlinux, btf_id);

What happens to fentry/fexit attach to other BPF programs? I guess
we should check for t == NULL?

Thanks,
Song

> +		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> +		return !strstr(tname, "sk_storage");
> +	default:
> +		return false;
> +	}
> +
> +	return false;
> +}

[...]
Martin KaFai Lau Nov. 6, 2020, 11:18 p.m. UTC | #2
On Fri, Nov 06, 2020 at 02:59:14PM -0800, Song Liu wrote:
> 
> 
> > On Nov 6, 2020, at 2:08 PM, Martin KaFai Lau <kafai@fb.com> wrote:
> > 
> > [...]
> > 
> > +static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
> > +{
> > +	const struct btf *btf_vmlinux;
> > +	const struct btf_type *t;
> > +	const char *tname;
> > +	u32 btf_id;
> > +
> > +	if (prog->aux->dst_prog)
> > +		return false;
> > +
> > +	/* Ensure the tracing program is not tracing
> > +	 * any *sk_storage*() function and also
> > +	 * use the bpf_sk_storage_(get|delete) helper.
> > +	 */
> > +	switch (prog->expected_attach_type) {
> > +	case BPF_TRACE_RAW_TP:
> > +		/* bpf_sk_storage has no trace point */
> > +		return true;
> > +	case BPF_TRACE_FENTRY:
> > +	case BPF_TRACE_FEXIT:
> > +		btf_vmlinux = bpf_get_btf_vmlinux();
> > +		btf_id = prog->aux->attach_btf_id;
> > +		t = btf_type_by_id(btf_vmlinux, btf_id);
> 
> What happens to fentry/fexit attach to other BPF programs? I guess
> we should check for t == NULL?
It does not support tracing a BPF program and using bpf_sk_storage
at the same time for now, hence the "if (prog->aux->dst_prog)" test earlier.
It could be extended to do that later as a follow-up.
I missed mentioning that in the commit message.

"t" should not be NULL here when tracing a kernel function.
The verifier should have already checked it and ensured "t" is a FUNC.

> > +		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > +		return !strstr(tname, "sk_storage");
> > +	default:
> > +		return false;
> > +	}
> > +
> > +	return false;
> > +}
> 
> [...]
> 
>
Song Liu Nov. 7, 2020, 12:20 a.m. UTC | #3
> On Nov 6, 2020, at 3:18 PM, Martin Lau <kafai@fb.com> wrote:
> 
> On Fri, Nov 06, 2020 at 02:59:14PM -0800, Song Liu wrote:
>> 
>> 
>>> On Nov 6, 2020, at 2:08 PM, Martin KaFai Lau <kafai@fb.com> wrote:
>>> 

[...]

>>> +static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
>>> +{
>>> +	const struct btf *btf_vmlinux;
>>> +	const struct btf_type *t;
>>> +	const char *tname;
>>> +	u32 btf_id;
>>> +
>>> +	if (prog->aux->dst_prog)
>>> +		return false;
>>> +
>>> +	/* Ensure the tracing program is not tracing
>>> +	 * any *sk_storage*() function and also
>>> +	 * use the bpf_sk_storage_(get|delete) helper.
>>> +	 */
>>> +	switch (prog->expected_attach_type) {
>>> +	case BPF_TRACE_RAW_TP:
>>> +		/* bpf_sk_storage has no trace point */
>>> +		return true;
>>> +	case BPF_TRACE_FENTRY:
>>> +	case BPF_TRACE_FEXIT:
>>> +		btf_vmlinux = bpf_get_btf_vmlinux();
>>> +		btf_id = prog->aux->attach_btf_id;
>>> +		t = btf_type_by_id(btf_vmlinux, btf_id);
>> 
>> What happens to fentry/fexit attach to other BPF programs? I guess
>> we should check for t == NULL?
> It does not support tracing BPF program and using bpf_sk_storage
> at the same time for now, so there is a "if (prog->aux->dst_prog)" test earlier.
> It could be extended to do it later as a follow up.
> I missed to mention that in the commit message.  
> 
> "t" should not be NULL here when tracing a kernel function.
> The verifier should have already checked it and ensured "t" is a FUNC.

Ah, I missed the dst_prog check. Thanks for the explanation. 

Acked-by: Song Liu <songliubraving@fb.com>
Andrii Nakryiko Nov. 7, 2020, 1:14 a.m. UTC | #4
On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
>

[...]

> +       switch (prog->expected_attach_type) {
> +       case BPF_TRACE_RAW_TP:
> +               /* bpf_sk_storage has no trace point */
> +               return true;
> +       case BPF_TRACE_FENTRY:
> +       case BPF_TRACE_FEXIT:
> +               btf_vmlinux = bpf_get_btf_vmlinux();
> +               btf_id = prog->aux->attach_btf_id;
> +               t = btf_type_by_id(btf_vmlinux, btf_id);
> +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> +               return !strstr(tname, "sk_storage");

I'm always feeling uneasy about substring checks... Also, KP just
fixed the issue with string-based checks for LSM. Can we use a
BTF_ID_SET of blacklisted functions instead?

> +       default:
> +               return false;
> +       }
> +
> +       return false;
> +}
> +

[...]
Martin KaFai Lau Nov. 7, 2020, 1:52 a.m. UTC | #5
On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> >
> 
> [...]
> 
> > +       switch (prog->expected_attach_type) {
> > +       case BPF_TRACE_RAW_TP:
> > +               /* bpf_sk_storage has no trace point */
> > +               return true;
> > +       case BPF_TRACE_FENTRY:
> > +       case BPF_TRACE_FEXIT:
> > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > +               btf_id = prog->aux->attach_btf_id;
> > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > +               return !strstr(tname, "sk_storage");
> 
> I'm always feeling uneasy about substring checks... Also, KP just
> fixed the issue with string-based checks for LSM. Can we use a
> BTF_ID_SET of blacklisted functions instead?
KP one is different.  It accidentally whitelist-ed more than it should.

It is a blacklist here.  It is actually cleaner and safer to blacklist
all functions with "sk_storage" and too pessimistic is fine here.

> 
> > +       default:
> > +               return false;
> > +       }
> > +
> > +       return false;
> > +}
> > +
> 
> [...]
Andrii Nakryiko Nov. 9, 2020, 6:09 p.m. UTC | #6
On Fri, Nov 6, 2020 at 5:52 PM Martin KaFai Lau <kafai@fb.com> wrote:
>
> On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> > On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > >
> >
> > [...]
> >
> > > +       switch (prog->expected_attach_type) {
> > > +       case BPF_TRACE_RAW_TP:
> > > +               /* bpf_sk_storage has no trace point */
> > > +               return true;
> > > +       case BPF_TRACE_FENTRY:
> > > +       case BPF_TRACE_FEXIT:
> > > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > > +               btf_id = prog->aux->attach_btf_id;
> > > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > > +               return !strstr(tname, "sk_storage");
> >
> > I'm always feeling uneasy about substring checks... Also, KP just
> > fixed the issue with string-based checks for LSM. Can we use a
> > BTF_ID_SET of blacklisted functions instead?
> KP one is different.  It accidentally whitelist-ed more than it should.
>
> It is a blacklist here.  It is actually cleaner and safer to blacklist
> all functions with "sk_storage" and too pessimistic is fine here.

Fine for whom? Prefix check would be half-bad, but substring check is
horrible. Suddenly "task_storage" (and anything related) would be also
blacklisted. Let's do a prefix check at least.

>
> >
> > > +       default:
> > > +               return false;
> > > +       }
> > > +
> > > +       return false;
> > > +}
> > > +
> >
> > [...]
John Fastabend Nov. 9, 2020, 8:32 p.m. UTC | #7
Andrii Nakryiko wrote:
> On Fri, Nov 6, 2020 at 5:52 PM Martin KaFai Lau <kafai@fb.com> wrote:
> >
> > On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> > > On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > >
> > >
> > > [...]
> > >
> > > > +       switch (prog->expected_attach_type) {
> > > > +       case BPF_TRACE_RAW_TP:
> > > > +               /* bpf_sk_storage has no trace point */
> > > > +               return true;
> > > > +       case BPF_TRACE_FENTRY:
> > > > +       case BPF_TRACE_FEXIT:
> > > > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > > > +               btf_id = prog->aux->attach_btf_id;
> > > > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > > > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > > > +               return !strstr(tname, "sk_storage");
> > >
> > > I'm always feeling uneasy about substring checks... Also, KP just
> > > fixed the issue with string-based checks for LSM. Can we use a
> > > BTF_ID_SET of blacklisted functions instead?
> > KP one is different.  It accidentally whitelist-ed more than it should.
> >
> > It is a blacklist here.  It is actually cleaner and safer to blacklist
> > all functions with "sk_storage" and too pessimistic is fine here.
> 
> Fine for whom? Prefix check would be half-bad, but substring check is
> horrible. Suddenly "task_storage" (and anything related) would be also
> blacklisted. Let's do a prefix check at least.
> 

Agree, prefix check sounds like a good idea. But, just doing a quick
grep seems like it will need at least bpf_sk_storage and sk_storage to
catch everything.
KP Singh Nov. 10, 2020, 10:01 p.m. UTC | #8
On Mon, Nov 9, 2020 at 9:32 PM John Fastabend <john.fastabend@gmail.com> wrote:
>
> Andrii Nakryiko wrote:
> > On Fri, Nov 6, 2020 at 5:52 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > >
> > > On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> > > > On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > >
> > > >
> > > > [...]
> > > >
> > > > > +       switch (prog->expected_attach_type) {
> > > > > +       case BPF_TRACE_RAW_TP:
> > > > > +               /* bpf_sk_storage has no trace point */
> > > > > +               return true;
> > > > > +       case BPF_TRACE_FENTRY:
> > > > > +       case BPF_TRACE_FEXIT:
> > > > > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > > > > +               btf_id = prog->aux->attach_btf_id;
> > > > > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > > > > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > > > > +               return !strstr(tname, "sk_storage");
> > > >
> > > > I'm always feeling uneasy about substring checks... Also, KP just
> > > > fixed the issue with string-based checks for LSM. Can we use a
> > > > BTF_ID_SET of blacklisted functions instead?
> > > KP one is different.  It accidentally whitelist-ed more than it should.
> > >
> > > It is a blacklist here.  It is actually cleaner and safer to blacklist
> > > all functions with "sk_storage" and too pessimistic is fine here.
> >
> > Fine for whom? Prefix check would be half-bad, but substring check is
> > horrible. Suddenly "task_storage" (and anything related) would be also
> > blacklisted. Let's do a prefix check at least.
> >
>
> Agree, prefix check sounds like a good idea. But, just doing a quick
> grep seems like it will need at least bpf_sk_storage and sk_storage to
> catch everything.

Is there any reason we are not using BTF ID sets and an allow list similar
to bpf_d_path helper? (apart from the obvious inconvenience of
needing to update the set in the kernel)
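
For reference, the BTF ID set approach mentioned here (as used for the
bpf_d_path allowlist) would look roughly like the sketch below.  This is only
an illustration of the alternative being discussed, not code from this series,
and the listed functions are examples.

/* Sketch only: a deny list built with the BTF_ID_SET machinery and
 * checked against the program's attach_btf_id at load time.
 */
BTF_SET_START(bpf_sk_storage_tracing_deny)
BTF_ID(func, bpf_sk_storage_free)
BTF_ID(func, bpf_sk_storage_clone)
BTF_SET_END(bpf_sk_storage_tracing_deny)

static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	/* RAW_TP and dst_prog handling as in the patch above ... */
	return !btf_id_set_contains(&bpf_sk_storage_tracing_deny,
				    prog->aux->attach_btf_id);
}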
Martin KaFai Lau Nov. 10, 2020, 11:43 p.m. UTC | #9
On Tue, Nov 10, 2020 at 11:01:12PM +0100, KP Singh wrote:
> On Mon, Nov 9, 2020 at 9:32 PM John Fastabend <john.fastabend@gmail.com> wrote:
> >
> > Andrii Nakryiko wrote:
> > > On Fri, Nov 6, 2020 at 5:52 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > >
> > > > On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> > > > > On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > > >
> > > > >
> > > > > [...]
> > > > >
> > > > > > +       switch (prog->expected_attach_type) {
> > > > > > +       case BPF_TRACE_RAW_TP:
> > > > > > +               /* bpf_sk_storage has no trace point */
> > > > > > +               return true;
> > > > > > +       case BPF_TRACE_FENTRY:
> > > > > > +       case BPF_TRACE_FEXIT:
> > > > > > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > > > > > +               btf_id = prog->aux->attach_btf_id;
> > > > > > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > > > > > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > > > > > +               return !strstr(tname, "sk_storage");
> > > > >
> > > > > I'm always feeling uneasy about substring checks... Also, KP just
> > > > > fixed the issue with string-based checks for LSM. Can we use a
> > > > > BTF_ID_SET of blacklisted functions instead?
> > > > KP one is different.  It accidentally whitelist-ed more than it should.
> > > >
> > > > It is a blacklist here.  It is actually cleaner and safer to blacklist
> > > > all functions with "sk_storage" and too pessimistic is fine here.
> > >
> > > Fine for whom? Prefix check would be half-bad, but substring check is
> > > horrible. Suddenly "task_storage" (and anything related) would be also
> > > blacklisted. Let's do a prefix check at least.
> > >
> >
> > Agree, prefix check sounds like a good idea. But, just doing a quick
> > grep seems like it will need at least bpf_sk_storage and sk_storage to
> > catch everything.
> 
> Is there any reason we are not using BTF ID sets and an allow list similar
> to bpf_d_path helper? (apart from the obvious inconvenience of
> needing to update the set in the kernel)
It is a blacklist here; a small recap from the commit message:

> During the load time, the new tracing_allowed() function
> will ensure the tracing prog using the bpf_sk_storage_(get|delete)
> helper is not tracing any *sk_storage*() function itself.
> The sk is passed as "void *" when calling into bpf_local_storage.

Both BTF_ID and string-based (either prefix/substr) checks will work.

The intention is to first disallow a tracing program from tracing
any function in bpf_sk_storage.c while also calling the
bpf_sk_storage_(get|delete) helper at the same time.
This blacklist can be revisited later if a use case shows up
for some of the blacklisted functions (which I doubt).

To use BTF_ID, we need to consider whether the current (and future)
bpf_sk_storage functions can be used in BTF_ID or not:
static, global/external, or inlined.

If BTF_ID is the best way of doing all black/white lists, I don't mind
either.  I could force some functions to be inline, and we would need
to remember to revisit the blacklist when the scope of fentry/fexit
traceable functions changes, e.g. when static functions become traceable
later.  Future changes to bpf_sk_storage.c will also need to
adjust this list.
Andrii Nakryiko Nov. 10, 2020, 11:53 p.m. UTC | #10
On Tue, Nov 10, 2020 at 3:43 PM Martin KaFai Lau <kafai@fb.com> wrote:
>
> On Tue, Nov 10, 2020 at 11:01:12PM +0100, KP Singh wrote:
> > On Mon, Nov 9, 2020 at 9:32 PM John Fastabend <john.fastabend@gmail.com> wrote:
> > >
> > > Andrii Nakryiko wrote:
> > > > On Fri, Nov 6, 2020 at 5:52 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > >
> > > > > On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> > > > > > On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > > > >
> > > > > >
> > > > > > [...]
> > > > > >
> > > > > > > +       switch (prog->expected_attach_type) {
> > > > > > > +       case BPF_TRACE_RAW_TP:
> > > > > > > +               /* bpf_sk_storage has no trace point */
> > > > > > > +               return true;
> > > > > > > +       case BPF_TRACE_FENTRY:
> > > > > > > +       case BPF_TRACE_FEXIT:
> > > > > > > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > > > > > > +               btf_id = prog->aux->attach_btf_id;
> > > > > > > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > > > > > > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > > > > > > +               return !strstr(tname, "sk_storage");
> > > > > >
> > > > > > I'm always feeling uneasy about substring checks... Also, KP just
> > > > > > fixed the issue with string-based checks for LSM. Can we use a
> > > > > > BTF_ID_SET of blacklisted functions instead?
> > > > > KP one is different.  It accidentally whitelist-ed more than it should.
> > > > >
> > > > > It is a blacklist here.  It is actually cleaner and safer to blacklist
> > > > > all functions with "sk_storage" and too pessimistic is fine here.
> > > >
> > > > Fine for whom? Prefix check would be half-bad, but substring check is
> > > > horrible. Suddenly "task_storage" (and anything related) would be also
> > > > blacklisted. Let's do a prefix check at least.
> > > >
> > >
> > > Agree, prefix check sounds like a good idea. But, just doing a quick
> > > grep seems like it will need at least bpf_sk_storage and sk_storage to
> > > catch everything.
> >
> > Is there any reason we are not using BTF ID sets and an allow list similar
> > to bpf_d_path helper? (apart from the obvious inconvenience of
> > needing to update the set in the kernel)
> It is a blacklist here, a small recap from commit message.
>
> > During the load time, the new tracing_allowed() function
> > will ensure the tracing prog using the bpf_sk_storage_(get|delete)
> > helper is not tracing any *sk_storage*() function itself.
> > The sk is passed as "void *" when calling into bpf_local_storage.
>
> Both BTF_ID and string-based (either prefix/substr) will work.
>
> The intention is to first disallow a tracing program from tracing
> any function in bpf_sk_storage.c and also calling the
> bpf_sk_storage_(get|delete) helper at the same time.
> This blacklist can be revisited later if there would
> be a use case in some of the blacklist-ed
> functions (which I doubt).
>
> To use BTF_ID, it needs to consider about if the current (and future)
> bpf_sk_storage function can be used in BTF_ID or not:
> static, global/external, or inlined.
>
> If BTF_ID is the best way for doing all black/white list, I don't mind
> either.  I could force some to inline and we need to remember
> to revisit the blacklist when the scope of fentry/fexit tracable
> function changed, e.g. when static function becomes traceable

You can consider static functions traceable already. Arnaldo landed a
change a day or so ago in pahole that exposes static functions in BTF
and makes it possible to fentry/fexit attach them.

> later.  The future changes to bpf_sk_storage.c will need to
> adjust this list also.
Martin KaFai Lau Nov. 11, 2020, 12:07 a.m. UTC | #11
On Tue, Nov 10, 2020 at 03:53:13PM -0800, Andrii Nakryiko wrote:
> On Tue, Nov 10, 2020 at 3:43 PM Martin KaFai Lau <kafai@fb.com> wrote:
> >
> > On Tue, Nov 10, 2020 at 11:01:12PM +0100, KP Singh wrote:
> > > On Mon, Nov 9, 2020 at 9:32 PM John Fastabend <john.fastabend@gmail.com> wrote:
> > > >
> > > > Andrii Nakryiko wrote:
> > > > > On Fri, Nov 6, 2020 at 5:52 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > > >
> > > > > > On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> > > > > > > On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > > > > >
> > > > > > >
> > > > > > > [...]
> > > > > > >
> > > > > > > > +       switch (prog->expected_attach_type) {
> > > > > > > > +       case BPF_TRACE_RAW_TP:
> > > > > > > > +               /* bpf_sk_storage has no trace point */
> > > > > > > > +               return true;
> > > > > > > > +       case BPF_TRACE_FENTRY:
> > > > > > > > +       case BPF_TRACE_FEXIT:
> > > > > > > > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > > > > > > > +               btf_id = prog->aux->attach_btf_id;
> > > > > > > > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > > > > > > > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > > > > > > > +               return !strstr(tname, "sk_storage");
> > > > > > >
> > > > > > > I'm always feeling uneasy about substring checks... Also, KP just
> > > > > > > fixed the issue with string-based checks for LSM. Can we use a
> > > > > > > BTF_ID_SET of blacklisted functions instead?
> > > > > > KP one is different.  It accidentally whitelist-ed more than it should.
> > > > > >
> > > > > > It is a blacklist here.  It is actually cleaner and safer to blacklist
> > > > > > all functions with "sk_storage" and too pessimistic is fine here.
> > > > >
> > > > > Fine for whom? Prefix check would be half-bad, but substring check is
> > > > > horrible. Suddenly "task_storage" (and anything related) would be also
> > > > > blacklisted. Let's do a prefix check at least.
> > > > >
> > > >
> > > > Agree, prefix check sounds like a good idea. But, just doing a quick
> > > > grep seems like it will need at least bpf_sk_storage and sk_storage to
> > > > catch everything.
> > >
> > > Is there any reason we are not using BTF ID sets and an allow list similar
> > > to bpf_d_path helper? (apart from the obvious inconvenience of
> > > needing to update the set in the kernel)
> > It is a blacklist here, a small recap from commit message.
> >
> > > During the load time, the new tracing_allowed() function
> > > will ensure the tracing prog using the bpf_sk_storage_(get|delete)
> > > helper is not tracing any *sk_storage*() function itself.
> > > The sk is passed as "void *" when calling into bpf_local_storage.
> >
> > Both BTF_ID and string-based (either prefix/substr) will work.
> >
> > The intention is to first disallow a tracing program from tracing
> > any function in bpf_sk_storage.c and also calling the
> > bpf_sk_storage_(get|delete) helper at the same time.
> > This blacklist can be revisited later if there would
> > be a use case in some of the blacklist-ed
> > functions (which I doubt).
> >
> > To use BTF_ID, it needs to consider about if the current (and future)
> > bpf_sk_storage function can be used in BTF_ID or not:
> > static, global/external, or inlined.
> >
> > If BTF_ID is the best way for doing all black/white list, I don't mind
> > either.  I could force some to inline and we need to remember
> > to revisit the blacklist when the scope of fentry/fexit tracable
> > function changed, e.g. when static function becomes traceable
> 
> You can consider static functions traceable already. Arnaldo landed a
> change a day or so ago in pahole that exposes static functions in BTF
> and makes it possible to fentry/fexit attach them.
Good to know.

Are all static functions traceable (and can they be used in BTF_ID)?
Andrii Nakryiko Nov. 11, 2020, 12:17 a.m. UTC | #12
On Tue, Nov 10, 2020 at 4:07 PM Martin KaFai Lau <kafai@fb.com> wrote:
>
> On Tue, Nov 10, 2020 at 03:53:13PM -0800, Andrii Nakryiko wrote:
> > On Tue, Nov 10, 2020 at 3:43 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > >
> > > On Tue, Nov 10, 2020 at 11:01:12PM +0100, KP Singh wrote:
> > > > On Mon, Nov 9, 2020 at 9:32 PM John Fastabend <john.fastabend@gmail.com> wrote:
> > > > >
> > > > > Andrii Nakryiko wrote:
> > > > > > On Fri, Nov 6, 2020 at 5:52 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > > > >
> > > > > > > On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> > > > > > > > On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > > > > > >
> > > > > > > >
> > > > > > > > [...]
> > > > > > > >
> > > > > > > > > +       switch (prog->expected_attach_type) {
> > > > > > > > > +       case BPF_TRACE_RAW_TP:
> > > > > > > > > +               /* bpf_sk_storage has no trace point */
> > > > > > > > > +               return true;
> > > > > > > > > +       case BPF_TRACE_FENTRY:
> > > > > > > > > +       case BPF_TRACE_FEXIT:
> > > > > > > > > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > > > > > > > > +               btf_id = prog->aux->attach_btf_id;
> > > > > > > > > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > > > > > > > > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > > > > > > > > +               return !strstr(tname, "sk_storage");
> > > > > > > >
> > > > > > > > I'm always feeling uneasy about substring checks... Also, KP just
> > > > > > > > fixed the issue with string-based checks for LSM. Can we use a
> > > > > > > > BTF_ID_SET of blacklisted functions instead?
> > > > > > > KP one is different.  It accidentally whitelist-ed more than it should.
> > > > > > >
> > > > > > > It is a blacklist here.  It is actually cleaner and safer to blacklist
> > > > > > > all functions with "sk_storage" and too pessimistic is fine here.
> > > > > >
> > > > > > Fine for whom? Prefix check would be half-bad, but substring check is
> > > > > > horrible. Suddenly "task_storage" (and anything related) would be also
> > > > > > blacklisted. Let's do a prefix check at least.
> > > > > >
> > > > >
> > > > > Agree, prefix check sounds like a good idea. But, just doing a quick
> > > > > grep seems like it will need at least bpf_sk_storage and sk_storage to
> > > > > catch everything.
> > > >
> > > > Is there any reason we are not using BTF ID sets and an allow list similar
> > > > to bpf_d_path helper? (apart from the obvious inconvenience of
> > > > needing to update the set in the kernel)
> > > It is a blacklist here, a small recap from commit message.
> > >
> > > > During the load time, the new tracing_allowed() function
> > > > will ensure the tracing prog using the bpf_sk_storage_(get|delete)
> > > > helper is not tracing any *sk_storage*() function itself.
> > > > The sk is passed as "void *" when calling into bpf_local_storage.
> > >
> > > Both BTF_ID and string-based (either prefix/substr) will work.
> > >
> > > The intention is to first disallow a tracing program from tracing
> > > any function in bpf_sk_storage.c and also calling the
> > > bpf_sk_storage_(get|delete) helper at the same time.
> > > This blacklist can be revisited later if there would
> > > be a use case in some of the blacklist-ed
> > > functions (which I doubt).
> > >
> > > To use BTF_ID, it needs to consider about if the current (and future)
> > > bpf_sk_storage function can be used in BTF_ID or not:
> > > static, global/external, or inlined.
> > >
> > > If BTF_ID is the best way for doing all black/white list, I don't mind
> > > either.  I could force some to inline and we need to remember
> > > to revisit the blacklist when the scope of fentry/fexit tracable
> > > function changed, e.g. when static function becomes traceable
> >
> > You can consider static functions traceable already. Arnaldo landed a
> > change a day or so ago in pahole that exposes static functions in BTF
> > and makes it possible to fentry/fexit attach them.
> Good to know.
>
> Is all static traceable (and can be used in BTF_ID)?

Only those that end up not inlined, I think, similar to kprobes.
pahole actually checks the mcount section to keep only those that
are attachable with ftrace.  See [0] for the patches.

  [0] https://patchwork.kernel.org/project/netdevbpf/list/?series=379377&state=*
Martin KaFai Lau Nov. 11, 2020, 12:20 a.m. UTC | #13
On Tue, Nov 10, 2020 at 04:17:06PM -0800, Andrii Nakryiko wrote:
> On Tue, Nov 10, 2020 at 4:07 PM Martin KaFai Lau <kafai@fb.com> wrote:
> >
> > On Tue, Nov 10, 2020 at 03:53:13PM -0800, Andrii Nakryiko wrote:
> > > On Tue, Nov 10, 2020 at 3:43 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > >
> > > > On Tue, Nov 10, 2020 at 11:01:12PM +0100, KP Singh wrote:
> > > > > On Mon, Nov 9, 2020 at 9:32 PM John Fastabend <john.fastabend@gmail.com> wrote:
> > > > > >
> > > > > > Andrii Nakryiko wrote:
> > > > > > > On Fri, Nov 6, 2020 at 5:52 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > > > > >
> > > > > > > > On Fri, Nov 06, 2020 at 05:14:14PM -0800, Andrii Nakryiko wrote:
> > > > > > > > > On Fri, Nov 6, 2020 at 2:08 PM Martin KaFai Lau <kafai@fb.com> wrote:
> > > > > > > > > >
> > > > > > > > >
> > > > > > > > > [...]
> > > > > > > > >
> > > > > > > > > > +       switch (prog->expected_attach_type) {
> > > > > > > > > > +       case BPF_TRACE_RAW_TP:
> > > > > > > > > > +               /* bpf_sk_storage has no trace point */
> > > > > > > > > > +               return true;
> > > > > > > > > > +       case BPF_TRACE_FENTRY:
> > > > > > > > > > +       case BPF_TRACE_FEXIT:
> > > > > > > > > > +               btf_vmlinux = bpf_get_btf_vmlinux();
> > > > > > > > > > +               btf_id = prog->aux->attach_btf_id;
> > > > > > > > > > +               t = btf_type_by_id(btf_vmlinux, btf_id);
> > > > > > > > > > +               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
> > > > > > > > > > +               return !strstr(tname, "sk_storage");
> > > > > > > > >
> > > > > > > > > I'm always feeling uneasy about substring checks... Also, KP just
> > > > > > > > > fixed the issue with string-based checks for LSM. Can we use a
> > > > > > > > > BTF_ID_SET of blacklisted functions instead?
> > > > > > > > KP one is different.  It accidentally whitelist-ed more than it should.
> > > > > > > >
> > > > > > > > It is a blacklist here.  It is actually cleaner and safer to blacklist
> > > > > > > > all functions with "sk_storage" and too pessimistic is fine here.
> > > > > > >
> > > > > > > Fine for whom? Prefix check would be half-bad, but substring check is
> > > > > > > horrible. Suddenly "task_storage" (and anything related) would be also
> > > > > > > blacklisted. Let's do a prefix check at least.
> > > > > > >
> > > > > >
> > > > > > Agree, prefix check sounds like a good idea. But, just doing a quick
> > > > > > grep seems like it will need at least bpf_sk_storage and sk_storage to
> > > > > > catch everything.
> > > > >
> > > > > Is there any reason we are not using BTF ID sets and an allow list similar
> > > > > to bpf_d_path helper? (apart from the obvious inconvenience of
> > > > > needing to update the set in the kernel)
> > > > It is a blacklist here, a small recap from commit message.
> > > >
> > > > > During the load time, the new tracing_allowed() function
> > > > > will ensure the tracing prog using the bpf_sk_storage_(get|delete)
> > > > > helper is not tracing any *sk_storage*() function itself.
> > > > > The sk is passed as "void *" when calling into bpf_local_storage.
> > > >
> > > > Both BTF_ID and string-based (either prefix/substr) will work.
> > > >
> > > > The intention is to first disallow a tracing program from tracing
> > > > any function in bpf_sk_storage.c and also calling the
> > > > bpf_sk_storage_(get|delete) helper at the same time.
> > > > This blacklist can be revisited later if there would
> > > > be a use case in some of the blacklist-ed
> > > > functions (which I doubt).
> > > >
> > > > To use BTF_ID, it needs to consider about if the current (and future)
> > > > bpf_sk_storage function can be used in BTF_ID or not:
> > > > static, global/external, or inlined.
> > > >
> > > > If BTF_ID is the best way for doing all black/white list, I don't mind
> > > > either.  I could force some to inline and we need to remember
> > > > to revisit the blacklist when the scope of fentry/fexit tracable
> > > > function changed, e.g. when static function becomes traceable
> > >
> > > You can consider static functions traceable already. Arnaldo landed a
> > > change a day or so ago in pahole that exposes static functions in BTF
> > > and makes it possible to fentry/fexit attach them.
> > Good to know.
> >
> > Is all static traceable (and can be used in BTF_ID)?
> 
> Only those that end up not inlined, I think. Similarly as with
> kprobes. pahole actually checks mcount section to keep only those that
> are attachable with ftrace. See [0] for patches.
> 
>   [0] https://patchwork.kernel.org/project/netdevbpf/list/?series=379377&state=*
I will go with the prefix check then, to avoid tagging functions with
inline/noinline.
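
For illustration, the prefix check converged on here could be as small as the
sketch below, assuming str_has_prefix() and the two prefixes John's grep turned
up; the actual follow-up revision may differ.

/* Sketch only: a prefix-based test that could replace the
 * strstr(tname, "sk_storage") check, so that e.g. future
 * task_storage functions are not caught by accident.
 */
static bool is_sk_storage_func(const char *tname)
{
	return str_has_prefix(tname, "bpf_sk_storage") ||
	       str_has_prefix(tname, "sk_storage");
}

bpf_sk_storage_tracing_allowed() would then return !is_sk_storage_func(tname)
for the BPF_TRACE_FENTRY/FEXIT cases.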

Patch

diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
index 3c516dd07caf..0e85713f56df 100644
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -20,6 +20,8 @@  void bpf_sk_storage_free(struct sock *sk);
 
 extern const struct bpf_func_proto bpf_sk_storage_get_proto;
 extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
+extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
 
 struct bpf_local_storage_elem;
 struct bpf_sk_storage_diag;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index e4515b0f62a8..cfce60ad1cb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -16,6 +16,7 @@ 
 #include <linux/syscalls.h>
 #include <linux/error-injection.h>
 #include <linux/btf_ids.h>
+#include <net/bpf_sk_storage.h>
 
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/btf.h>
@@ -1735,6 +1736,10 @@  tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_skc_to_tcp_request_sock_proto;
 	case BPF_FUNC_skc_to_udp6_sock:
 		return &bpf_skc_to_udp6_sock_proto;
+	case BPF_FUNC_sk_storage_get:
+		return &bpf_sk_storage_get_tracing_proto;
+	case BPF_FUNC_sk_storage_delete:
+		return &bpf_sk_storage_delete_tracing_proto;
 #endif
 	case BPF_FUNC_seq_printf:
 		return prog->expected_attach_type == BPF_TRACE_ITER ?
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 001eac65e40f..1a41c917e08d 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -6,6 +6,7 @@ 
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/btf_ids.h>
 #include <linux/bpf_local_storage.h>
 #include <net/bpf_sk_storage.h>
@@ -378,6 +379,78 @@  const struct bpf_func_proto bpf_sk_storage_delete_proto = {
 	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
 };
 
+static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
+{
+	const struct btf *btf_vmlinux;
+	const struct btf_type *t;
+	const char *tname;
+	u32 btf_id;
+
+	if (prog->aux->dst_prog)
+		return false;
+
+	/* Ensure the tracing program is not tracing
+	 * any *sk_storage*() function and also
+	 * use the bpf_sk_storage_(get|delete) helper.
+	 */
+	switch (prog->expected_attach_type) {
+	case BPF_TRACE_RAW_TP:
+		/* bpf_sk_storage has no trace point */
+		return true;
+	case BPF_TRACE_FENTRY:
+	case BPF_TRACE_FEXIT:
+		btf_vmlinux = bpf_get_btf_vmlinux();
+		btf_id = prog->aux->attach_btf_id;
+		t = btf_type_by_id(btf_vmlinux, btf_id);
+		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+		return !strstr(tname, "sk_storage");
+	default:
+		return false;
+	}
+
+	return false;
+}
+
+BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
+	   void *, value, u64, flags)
+{
+	if (!in_serving_softirq() && !in_task())
+		return (unsigned long)NULL;
+
+	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
+}
+
+BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
+	   struct sock *, sk)
+{
+	if (!in_serving_softirq() && !in_task())
+		return -EPERM;
+
+	return ____bpf_sk_storage_delete(map, sk);
+}
+
+const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
+	.func		= bpf_sk_storage_get_tracing,
+	.gpl_only	= false,
+	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg4_type	= ARG_ANYTHING,
+	.allowed	= bpf_sk_storage_tracing_allowed,
+};
+
+const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
+	.func		= bpf_sk_storage_delete_tracing,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+	.allowed	= bpf_sk_storage_tracing_allowed,
+};
+
 struct bpf_sk_storage_diag {
 	u32 nr_maps;
 	struct bpf_map *maps[];