
[bpf-next,v3,3/3] bpf: remove extra lock_sock for TCP_ZEROCOPY_RECEIVE

Message ID 20210105214350.138053-4-sdf@google.com (mailing list archive)
State Superseded
Delegated to: BPF
Series bpf: misc performance improvements for cgroup hooks

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 12 maintainers not CCed: kpsingh@kernel.org yhs@fb.com linux-kselftest@vger.kernel.org andrii@kernel.org kuba@kernel.org shuah@kernel.org john.fastabend@gmail.com iii@linux.ibm.com yoshfuji@linux-ipv6.org toke@redhat.com davem@davemloft.net kuznet@ms2.inr.ac.ru
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 12609 this patch: 12609
netdev/kdoc success Errors and warnings before: 3 this patch: 3
netdev/verify_fixes success
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 297 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 12948 this patch: 12948
netdev/header_inline success
netdev/stable success Stable not CCed

Commit Message

Stanislav Fomichev Jan. 5, 2021, 9:43 p.m. UTC
Add custom implementation of getsockopt hook for TCP_ZEROCOPY_RECEIVE.
We skip generic hooks for TCP_ZEROCOPY_RECEIVE and have a custom
call in do_tcp_getsockopt using the on-stack data. This removes
3% overhead for locking/unlocking the socket.

Also:
- Removed BUILD_BUG_ON (zerocopy doesn't depend on the buf size anymore)
- Separated on-stack buffer into bpf_sockopt_buf and downsized to 32 bytes
  (let's keep it to help with the other options)

(I can probably split this patch into two: add new features and rework
 bpf_sockopt_buf; can follow up if the approach in general sounds
 good).
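
For reference, the gist of the change, condensed from the hunks in
kernel/bpf/cgroup.c and net/ipv4/tcp.c below:

#ifdef CONFIG_INET
	/* __cgroup_bpf_run_filter_getsockopt(): skip the generic path
	 * (lock_sock + copy_{to,from}_user) for this optname, it is
	 * handled by do_tcp_getsockopt() itself.
	 */
	if (sk->sk_prot->getsockopt == tcp_getsockopt &&
	    level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
		return retval;
#endif

	/* do_tcp_getsockopt(): run the cgroup BPF hook directly on the
	 * on-stack struct tcp_zerocopy_receive, under the socket lock
	 * that do_tcp_getsockopt already holds.
	 */
	err = tcp_zerocopy_receive(sk, &zc);
	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
						  &zc, &len, err);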

Without this patch:
     3.29%     0.07%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt
            |
             --3.22%--__cgroup_bpf_run_filter_getsockopt
                       |
                       |--0.66%--lock_sock_nested
                       |
                       |--0.57%--__might_fault
                       |
                        --0.56%--release_sock

With the patch applied:
     0.42%     0.10%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt_kern
     0.02%     0.02%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Eric Dumazet <edumazet@google.com>
---
 include/linux/bpf-cgroup.h                    |  16 +++
 include/linux/filter.h                        |   6 +-
 kernel/bpf/cgroup.c                           | 103 +++++++++++++-----
 net/ipv4/tcp.c                                |   2 +
 .../selftests/bpf/prog_tests/sockopt_sk.c     |  22 ++++
 .../testing/selftests/bpf/progs/sockopt_sk.c  |  15 +++
 6 files changed, 135 insertions(+), 29 deletions(-)

Comments

Martin KaFai Lau Jan. 6, 2021, 7:47 p.m. UTC | #1
On Tue, Jan 05, 2021 at 01:43:50PM -0800, Stanislav Fomichev wrote:
> Add custom implementation of getsockopt hook for TCP_ZEROCOPY_RECEIVE.
> We skip generic hooks for TCP_ZEROCOPY_RECEIVE and have a custom
> call in do_tcp_getsockopt using the on-stack data. This removes
> 3% overhead for locking/unlocking the socket.
> 
> Also:
> - Removed BUILD_BUG_ON (zerocopy doesn't depend on the buf size anymore)
> - Separated on-stack buffer into bpf_sockopt_buf and downsized to 32 bytes
>   (let's keep it to help with the other options)
> 
> (I can probably split this patch into two: add new features and rework
>  bpf_sockopt_buf; can follow up if the approach in general sounds
>  good).
> 
> Without this patch:
>      3.29%     0.07%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt
>             |
>              --3.22%--__cgroup_bpf_run_filter_getsockopt
>                        |
>                        |--0.66%--lock_sock_nested
A general question for the sockopt progs: why does the BPF_CGROUP_(GET|SET)SOCKOPT prog
have to run under lock_sock()?

>                        |
>                        |--0.57%--__might_fault
>                        |
>                         --0.56%--release_sock
> 
> With the patch applied:
>      0.42%     0.10%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt_kern
>      0.02%     0.02%  tcp_mmap  [kernel.kallsyms]  [k] __cgroup_bpf_run_filter_getsockopt
> 
[ ... ]

> @@ -1445,15 +1442,29 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
>  				       int __user *optlen, int max_optlen,
>  				       int retval)
>  {
> -	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
> -	struct bpf_sockopt_kern ctx = {
> -		.sk = sk,
> -		.level = level,
> -		.optname = optname,
> -		.retval = retval,
> -	};
> +	struct bpf_sockopt_kern ctx;
> +	struct bpf_sockopt_buf buf;
> +	struct cgroup *cgrp;
>  	int ret;
>  
> +#ifdef CONFIG_INET
> +	/* TCP do_tcp_getsockopt has optimized getsockopt implementation
> +	 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE.
> +	 */
> +	if (sk->sk_prot->getsockopt == tcp_getsockopt &&
> +	    level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
> +		return retval;
> +#endif
That seems like too much protocol detail and not very scalable.
It is not very related to kernel/bpf/cgroup.c, which has very little idea
whether a specific protocol has optimized things in some way (e.g. by
directly calling the cgroup's bpf prog at some strategic places, as in this patch).
Let's see if it can be done better.

At least, these protocol checks belong to the net's socket.c
more than the bpf's cgroup.c here.  If it also looks like layering
breakage in socket.c, maybe adding a signal in sk_prot (for example)
to tell if the sk_prot->getsockopt has already called the cgroup's bpf
prog?  (e.g. tcp_getsockopt() can directly call the cgroup's bpf for all
optname instead of only TCP_ZEROCOPY_RECEIVE).

For example:

int __sys_getsockopt(...)
{
	/* ... */

	if (!sk_prot->bpf_getsockopt_handled)
		BPF_CGROUP_RUN_PROG_GETSOCKOPT(...);
}

Thoughts?

> +
> +	memset(&buf, 0, sizeof(buf));
> +	memset(&ctx, 0, sizeof(ctx));
> +
> +	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
> +	ctx.sk = sk;
> +	ctx.level = level;
> +	ctx.optname = optname;
> +	ctx.retval = retval;
> +
>  	/* Opportunistic check to see whether we have any BPF program
>  	 * attached to the hook so we don't waste time allocating
>  	 * memory and locking the socket.
Stanislav Fomichev Jan. 6, 2021, 10:45 p.m. UTC | #2
On 01/06, Martin KaFai Lau wrote:
> On Tue, Jan 05, 2021 at 01:43:50PM -0800, Stanislav Fomichev wrote:
> > Add custom implementation of getsockopt hook for TCP_ZEROCOPY_RECEIVE.
> > We skip generic hooks for TCP_ZEROCOPY_RECEIVE and have a custom
> > call in do_tcp_getsockopt using the on-stack data. This removes
> > 3% overhead for locking/unlocking the socket.
> >
> > Also:
> > - Removed BUILD_BUG_ON (zerocopy doesn't depend on the buf size anymore)
> > - Separated on-stack buffer into bpf_sockopt_buf and downsized to 32  
> bytes
> >   (let's keep it to help with the other options)
> >
> > (I can probably split this patch into two: add new features and rework
> >  bpf_sockopt_buf; can follow up if the approach in general sounds
> >  good).
> >
> > Without this patch:
> >      3.29%     0.07%  tcp_mmap  [kernel.kallsyms]  [k]  
> __cgroup_bpf_run_filter_getsockopt
> >             |
> >              --3.22%--__cgroup_bpf_run_filter_getsockopt
> >                        |
> >                        |--0.66%--lock_sock_nested
> A general question for sockopt prog, why the BPF_CGROUP_(GET|SET)SOCKOPT  
> prog
> has to run under lock_sock()?
I don't think there is a strong reason. We expose sk to the BPF program,
but mainly for the socket storage map (which, afaik, doesn't require
the socket to be locked). OTOH, it seems that providing a consistent view
of the sk to the BPF program is a good idea.

Eric suggested trying the fast socket lock. It helps a bit,
but it doesn't remove the issue completely because
we also do a bunch of copy_{to,from}_user in the generic
__cgroup_bpf_run_filter_getsockopt :-(
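
i.e. something like this inside __cgroup_bpf_run_filter_getsockopt
(sketch only, not what this patch ends up doing; lock_sock_fast()
reports whether it had to fall back to the slow path, and that value
has to be passed back to unlock_sock_fast()):

	bool slow;

	slow = lock_sock_fast(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	unlock_sock_fast(sk, slow);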

> >                        |
> >                        |--0.57%--__might_fault
> >                        |
> >                         --0.56%--release_sock
> >
> > With the patch applied:
> >      0.42%     0.10%  tcp_mmap  [kernel.kallsyms]  [k]  
> __cgroup_bpf_run_filter_getsockopt_kern
> >      0.02%     0.02%  tcp_mmap  [kernel.kallsyms]  [k]  
> __cgroup_bpf_run_filter_getsockopt
> >
> [ ... ]

> > @@ -1445,15 +1442,29 @@ int __cgroup_bpf_run_filter_getsockopt(struct  
> sock *sk, int level,
> >  				       int __user *optlen, int max_optlen,
> >  				       int retval)
> >  {
> > -	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
> > -	struct bpf_sockopt_kern ctx = {
> > -		.sk = sk,
> > -		.level = level,
> > -		.optname = optname,
> > -		.retval = retval,
> > -	};
> > +	struct bpf_sockopt_kern ctx;
> > +	struct bpf_sockopt_buf buf;
> > +	struct cgroup *cgrp;
> >  	int ret;
> >
> > +#ifdef CONFIG_INET
> > +	/* TCP do_tcp_getsockopt has optimized getsockopt implementation
> > +	 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE.
> > +	 */
> > +	if (sk->sk_prot->getsockopt == tcp_getsockopt &&
> > +	    level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
> > +		return retval;
> > +#endif
> That seems too much protocol details and not very scalable.
> It is not very related to kernel/bpf/cgroup.c which has very little idea
> whether a specific protocol has optimized things in some ways (e.g. by
> directly calling cgroup's bpf prog at some strategic places in this  
> patch).
> Lets see if it can be done better.

> At least, these protocol checks belong to the net's socket.c
> more than the bpf's cgroup.c here.  If it also looks like layering
> breakage in socket.c, may be adding a signal in sk_prot (for example)
> to tell if the sk_prot->getsockopt has already called the cgroup's bpf
> prog?  (e.g. tcp_getsockopt() can directly call the cgroup's bpf for all
> optname instead of only TCP_ZEROCOPY_RECEIVE).

> For example:

> int __sys_getsockopt(...)
> {
> 	/* ... */

> 	if (!sk_prot->bpf_getsockopt_handled)
> 		BPF_CGROUP_RUN_PROG_GETSOCKOPT(...);
> }

> Thoughts?

Sounds good. I didn't go that far because I don't expect there to be
a lot of special cases like that. But it might be worth supporting
it in a generic way from the beginning.

I was thinking about something simpler:

int __cgroup_bpf_run_filter_getsockopt(sk, ...)
{
	if (sk->sk_prot->bypass_bpf_getsockopt(level, optlen)) {
		return retval;
	}

  	// ...
}

Not sure it's worth exposing it to the __sys_getsockopt. WDYT?
Martin KaFai Lau Jan. 7, 2021, 1:29 a.m. UTC | #3
On Wed, Jan 06, 2021 at 02:45:56PM -0800, sdf@google.com wrote:
> On 01/06, Martin KaFai Lau wrote:
> > On Tue, Jan 05, 2021 at 01:43:50PM -0800, Stanislav Fomichev wrote:
> > > Add custom implementation of getsockopt hook for TCP_ZEROCOPY_RECEIVE.
> > > We skip generic hooks for TCP_ZEROCOPY_RECEIVE and have a custom
> > > call in do_tcp_getsockopt using the on-stack data. This removes
> > > 3% overhead for locking/unlocking the socket.
> > >
> > > Also:
> > > - Removed BUILD_BUG_ON (zerocopy doesn't depend on the buf size anymore)
> > > - Separated on-stack buffer into bpf_sockopt_buf and downsized to 32
> > bytes
> > >   (let's keep it to help with the other options)
> > >
> > > (I can probably split this patch into two: add new features and rework
> > >  bpf_sockopt_buf; can follow up if the approach in general sounds
> > >  good).
> > >
> > > Without this patch:
> > >      3.29%     0.07%  tcp_mmap  [kernel.kallsyms]  [k]
> > __cgroup_bpf_run_filter_getsockopt
> > >             |
> > >              --3.22%--__cgroup_bpf_run_filter_getsockopt
> > >                        |
> > >                        |--0.66%--lock_sock_nested
> > A general question for sockopt prog, why the BPF_CGROUP_(GET|SET)SOCKOPT
> > prog
> > has to run under lock_sock()?
> I don't think there is a strong reason. We expose sk to the BPF program,
> but mainly for the socket storage map (which, afaik, doesn't require
> socket to be locked). OTOH, it seems that providing a consistent view
> of the sk to the BPF is a good idea.
hmm... most bpf progs also do not require a locked sock.  For
example, the __sk_buff->sk.  If a bpf prog needs a locked view of the sk,
a more generic solution is desired.  Anyhow, I guess that train has sort
of sailed for sockopt bpf.

> 
> Eric has suggested to try to use fast socket lock. It helps a bit,
> but it doesn't remove the issue completely because
> we do a bunch of copy_{to,from}_user in the generic
> __cgroup_bpf_run_filter_getsockopt as well :-(
> 
> > >                        |
> > >                        |--0.57%--__might_fault
Is it a debug kernel?

> > >                        |
> > >                         --0.56%--release_sock
> > >
> > > With the patch applied:
> > >      0.42%     0.10%  tcp_mmap  [kernel.kallsyms]  [k]
> > __cgroup_bpf_run_filter_getsockopt_kern
> > >      0.02%     0.02%  tcp_mmap  [kernel.kallsyms]  [k]
> > __cgroup_bpf_run_filter_getsockopt
> > >
> > [ ... ]
> 
> > > @@ -1445,15 +1442,29 @@ int __cgroup_bpf_run_filter_getsockopt(struct
> > sock *sk, int level,
> > >  				       int __user *optlen, int max_optlen,
> > >  				       int retval)
> > >  {
> > > -	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
> > > -	struct bpf_sockopt_kern ctx = {
> > > -		.sk = sk,
> > > -		.level = level,
> > > -		.optname = optname,
> > > -		.retval = retval,
> > > -	};
> > > +	struct bpf_sockopt_kern ctx;
> > > +	struct bpf_sockopt_buf buf;
> > > +	struct cgroup *cgrp;
> > >  	int ret;
> > >
> > > +#ifdef CONFIG_INET
> > > +	/* TCP do_tcp_getsockopt has optimized getsockopt implementation
> > > +	 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE.
> > > +	 */
> > > +	if (sk->sk_prot->getsockopt == tcp_getsockopt &&
> > > +	    level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
> > > +		return retval;
> > > +#endif
> > That seems too much protocol details and not very scalable.
> > It is not very related to kernel/bpf/cgroup.c which has very little idea
> > whether a specific protocol has optimized things in some ways (e.g. by
> > directly calling cgroup's bpf prog at some strategic places in this
> > patch).
> > Lets see if it can be done better.
> 
> > At least, these protocol checks belong to the net's socket.c
> > more than the bpf's cgroup.c here.  If it also looks like layering
> > breakage in socket.c, may be adding a signal in sk_prot (for example)
> > to tell if the sk_prot->getsockopt has already called the cgroup's bpf
> > prog?  (e.g. tcp_getsockopt() can directly call the cgroup's bpf for all
> > optname instead of only TCP_ZEROCOPY_RECEIVE).
> 
> > For example:
> 
> > int __sys_getsockopt(...)
> > {
> > 	/* ... */
> 
> > 	if (!sk_prot->bpf_getsockopt_handled)
> > 		BPF_CGROUP_RUN_PROG_GETSOCKOPT(...);
> > }
> 
> > Thoughts?
> 
> Sounds good. I didn't go that far because I don't expect there to be
> a lot of special cases like that. But it might be worth supporting
> it in a generic way from the beginning.
> 
> I was thinking about something simpler:
> 
> int __cgroup_bpf_run_filter_getsockopt(sk, ...)
> {
> 	if (sk->sk_prot->bypass_bpf_getsockopt(level, optlen)) {
I think you meant s/optlen/optname/, which is not __user.
Yeah, I think that can provide a more generic solution
and also abstract things away.
Please add a detailed comment in this function.

> 		return retval;
> 	}
> 
>  	// ...
> }
> 
> Not sure it's worth exposing it to the __sys_getsockopt. WDYT?
Or call that in BPF_CGROUP_RUN_PROG_GETSOCKOPT(); then the
changes in __cgroup_bpf_run_filter_getsockopt() in this
patch should go away?
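
Roughly (untested, just to illustrate, mirroring the shape of the _KERN
macro added in this patch; bypass_bpf_getsockopt would be a new, optional
sk_prot callback, and a NULL check on it is omitted here):

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval,	       \
				       optlen, max_optlen, retval)	       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT) &&		       \
	    !(sock)->sk_prot->bypass_bpf_getsockopt(level, optname))	       \
		__ret = __cgroup_bpf_run_filter_getsockopt(		       \
			sock, level, optname, optval, optlen,		       \
			max_optlen, retval);				       \
	__ret;								       \
})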
Stanislav Fomichev Jan. 7, 2021, 4:09 p.m. UTC | #4
On 01/06, Martin KaFai Lau wrote:
> On Wed, Jan 06, 2021 at 02:45:56PM -0800, sdf@google.com wrote:
> > On 01/06, Martin KaFai Lau wrote:
> > > On Tue, Jan 05, 2021 at 01:43:50PM -0800, Stanislav Fomichev wrote:
> > > > Add custom implementation of getsockopt hook for  
> TCP_ZEROCOPY_RECEIVE.
> > > > We skip generic hooks for TCP_ZEROCOPY_RECEIVE and have a custom
> > > > call in do_tcp_getsockopt using the on-stack data. This removes
> > > > 3% overhead for locking/unlocking the socket.
> > > >
> > > > Also:
> > > > - Removed BUILD_BUG_ON (zerocopy doesn't depend on the buf size  
> anymore)
> > > > - Separated on-stack buffer into bpf_sockopt_buf and downsized to 32
> > > bytes
> > > >   (let's keep it to help with the other options)
> > > >
> > > > (I can probably split this patch into two: add new features and  
> rework
> > > >  bpf_sockopt_buf; can follow up if the approach in general sounds
> > > >  good).
> > > >
> > > > Without this patch:
> > > >      3.29%     0.07%  tcp_mmap  [kernel.kallsyms]  [k]
> > > __cgroup_bpf_run_filter_getsockopt
> > > >             |
> > > >              --3.22%--__cgroup_bpf_run_filter_getsockopt
> > > >                        |
> > > >                        |--0.66%--lock_sock_nested
> > > A general question for sockopt prog, why the BPF_CGROUP_(GET| 
> SET)SOCKOPT
> > > prog
> > > has to run under lock_sock()?
> > I don't think there is a strong reason. We expose sk to the BPF program,
> > but mainly for the socket storage map (which, afaik, doesn't require
> > socket to be locked). OTOH, it seems that providing a consistent view
> > of the sk to the BPF is a good idea.
> hmm... most of the bpf prog also does not require a locked sock.  For
> example, the __sk_buff->sk.  If a bpf prog needs a locked view of sk,
> a more generic solution is desired.  Anyhow, I guess the train has sort
> of sailed for sockopt bpf.

> >
> > Eric has suggested to try to use fast socket lock. It helps a bit,
> > but it doesn't remove the issue completely because
> > we do a bunch of copy_{to,from}_user in the generic
> > __cgroup_bpf_run_filter_getsockopt as well :-(
> >
> > > >                        |
> > > >                        |--0.57%--__might_fault
> Is it a debug kernel?
Yeah, I think I did have CONFIG_DEBUG_ATOMIC_SLEEP=y for this particular
run, but I don't think I have anything else debugging-related (although
it might bring in DEBUG_KERNEL and some other crap). Let me check whether
something else has crept in and rerun the benchmarks without it.
I'll respin with the updated data if nothing serious pops up.

> > > >                        |
> > > >                         --0.56%--release_sock
> > > >
> > > > With the patch applied:
> > > >      0.42%     0.10%  tcp_mmap  [kernel.kallsyms]  [k]
> > > __cgroup_bpf_run_filter_getsockopt_kern
> > > >      0.02%     0.02%  tcp_mmap  [kernel.kallsyms]  [k]
> > > __cgroup_bpf_run_filter_getsockopt
> > > >
> > > [ ... ]
> >
> > > > @@ -1445,15 +1442,29 @@ int  
> __cgroup_bpf_run_filter_getsockopt(struct
> > > sock *sk, int level,
> > > >  				       int __user *optlen, int max_optlen,
> > > >  				       int retval)
> > > >  {
> > > > -	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
> > > > -	struct bpf_sockopt_kern ctx = {
> > > > -		.sk = sk,
> > > > -		.level = level,
> > > > -		.optname = optname,
> > > > -		.retval = retval,
> > > > -	};
> > > > +	struct bpf_sockopt_kern ctx;
> > > > +	struct bpf_sockopt_buf buf;
> > > > +	struct cgroup *cgrp;
> > > >  	int ret;
> > > >
> > > > +#ifdef CONFIG_INET
> > > > +	/* TCP do_tcp_getsockopt has optimized getsockopt implementation
> > > > +	 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE.
> > > > +	 */
> > > > +	if (sk->sk_prot->getsockopt == tcp_getsockopt &&
> > > > +	    level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
> > > > +		return retval;
> > > > +#endif
> > > That seems too much protocol details and not very scalable.
> > > It is not very related to kernel/bpf/cgroup.c which has very little  
> idea
> > > whether a specific protocol has optimized things in some ways (e.g. by
> > > directly calling cgroup's bpf prog at some strategic places in this
> > > patch).
> > > Lets see if it can be done better.
> >
> > > At least, these protocol checks belong to the net's socket.c
> > > more than the bpf's cgroup.c here.  If it also looks like layering
> > > breakage in socket.c, may be adding a signal in sk_prot (for example)
> > > to tell if the sk_prot->getsockopt has already called the cgroup's bpf
> > > prog?  (e.g. tcp_getsockopt() can directly call the cgroup's bpf for  
> all
> > > optname instead of only TCP_ZEROCOPY_RECEIVE).
> >
> > > For example:
> >
> > > int __sys_getsockopt(...)
> > > {
> > > 	/* ... */
> >
> > > 	if (!sk_prot->bpf_getsockopt_handled)
> > > 		BPF_CGROUP_RUN_PROG_GETSOCKOPT(...);
> > > }
> >
> > > Thoughts?
> >
> > Sounds good. I didn't go that far because I don't expect there to be
> > a lot of special cases like that. But it might be worth supporting
> > it in a generic way from the beginning.
> >
> > I was thinking about something simpler:
> >
> > int __cgroup_bpf_run_filter_getsockopt(sk, ...)
> > {
> > 	if (sk->sk_prot->bypass_bpf_getsockopt(level, optlen)) {
> I think it meant s/optlen/optname/ which is not __user.
> Yeah, I think that can provide a more generic solution
> and also abstract things away.
> Please add a details comment in this function.
Sure, will do!

> > 		return retval;
> > 	}
> >
> >  	// ...
> > }
> >
> > Not sure it's worth exposing it to the __sys_getsockopt. WDYT?
> or call that in BPF_CGROUP_RUN_PROG_GETSOCKOPT().  then the
> changes in __cgroup_bpf_run_filter_getsockopt() in this
> patch should go away?
SG, will add to BPF_CGROUP_RUN_PROG_GETSOCKOPT.

Patch

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index dd4b8e300746..e8ed9a839abe 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -147,6 +147,10 @@  int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 				       int __user *optlen, int max_optlen,
 				       int retval);
 
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+					    int optname, void *optval,
+					    int *optlen, int retval);
+
 static inline enum bpf_cgroup_storage_type cgroup_storage_type(
 	struct bpf_map *map)
 {
@@ -373,6 +377,16 @@  int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 	__ret;								       \
 })
 
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
+					    optlen, retval)		       \
+({									       \
+	int __ret = retval;						       \
+	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))			       \
+		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
+			sock, level, optname, optval, optlen, retval);	       \
+	__ret;								       \
+})
+
 int cgroup_bpf_prog_attach(const union bpf_attr *attr,
 			   enum bpf_prog_type ptype, struct bpf_prog *prog);
 int cgroup_bpf_prog_detach(const union bpf_attr *attr,
@@ -454,6 +468,8 @@  static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
 				       optlen, max_optlen, retval) ({ retval; })
+#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
+					    optlen, retval) ({ retval; })
 #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
 				       kernel_optval) ({ 0; })
 
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 54a4225f36d8..8739f1d4cac4 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1281,7 +1281,10 @@  struct bpf_sysctl_kern {
 	u64 tmp_reg;
 };
 
-#define BPF_SOCKOPT_KERN_BUF_SIZE	64
+#define BPF_SOCKOPT_KERN_BUF_SIZE	32
+struct bpf_sockopt_buf {
+	u8		data[BPF_SOCKOPT_KERN_BUF_SIZE];
+};
 
 struct bpf_sockopt_kern {
 	struct sock	*sk;
@@ -1291,7 +1294,6 @@  struct bpf_sockopt_kern {
 	s32		optname;
 	s32		optlen;
 	s32		retval;
-	u8		buf[BPF_SOCKOPT_KERN_BUF_SIZE];
 };
 
 int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index adbecdcaa370..5ac927b8d75b 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -16,7 +16,7 @@ 
 #include <linux/bpf-cgroup.h>
 #include <net/sock.h>
 #include <net/bpf_sk_storage.h>
-#include <uapi/linux/tcp.h> /* sizeof(struct tcp_zerocopy_receive) */
+#include <net/tcp.h> /* sizeof(struct tcp_zerocopy_receive) & tcp_getsockopt */
 
 #include "../cgroup/cgroup-internal.h"
 
@@ -1299,7 +1299,8 @@  static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
 	return empty;
 }
 
-static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
+static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
+			     struct bpf_sockopt_buf *buf)
 {
 	if (unlikely(max_optlen < 0))
 		return -EINVAL;
@@ -1311,18 +1312,11 @@  static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
 		max_optlen = PAGE_SIZE;
 	}
 
-	if (max_optlen <= sizeof(ctx->buf)) {
+	if (max_optlen <= sizeof(buf->data)) {
 		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
 		 * bytes avoid the cost of kzalloc.
-		 *
-		 * In order to remove extra allocations from the TCP
-		 * fast zero-copy path ensure that buffer covers
-		 * the size of struct tcp_zerocopy_receive.
 		 */
-		BUILD_BUG_ON(sizeof(struct tcp_zerocopy_receive) >
-			     BPF_SOCKOPT_KERN_BUF_SIZE);
-
-		ctx->optval = ctx->buf;
+		ctx->optval = buf->data;
 		ctx->optval_end = ctx->optval + max_optlen;
 		return max_optlen;
 	}
@@ -1336,16 +1330,18 @@  static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
 	return max_optlen;
 }
 
-static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
+static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
+			     struct bpf_sockopt_buf *buf)
 {
-	if (ctx->optval == ctx->buf)
+	if (ctx->optval == buf->data)
 		return;
 	kfree(ctx->optval);
 }
 
-static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx)
+static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
+				  struct bpf_sockopt_buf *buf)
 {
-	return ctx->optval != ctx->buf;
+	return ctx->optval != buf->data;
 }
 
 int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
@@ -1353,6 +1349,7 @@  int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 				       int *optlen, char **kernel_optval)
 {
 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+	struct bpf_sockopt_buf buf = {};
 	struct bpf_sockopt_kern ctx = {
 		.sk = sk,
 		.level = *level,
@@ -1373,7 +1370,7 @@  int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 	 */
 	max_optlen = max_t(int, 16, *optlen);
 
-	max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
 	if (max_optlen < 0)
 		return max_optlen;
 
@@ -1419,7 +1416,7 @@  int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 			 * No way to export on-stack buf, have to allocate a
 			 * new buffer.
 			 */
-			if (!sockopt_buf_allocated(&ctx)) {
+			if (!sockopt_buf_allocated(&ctx, &buf)) {
 				void *p = kzalloc(ctx.optlen, GFP_USER);
 
 				if (!p) {
@@ -1436,7 +1433,7 @@  int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
 
 out:
 	if (ret)
-		sockopt_free_buf(&ctx);
+		sockopt_free_buf(&ctx, &buf);
 	return ret;
 }
 
@@ -1445,15 +1442,29 @@  int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 				       int __user *optlen, int max_optlen,
 				       int retval)
 {
-	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	struct bpf_sockopt_kern ctx = {
-		.sk = sk,
-		.level = level,
-		.optname = optname,
-		.retval = retval,
-	};
+	struct bpf_sockopt_kern ctx;
+	struct bpf_sockopt_buf buf;
+	struct cgroup *cgrp;
 	int ret;
 
+#ifdef CONFIG_INET
+	/* TCP do_tcp_getsockopt has optimized getsockopt implementation
+	 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE.
+	 */
+	if (sk->sk_prot->getsockopt == tcp_getsockopt &&
+	    level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
+		return retval;
+#endif
+
+	memset(&buf, 0, sizeof(buf));
+	memset(&ctx, 0, sizeof(ctx));
+
+	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+	ctx.sk = sk;
+	ctx.level = level;
+	ctx.optname = optname;
+	ctx.retval = retval;
+
 	/* Opportunistic check to see whether we have any BPF program
 	 * attached to the hook so we don't waste time allocating
 	 * memory and locking the socket.
@@ -1463,7 +1474,7 @@  int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 
 	ctx.optlen = max_optlen;
 
-	max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
 	if (max_optlen < 0)
 		return max_optlen;
 
@@ -1521,9 +1532,47 @@  int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
 	ret = ctx.retval;
 
 out:
-	sockopt_free_buf(&ctx);
+	sockopt_free_buf(&ctx, &buf);
 	return ret;
 }
+
+int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
+					    int optname, void *optval,
+					    int *optlen, int retval)
+{
+	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+	struct bpf_sockopt_kern ctx = {
+		.sk = sk,
+		.level = level,
+		.optname = optname,
+		.retval = retval,
+		.optlen = *optlen,
+		.optval = optval,
+		.optval_end = optval + *optlen,
+	};
+	int ret;
+
+	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
+				 &ctx, BPF_PROG_RUN);
+	if (!ret)
+		return -EPERM;
+
+	if (ctx.optlen > *optlen)
+		return -EFAULT;
+
+	/* BPF programs only allowed to set retval to 0, not some
+	 * arbitrary value.
+	 */
+	if (ctx.retval != 0 && ctx.retval != retval)
+		return -EFAULT;
+
+	/* BPF programs can shrink the buffer, export the modifications.
+	 */
+	if (ctx.optlen != 0)
+		*optlen = ctx.optlen;
+
+	return ctx.retval;
+}
 #endif
 
 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ed42d2193c5c..d5367a879ed2 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -4098,6 +4098,8 @@  static int do_tcp_getsockopt(struct sock *sk, int level,
 			return -EFAULT;
 		lock_sock(sk);
 		err = tcp_zerocopy_receive(sk, &zc);
+		err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
+							  &zc, &len, err);
 		release_sock(sk);
 		if (len >= offsetofend(struct tcp_zerocopy_receive, err))
 			goto zerocopy_rcv_sk_err;
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
index b25c9c45c148..6bb18b1d8578 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
@@ -11,6 +11,7 @@  static int getsetsockopt(void)
 		char u8[4];
 		__u32 u32;
 		char cc[16]; /* TCP_CA_NAME_MAX */
+		struct tcp_zerocopy_receive zc;
 	} buf = {};
 	socklen_t optlen;
 	char *big_buf = NULL;
@@ -154,6 +155,27 @@  static int getsetsockopt(void)
 		goto err;
 	}
 
+	/* TCP_ZEROCOPY_RECEIVE triggers */
+	memset(&buf, 0, sizeof(buf));
+	optlen = sizeof(buf.zc);
+	err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
+	if (err) {
+		log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
+			err, errno);
+		goto err;
+	}
+
+	memset(&buf, 0, sizeof(buf));
+	buf.zc.address = 12345; /* rejected by BPF */
+	optlen = sizeof(buf.zc);
+	errno = 0;
+	err = getsockopt(fd, SOL_TCP, TCP_ZEROCOPY_RECEIVE, &buf, &optlen);
+	if (errno != EPERM) {
+		log_err("Unexpected getsockopt(TCP_ZEROCOPY_RECEIVE) err=%d errno=%d",
+			err, errno);
+		goto err;
+	}
+
 	free(big_buf);
 	close(fd);
 	return 0;
diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c
index 712df7b49cb1..c726f0763a13 100644
--- a/tools/testing/selftests/bpf/progs/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c
@@ -57,6 +57,21 @@  int _getsockopt(struct bpf_sockopt *ctx)
 		return 1;
 	}
 
+	if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) {
+		/* Verify that TCP_ZEROCOPY_RECEIVE triggers.
+		 * It has a custom implementation for performance
+		 * reasons.
+		 */
+
+		if (optval + sizeof(struct tcp_zerocopy_receive) > optval_end)
+			return 0; /* EPERM, bounds check */
+
+		if (((struct tcp_zerocopy_receive *)optval)->address != 0)
+			return 0; /* EPERM, unexpected data */
+
+		return 1;
+	}
+
 	if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
 		if (optval + 1 > optval_end)
 			return 0; /* EPERM, bounds check */