[bpf-next,1/2] sk_msg: Keep reference on socket file while wait_memory

Message ID 20220815023343.295094-2-liujian56@huawei.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Series: Keep reference on socket file while wait send memory

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2 this patch: 2
netdev/cc_maintainers warning 1 maintainers not CCed: netdev@vger.kernel.org
netdev/build_clang success Errors and warnings before: 5 this patch: 5
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2 this patch: 2
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 17 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Kernel LATEST on ubuntu-latest with llvm-16
bpf/vmtest-bpf-next-VM_Test-4 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-5 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-1 fail Logs for Kernel LATEST on ubuntu-latest with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Kernel LATEST on z15 with gcc

Commit Message

Liu Jian Aug. 15, 2022, 2:33 a.m. UTC
Fix the below NULL pointer dereference:

[   14.471200] Call Trace:
[   14.471562]  <TASK>
[   14.471882]  lock_acquire+0x245/0x2e0
[   14.472416]  ? remove_wait_queue+0x12/0x50
[   14.473014]  ? _raw_spin_lock_irqsave+0x17/0x50
[   14.473681]  _raw_spin_lock_irqsave+0x3d/0x50
[   14.474318]  ? remove_wait_queue+0x12/0x50
[   14.474907]  remove_wait_queue+0x12/0x50
[   14.475480]  sk_stream_wait_memory+0x20d/0x340
[   14.476127]  ? do_wait_intr_irq+0x80/0x80
[   14.476704]  do_tcp_sendpages+0x287/0x600
[   14.477283]  tcp_bpf_push+0xab/0x260
[   14.477817]  tcp_bpf_sendmsg_redir+0x297/0x500
[   14.478461]  ? __local_bh_enable_ip+0x77/0xe0
[   14.479096]  tcp_bpf_send_verdict+0x105/0x470
[   14.479729]  tcp_bpf_sendmsg+0x318/0x4f0
[   14.480311]  sock_sendmsg+0x2d/0x40
[   14.480822]  ____sys_sendmsg+0x1b4/0x1c0
[   14.481390]  ? copy_msghdr_from_user+0x62/0x80
[   14.482048]  ___sys_sendmsg+0x78/0xb0
[   14.482580]  ? vmf_insert_pfn_prot+0x91/0x150
[   14.483215]  ? __do_fault+0x2a/0x1a0
[   14.483738]  ? do_fault+0x15e/0x5d0
[   14.484246]  ? __handle_mm_fault+0x56b/0x1040
[   14.484874]  ? lock_is_held_type+0xdf/0x130
[   14.485474]  ? find_held_lock+0x2d/0x90
[   14.486046]  ? __sys_sendmsg+0x41/0x70
[   14.486587]  __sys_sendmsg+0x41/0x70
[   14.487105]  ? intel_pmu_drain_pebs_core+0x350/0x350
[   14.487822]  do_syscall_64+0x34/0x80
[   14.488345]  entry_SYSCALL_64_after_hwframe+0x63/0xcd

The test scenario is the following flow:
thread1                               thread2
-----------                           ---------------
 tcp_bpf_sendmsg
  tcp_bpf_send_verdict
   tcp_bpf_sendmsg_redir              sock_close
    tcp_bpf_push_locked                 __sock_release
     tcp_bpf_push                         //inet_release
      do_tcp_sendpages                    sock->ops->release
       sk_stream_wait_memory                // tcp_close
          sk_wait_event                      sk->sk_prot->close
           release_sock(__sk);
            ***

                                                lock_sock(sk);
                                                  __tcp_close
                                                    sock_orphan(sk)
                                                      sk->sk_wq  = NULL
                                                release_sock
            ****
           lock_sock(__sk);
          remove_wait_queue(sk_sleep(sk), &wait);
             sk_sleep(sk)
             //NULL pointer dereference
             &rcu_dereference_raw(sk->sk_wq)->wait

While waiting for memory in thread1, the socket is released along with its
wait queue because thread2 has closed it. This is caused by
tcp_bpf_send_verdict not increasing the f_count of
psock->sk_redir->sk_socket->file in thread1.
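
For reference, sock_orphan() (include/net/sock.h, reproduced here as it
stood around this kernel version) is what detaches the socket and clears
the wait queue pointer that sk_sleep() later dereferences:

	static inline void sock_orphan(struct sock *sk)
	{
		write_lock_bh(&sk->sk_callback_lock);
		sock_set_flag(sk, SOCK_DEAD);	/* mark the socket dead */
		sk_set_socket(sk, NULL);
		sk->sk_wq = NULL;	/* sk_sleep() returns &sk->sk_wq->wait */
		write_unlock_bh(&sk->sk_callback_lock);
	}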

Avoid this by keeping a reference on the socket file while the redirect
socket waits for send memory. Refer to [1].

[1] https://lore.kernel.org/netdev/20190211090949.18560-1-jakub@cloudflare.com/

Signed-off-by: Liu Jian <liujian56@huawei.com>
---
 net/ipv4/tcp_bpf.c | 8 ++++++++
 1 file changed, 8 insertions(+)

Comments

John Fastabend Aug. 17, 2022, 12:54 a.m. UTC | #1
Liu Jian wrote:
> Fix the below NULL pointer dereference:
> 
> [...]
> 
> While waiting for memory in thread1, the socket is released along with its
> wait queue because thread2 has closed it. This is caused by
> tcp_bpf_send_verdict not increasing the f_count of
> psock->sk_redir->sk_socket->file in thread1.
> 
> Avoid this by keeping a reference on the socket file while the redirect
> socket waits for send memory. Refer to [1].
> 
> [1] https://lore.kernel.org/netdev/20190211090949.18560-1-jakub@cloudflare.com/
> 
> Signed-off-by: Liu Jian <liujian56@huawei.com>

Thanks for the detailed commit message; it's necessary for understanding
the problem without spending hours deciphering it myself.

When I looked at [1], we solved a similar problem by using
the MSG_DONTWAIT flag so that the error was pushed back to
the sender.

Can we do the same thing here? The nice bit is that the error
would get all the way back to the sending socket, so userspace
could decide how to handle it. Did I miss something?

> [...]
Liu Jian Aug. 17, 2022, 2:11 a.m. UTC | #2
> -----Original Message-----
> From: John Fastabend [mailto:john.fastabend@gmail.com]
> Subject: RE: [PATCH bpf-next 1/2] sk_msg: Keep reference on socket file
> while wait_memory
> 
> Liu Jian wrote:
> > [...]
> 
> Thanks for the detailed commit message; it's necessary for understanding
> the problem without spending hours deciphering it myself.
> 
> When I looked at [1], we solved a similar problem by using the
> MSG_DONTWAIT flag so that the error was pushed back to the sender.
> 
> Can we do the same thing here? The nice bit is that the error would get
> all the way back to the sending socket, so userspace could decide how to
> handle it. Did I miss something?
> 
[1] works in the sk_psock_backlog function, where the error cannot be
detected by the userspace app. But here, the problem is that the app wants
this to be a blocking system call. If the MSG_DONTWAIT flag is forcibly
added, that changes the function's behavior.
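
As a rough illustration (hypothetical sketch, not something proposed
here), forcing the flag on the redirect path would look like:

	/* Hypothetical: never sleep for send memory on the redirect path.
	 * A blocking sendmsg() from userspace would then see -EAGAIN
	 * instead of waiting, changing the observable semantics.
	 */
	ret = tcp_bpf_push(sk, msg, apply_bytes, flags | MSG_DONTWAIT,
			   uncharge);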

> > [...]
John Fastabend Aug. 17, 2022, 6:33 p.m. UTC | #3
liujian (CE) wrote:
> > [...]
> >
> > Thanks for the detailed commit message; it's necessary for understanding
> > the problem without spending hours deciphering it myself.
> >
> > When I looked at [1], we solved a similar problem by using the
> > MSG_DONTWAIT flag so that the error was pushed back to the sender.
> >
> > Can we do the same thing here? The nice bit is that the error would get
> > all the way back to the sending socket, so userspace could decide how
> > to handle it. Did I miss something?
> >
> [1] works in the sk_psock_backlog function, where the error cannot be
> detected by the userspace app. But here, the problem is that the app
> wants this to be a blocking system call. If the MSG_DONTWAIT flag is
> forcibly added, that changes the function's behavior.
> 

Ah right. We could push it to the sk_psock_backlog as another option,
similar to what sk_psock_verdict_apply() does with sk_psock_skb_redirect().
The problem is that we don't have an skb here, and instead of the
stream wait logic we would be using the backlog logic, which might
introduce some subtle change. Seems a bit intrusive to me.

I don't have any better ideas off-hand, even though reaching into the file
as the patch does is not ideal.

Maybe Jakub has some thoughts?

Thanks!
John
Jakub Sitnicki Aug. 19, 2022, 8:39 a.m. UTC | #4
On Mon, Aug 15, 2022 at 10:33 AM +08, Liu Jian wrote:
> Fix the below NULL pointer dereference:
>
> [...]
>
> While waiting for memory in thread1, the socket is released along with its
> wait queue because thread2 has closed it. This is caused by
> tcp_bpf_send_verdict not increasing the f_count of
> psock->sk_redir->sk_socket->file in thread1.

I'm not sure about this approach. Keeping a closed sock's file alive just
so we can wake up from sleep seems like wasted effort.

__tcp_close sets sk->sk_shutdown = RCV_SHUTDOWN | SEND_SHUTDOWN, so we
will return from sk_stream_wait_memory via the do_error path.

SEND_SHUTDOWN might be set because the socket got closed and orphaned -
dead and detached from its file, like in this case.

So, IMHO, we should check whether the SOCK_DEAD flag is set on wakeup due
to SEND_SHUTDOWN in sk_stream_wait_memory, before accessing the wait queue.

[...]
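
For context, the tail of sk_stream_wait_memory() (net/core/stream.c,
abridged) shows both the do_error exit taken on SEND_SHUTDOWN and the
unconditional wait-queue access on the way out:

	while (1) {
		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		/* ... sleep via sk_wait_event() ... */
	}
out:
	remove_wait_queue(sk_sleep(sk), &wait);	/* NULL deref if orphaned */
	return err;

do_error:
	err = -EPIPE;
	goto out;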
Liu Jian Aug. 19, 2022, 10:01 a.m. UTC | #5
> -----Original Message-----
> From: Jakub Sitnicki [mailto:jakub@cloudflare.com]
> Subject: Re: [PATCH bpf-next 1/2] sk_msg: Keep reference on socket file
> while wait_memory
> 
> On Mon, Aug 15, 2022 at 10:33 AM +08, Liu Jian wrote:
> > [...]
> 
> I'm not sure about this approach. Keeping a closed sock's file alive just
> so we can wake up from sleep seems like wasted effort.
> 
> [...]
> 
> So, IMHO, we should check whether the SOCK_DEAD flag is set on wakeup due
> to SEND_SHUTDOWN in sk_stream_wait_memory, before accessing the wait
> queue.
> 
> [...]
With Jakub's approach, this problem can be solved:

diff --git a/include/net/sock.h b/include/net/sock.h
index a7273b289188..a3dab7140f1e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1998,6 +1998,8 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
 {
        BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+       if (sock_flag(sk, SOCK_DEAD))
+               return NULL;
        return &rcu_dereference_raw(sk->sk_wq)->wait;
 }
 /* Detach socket from process context.
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 9860bb9a847c..da1be17d0b19 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -51,6 +51,8 @@ void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry
 {
        unsigned long flags;
 
+       if (wq_head == NULL)
+               return;
        spin_lock_irqsave(&wq_head->lock, flags);
        __remove_wait_queue(wq_head, wq_entry);
        spin_unlock_irqrestore(&wq_head->lock, flags);
Jakub Sitnicki Aug. 19, 2022, 10:34 a.m. UTC | #6
On Fri, Aug 19, 2022 at 10:01 AM GMT, liujian (CE) wrote:
>> [...]
>
> With Jakub's approach, this problem can be solved:
>
> [...]

I don't know if we want to change the contract for sk_sleep() and
remove_wait_queue() so that they accept dead sockets or NULLs.

How about just:

diff --git a/net/core/stream.c b/net/core/stream.c
index ccc083cdef23..1105057ce00a 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -159,7 +159,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
                *timeo_p = current_timeo;
        }
 out:
-       remove_wait_queue(sk_sleep(sk), &wait);
+       if (!sock_flag(sk, SOCK_DEAD))
+               remove_wait_queue(sk_sleep(sk), &wait);
        return err;

 do_error:
Liu Jian Aug. 20, 2022, 3:01 a.m. UTC | #7
> -----Original Message-----
> From: Jakub Sitnicki [mailto:jakub@cloudflare.com]
> Subject: Re: [PATCH bpf-next 1/2] sk_msg: Keep reference on socket file
> while wait_memory
> 
> [...]
> 
> I don't know if we want to change the contract for sk_sleep() and
> remove_wait_queue() so that they accept dead sockets or NULLs.
> 
> How about just:

It is all OK to me, thank you. Could you provide a formal patch?

Tested-by: Liu Jian <liujian56@huawei.com>
> [...]
Jakub Sitnicki Aug. 22, 2022, 2:32 p.m. UTC | #8
On Sat, Aug 20, 2022 at 03:01 AM GMT, liujian (CE) wrote:
>> [...]
>
> It is all OK to me, thank you. Could you provide a formal patch?
>
> Tested-by: Liu Jian <liujian56@huawei.com>

Feel free to pull it into your patch set. I'm a bit backlogged
ATM. Besides, we also want the selftest that you have added.

You can add Suggested-by if you want.

[...]
Eric Dumazet Aug. 22, 2022, 3:52 p.m. UTC | #9
On Fri, Aug 19, 2022 at 3:37 AM Jakub Sitnicki <jakub@cloudflare.com> wrote:
>
> [...]
>
> I don't know if we want to change the contract for sk_sleep() and
> remove_wait_queue() so that they accept dead sockets or NULLs.
>
> How about just:
>
> diff --git a/net/core/stream.c b/net/core/stream.c
> index ccc083cdef23..1105057ce00a 100644
> --- a/net/core/stream.c
> +++ b/net/core/stream.c
> @@ -159,7 +159,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
>                 *timeo_p = current_timeo;
>         }
>  out:
> -       remove_wait_queue(sk_sleep(sk), &wait);
> +       if (!sock_flag(sk, SOCK_DEAD))
> +               remove_wait_queue(sk_sleep(sk), &wait);
>         return err;
>
>  do_error:
>

OK, but what about tcp_msg_wait_data() and udp_msg_wait_data()?
It seems they could be vulnerable as well.
Jakub Sitnicki Aug. 22, 2022, 4:12 p.m. UTC | #10
On Mon, Aug 22, 2022 at 08:52 AM -07, Eric Dumazet wrote:
> On Fri, Aug 19, 2022 at 3:37 AM Jakub Sitnicki <jakub@cloudflare.com> wrote:
>>
>> [...]
>>
>> How about just:
>>
>> [...]
>
> OK, but what about tcp_msg_wait_data() and udp_msg_wait_data()?
> It seems they could be vulnerable as well.

TBH, I hadn't thought about these until now.

They are reachable only via sk->sk_prot->recvmsg, so from recvmsg(2) or
recvmmsg(2). The syscall layer will keep the socket file and the wait
queue alive.
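
That is, the fd lookup in the syscall entry path pins the socket's struct
file for the duration of the call; roughly (paraphrasing net/socket.c of
this era, abridged):

	/* The fd lookup holds the struct file (taking a real reference
	 * when the fd table is shared), so the socket cannot be orphaned
	 * and sk->sk_wq stays valid until fput_light() below.
	 */
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;
	err = sock_recvmsg(sock, &msg, flags);
	fput_light(sock->file, fput_needed);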

Patch

diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index a1626afe87a1..201375829367 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -125,9 +125,17 @@  static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
 {
 	int ret;
 
+	/* Hold on to socket wait queue. */
+	if (sk->sk_socket && sk->sk_socket->file)
+		get_file(sk->sk_socket->file);
+
 	lock_sock(sk);
 	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
 	release_sock(sk);
+
+	if (sk->sk_socket && sk->sk_socket->file)
+		fput(sk->sk_socket->file);
+
 	return ret;
 }