@@ -1231,17 +1231,24 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
 
 static void sk_psock_verdict_data_ready(struct sock *sk)
 {
-	struct socket *sock = sk->sk_socket;
+	struct socket *sock;
 	const struct proto_ops *ops;
 	int copied;
 
 	trace_sk_data_ready(sk);
 
-	if (unlikely(!sock))
+	rcu_read_lock();
+	sock = sk->sk_socket;
+	if (unlikely(!sock)) {
+		rcu_read_unlock();
 		return;
+	}
 	ops = READ_ONCE(sock->ops);
-	if (!ops || !ops->read_skb)
+	if (!ops || !ops->read_skb) {
+		rcu_read_unlock();
 		return;
+	}
+	rcu_read_unlock();
 	copied = ops->read_skb(sk, sk_psock_verdict_recv);
 	if (copied >= 0) {
 		struct sk_psock *psock;
There are potential concurrency issues, as shown below.

'''
CPU0                                  CPU1

sk_psock_verdict_data_ready:
    socket *sock = sk->sk_socket
    if (!sock) return
                                      close(fd):
                                      ...
                                      ops->release()
    if (!sock->ops) return
                                      sock->ops = NULL
                                      call_rcu(sock)
                                      free(sock)
    READ_ONCE(sock->ops)
    ^ use 'sock' after free
'''

RCU cannot cover the whole Unix socket read path: the af_unix
implementation assumes it always runs in process context and makes
heavy use of mutex_lock(), so read_skb() must not be called within an
RCU read-side critical section. Hence the RCU lock is dropped before
ops->read_skb() is invoked.

Incrementing the psock reference count would not help either, since
sock_map_close() does not wait for data_ready() to complete its
execution.

While sk_socket is not otherwise used here, implementing read_skb() at
the sock layer instead of the socket layer might be architecturally
preferable. That refactoring is deferred, however, as the current fix
adequately addresses the immediate issue.

Fixes: c63829182c37 ("af_unix: Implement ->psock_update_sk_prot()")
Reported-by: syzbot+dd90a702f518e0eac072@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/bpf/6734c033.050a0220.2a2fcc.0015.GAE@google.com/
Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
---
 net/core/skmsg.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
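
For illustration, a paraphrased and abridged sketch of the af_unix read
path (based on unix_read_skb() in net/unix/af_unix.c, not the verbatim
upstream code) shows why the RCU read-side critical section has to end
before ops->read_skb() runs: the handler serializes on a mutex and may
therefore sleep.

'''
/*
 * Abridged sketch: the af_unix datagram read_skb handler takes
 * u->iolock, a mutex, and may sleep while waiting for it -- which
 * is illegal under rcu_read_lock().
 */
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct unix_sock *u = unix_sk(sk);
	struct sk_buff *skb;
	int err;

	mutex_lock(&u->iolock);	/* may sleep: must not hold the RCU lock */
	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
	mutex_unlock(&u->iolock);
	if (!skb)
		return err;

	return recv_actor(sk, skb);
}
'''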