
[RFC,net] net: fix data-races around sk->sk_forward_alloc

Message ID 20241031122344.2148586-1-wangliang74@huawei.com (mailing list archive)
State RFC
Delegated to: Netdev Maintainers
Series [RFC,net] net: fix data-races around sk->sk_forward_alloc

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for net, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 12 this patch: 12
netdev/build_tools success Errors and warnings before: 0 (+0) this patch: 0 (+0)
netdev/cc_maintainers success CCed 6 of 6 maintainers
netdev/build_clang success Errors and warnings before: 15 this patch: 15
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn fail Errors and warnings before: 909 this patch: 910
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 80 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 2 this patch: 2
netdev/source_inline success Was 0 now: 0

Commit Message

Wang Liang Oct. 31, 2024, 12:23 p.m. UTC
Syzkaller reported this warning:
[   65.568203][    C0] ------------[ cut here ]------------
[   65.569339][    C0] WARNING: CPU: 0 PID: 16 at net/ipv4/af_inet.c:156 inet_sock_destruct+0x1c5/0x1e0
[   65.575017][    C0] Modules linked in:
[   65.575699][    C0] CPU: 0 UID: 0 PID: 16 Comm: ksoftirqd/0 Not tainted 6.12.0-rc5 #26
[   65.577086][    C0] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
[   65.577094][    C0] RIP: 0010:inet_sock_destruct+0x1c5/0x1e0
[   65.577100][    C0] Code: 24 12 4c 89 e2 5b 48 c7 c7 98 ec bb 82 41 5c e9 d1 18 17 ff 4c 89 e6 5b 48 c7 c7 d0 ec bb 82 41 5c e9 bf 18 17 ff 0f 0b eb 83 <0f> 0b eb 97 0f 0b eb 87 0f 0b e9 68 ff ff ff 66 66 2e 0f 1f 84 00
[   65.577107][    C0] RSP: 0018:ffffc9000008bd90 EFLAGS: 00010206
[   65.577113][    C0] RAX: 0000000000000300 RBX: ffff88810b172a90 RCX: 0000000000000007
[   65.577117][    C0] RDX: 0000000000000002 RSI: 0000000000000300 RDI: ffff88810b172a00
[   65.577120][    C0] RBP: ffff88810b172a00 R08: ffff888104273c00 R09: 0000000000100007
[   65.577123][    C0] R10: 0000000000020000 R11: 0000000000000006 R12: ffff88810b172a00
[   65.577125][    C0] R13: 0000000000000004 R14: 0000000000000000 R15: ffff888237c31f78
[   65.577131][    C0] FS:  0000000000000000(0000) GS:ffff888237c00000(0000) knlGS:0000000000000000
[   65.592485][    C0] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[   65.592489][    C0] CR2: 00007ffc63fecac8 CR3: 000000000342e000 CR4: 00000000000006f0
[   65.592491][    C0] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[   65.592492][    C0] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[   65.592495][    C0] Call Trace:
[   65.596277][    C0]  <TASK>
[   65.598171][    C0]  ? __warn+0x88/0x130
[   65.598874][    C0]  ? inet_sock_destruct+0x1c5/0x1e0
[   65.598879][    C0]  ? report_bug+0x18e/0x1a0
[   65.598883][    C0]  ? handle_bug+0x53/0x90
[   65.598886][    C0]  ? exc_invalid_op+0x18/0x70
[   65.598888][    C0]  ? asm_exc_invalid_op+0x1a/0x20
[   65.598893][    C0]  ? inet_sock_destruct+0x1c5/0x1e0
[   65.598897][    C0]  __sk_destruct+0x2a/0x200
[   65.604664][    C0]  rcu_do_batch+0x1aa/0x530
[   65.605450][    C0]  ? rcu_do_batch+0x13b/0x530
[   65.605456][    C0]  rcu_core+0x159/0x2f0
[   65.605466][    C0]  handle_softirqs+0xd3/0x2b0
[   65.607689][    C0]  ? __pfx_smpboot_thread_fn+0x10/0x10
[   65.607695][    C0]  run_ksoftirqd+0x25/0x30
[   65.607699][    C0]  smpboot_thread_fn+0xdd/0x1d0
[   65.610152][    C0]  kthread+0xd3/0x100
[   65.610158][    C0]  ? __pfx_kthread+0x10/0x10
[   65.610160][    C0]  ret_from_fork+0x34/0x50
[   65.610170][    C0]  ? __pfx_kthread+0x10/0x10
[   65.610172][    C0]  ret_from_fork_asm+0x1a/0x30
[   65.610181][    C0]  </TASK>
[   65.610182][    C0] ---[ end trace 0000000000000000 ]---

It's possible that two threads call tcp_v6_do_rcv()/sk_forward_alloc_add()
concurrently when sk->sk_state == TCP_LISTEN and sk->sk_lock is not held,
which triggers a data-race around sk->sk_forward_alloc:
tcp_v6_rcv
    tcp_v6_do_rcv
        skb_clone_and_charge_r
            sk_rmem_schedule
                __sk_mem_schedule
                    sk_forward_alloc_add()
            skb_set_owner_r
                sk_mem_charge
                    sk_forward_alloc_add()
        __kfree_skb
            skb_release_all
                skb_release_head_state
                    sock_rfree
                        sk_mem_uncharge
                            sk_forward_alloc_add()
                            sk_mem_reclaim
                                // set local var reclaimable
                                __sk_mem_reclaim
                                    sk_forward_alloc_add()
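
All of these updates funnel through sk_forward_alloc_add(). In
include/net/sock.h that helper is roughly the following plain
read-modify-write (shown here only as a reference sketch); nothing
serializes concurrent callers on the listener path, so two CPUs can
interleave their load/store pairs:

  static inline void sk_forward_alloc_add(struct sock *sk, int val)
  {
          /* plain load of sk->sk_forward_alloc, add, then store: not atomic */
          WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
  }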

In this syzkaller testcase, two threads call tcp_v6_do_rcv() with
skb->truesize=768, and sk->sk_forward_alloc changes like this:
 (cpu 1)             | (cpu 2)             | sk_forward_alloc
 ...                 | ...                 | 0
 __sk_mem_schedule() |                     | +4096 = 4096
                     | __sk_mem_schedule() | +4096 = 8192
 sk_mem_charge()     |                     | -768  = 7424
                     | sk_mem_charge()     | -768  = 6656
 ...                 |    ...              |
 sk_mem_uncharge()   |                     | +768  = 7424
 reclaimable=7424    |                     |
                     | sk_mem_uncharge()   | +768  = 8192
                     | reclaimable=8192    |
 __sk_mem_reclaim()  |                     | -4096 = 4096
                     | __sk_mem_reclaim()  | -8192 = -4096 != 0
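
Purely for illustration (this is not kernel code, and forward_alloc /
fwd_add below are made-up stand-ins), a tiny userspace program that
replays the interleaving above sequentially ends with the same negative
value; the whole-page rounding mimics how __sk_mem_reclaim() consumes
the stale reclaimable snapshot taken by each cpu:

  #include <stdio.h>

  #define PAGE_SIZE 4096

  static int forward_alloc;              /* stand-in for sk->sk_forward_alloc */

  static void fwd_add(int val)           /* stand-in for sk_forward_alloc_add() */
  {
          forward_alloc += val;
  }

  int main(void)
  {
          int truesize = 768, reclaim1, reclaim2;

          fwd_add(PAGE_SIZE);            /* cpu1: __sk_mem_schedule()  ->  4096 */
          fwd_add(PAGE_SIZE);            /* cpu2: __sk_mem_schedule()  ->  8192 */
          fwd_add(-truesize);            /* cpu1: sk_mem_charge()      ->  7424 */
          fwd_add(-truesize);            /* cpu2: sk_mem_charge()      ->  6656 */
          fwd_add(truesize);             /* cpu1: sk_mem_uncharge()    ->  7424 */
          reclaim1 = forward_alloc;      /* cpu1 snapshots 7424 */
          fwd_add(truesize);             /* cpu2: sk_mem_uncharge()    ->  8192 */
          reclaim2 = forward_alloc;      /* cpu2 snapshots 8192 */
          /* each cpu reclaims whole pages of its own, now stale, snapshot */
          fwd_add(-(reclaim1 / PAGE_SIZE) * PAGE_SIZE);   /* -4096 ->  4096 */
          fwd_add(-(reclaim2 / PAGE_SIZE) * PAGE_SIZE);   /* -8192 -> -4096 */

          printf("final sk_forward_alloc = %d\n", forward_alloc);
          return 0;
  }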

Taking the socket lock around tcp_v6_do_rcv() in tcp_v6_rcv() would have
some performance impact, so only take the lock when the opt_skb clone
occurs. In some paths tcp_v6_do_rcv() is already called with sk->sk_lock
held, so add TCP_SKB_CB(skb)->sk_lock_capability to avoid re-locking.

Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets")
Signed-off-by: Wang Liang <wangliang74@huawei.com>
---
 include/net/tcp.h   |  3 ++-
 net/ipv6/tcp_ipv6.c | 21 ++++++++++++++++-----
 2 files changed, 18 insertions(+), 6 deletions(-)

Comments

Eric Dumazet Oct. 31, 2024, 2:08 p.m. UTC | #1
On Thu, Oct 31, 2024 at 1:06 PM Wang Liang <wangliang74@huawei.com> wrote:
>
> Syzkaller reported this warning:

Was this a public report?

> [   65.568203][    C0] ------------[ cut here ]------------
> [   65.569339][    C0] WARNING: CPU: 0 PID: 16 at net/ipv4/af_inet.c:156 inet_sock_destruct+0x1c5/0x1e0
> [   65.575017][    C0] Modules linked in:
> [   65.575699][    C0] CPU: 0 UID: 0 PID: 16 Comm: ksoftirqd/0 Not tainted 6.12.0-rc5 #26
> [   65.577086][    C0] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
> [   65.577094][    C0] RIP: 0010:inet_sock_destruct+0x1c5/0x1e0
> [   65.577100][    C0] Code: 24 12 4c 89 e2 5b 48 c7 c7 98 ec bb 82 41 5c e9 d1 18 17 ff 4c 89 e6 5b 48 c7 c7 d0 ec bb 82 41 5c e9 bf 18 17 ff 0f 0b eb 83 <0f> 0b eb 97 0f 0b eb 87 0f 0b e9 68 ff ff ff 66 66 2e 0f 1f 84 00
> [   65.577107][    C0] RSP: 0018:ffffc9000008bd90 EFLAGS: 00010206
> [   65.577113][    C0] RAX: 0000000000000300 RBX: ffff88810b172a90 RCX: 0000000000000007
> [   65.577117][    C0] RDX: 0000000000000002 RSI: 0000000000000300 RDI: ffff88810b172a00
> [   65.577120][    C0] RBP: ffff88810b172a00 R08: ffff888104273c00 R09: 0000000000100007
> [   65.577123][    C0] R10: 0000000000020000 R11: 0000000000000006 R12: ffff88810b172a00
> [   65.577125][    C0] R13: 0000000000000004 R14: 0000000000000000 R15: ffff888237c31f78
> [   65.577131][    C0] FS:  0000000000000000(0000) GS:ffff888237c00000(0000) knlGS:0000000000000000
> [   65.592485][    C0] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
> [   65.592489][    C0] CR2: 00007ffc63fecac8 CR3: 000000000342e000 CR4: 00000000000006f0
> [   65.592491][    C0] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
> [   65.592492][    C0] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
> [   65.592495][    C0] Call Trace:
> [   65.596277][    C0]  <TASK>
> [   65.598171][    C0]  ? __warn+0x88/0x130
> [   65.598874][    C0]  ? inet_sock_destruct+0x1c5/0x1e0
> [   65.598879][    C0]  ? report_bug+0x18e/0x1a0
> [   65.598883][    C0]  ? handle_bug+0x53/0x90
> [   65.598886][    C0]  ? exc_invalid_op+0x18/0x70
> [   65.598888][    C0]  ? asm_exc_invalid_op+0x1a/0x20
> [   65.598893][    C0]  ? inet_sock_destruct+0x1c5/0x1e0
> [   65.598897][    C0]  __sk_destruct+0x2a/0x200
> [   65.604664][    C0]  rcu_do_batch+0x1aa/0x530
> [   65.605450][    C0]  ? rcu_do_batch+0x13b/0x530
> [   65.605456][    C0]  rcu_core+0x159/0x2f0
> [   65.605466][    C0]  handle_softirqs+0xd3/0x2b0
> [   65.607689][    C0]  ? __pfx_smpboot_thread_fn+0x10/0x10
> [   65.607695][    C0]  run_ksoftirqd+0x25/0x30
> [   65.607699][    C0]  smpboot_thread_fn+0xdd/0x1d0
> [   65.610152][    C0]  kthread+0xd3/0x100
> [   65.610158][    C0]  ? __pfx_kthread+0x10/0x10
> [   65.610160][    C0]  ret_from_fork+0x34/0x50
> [   65.610170][    C0]  ? __pfx_kthread+0x10/0x10
> [   65.610172][    C0]  ret_from_fork_asm+0x1a/0x30
> [   65.610181][    C0]  </TASK>
> [   65.610182][    C0] ---[ end trace 0000000000000000 ]---
>
> It's possible that two threads call tcp_v6_do_rcv()/sk_forward_alloc_add()
> concurrently when sk->sk_state == TCP_LISTEN and sk->sk_lock is not held,
> which triggers a data-race around sk->sk_forward_alloc:
> tcp_v6_rcv
>     tcp_v6_do_rcv
>         skb_clone_and_charge_r
>             sk_rmem_schedule
>                 __sk_mem_schedule
>                     sk_forward_alloc_add()
>             skb_set_owner_r
>                 sk_mem_charge
>                     sk_forward_alloc_add()
>         __kfree_skb
>             skb_release_all
>                 skb_release_head_state
>                     sock_rfree
>                         sk_mem_uncharge
>                             sk_forward_alloc_add()
>                             sk_mem_reclaim
>                                 // set local var reclaimable
>                                 __sk_mem_reclaim
>                                     sk_forward_alloc_add()
>
> In this syzkaller testcase, two threads call tcp_v6_do_rcv() with
> skb->truesize=768, and sk->sk_forward_alloc changes like this:
>  (cpu 1)             | (cpu 2)             | sk_forward_alloc
>  ...                 | ...                 | 0
>  __sk_mem_schedule() |                     | +4096 = 4096
>                      | __sk_mem_schedule() | +4096 = 8192
>  sk_mem_charge()     |                     | -768  = 7424
>                      | sk_mem_charge()     | -768  = 6656
>  ...                 |    ...              |
>  sk_mem_uncharge()   |                     | +768  = 7424
>  reclaimable=7424    |                     |
>                      | sk_mem_uncharge()   | +768  = 8192
>                      | reclaimable=8192    |
>  __sk_mem_reclaim()  |                     | -4096 = 4096
>                      | __sk_mem_reclaim()  | -8192 = -4096 != 0
>
> Taking the socket lock around tcp_v6_do_rcv() in tcp_v6_rcv() would have
> some performance impact, so only take the lock when the opt_skb clone
> occurs. In some paths tcp_v6_do_rcv() is already called with sk->sk_lock
> held, so add TCP_SKB_CB(skb)->sk_lock_capability to avoid re-locking.
>
> Fixes: e994b2f0fb92 ("tcp: do not lock listener to process SYN packets")
> Signed-off-by: Wang Liang <wangliang74@huawei.com>
> ---
>  include/net/tcp.h   |  3 ++-
>  net/ipv6/tcp_ipv6.c | 21 ++++++++++++++++-----
>  2 files changed, 18 insertions(+), 6 deletions(-)
>
> diff --git a/include/net/tcp.h b/include/net/tcp.h
> index d1948d357dad..110a23dda1eb 100644
> --- a/include/net/tcp.h
> +++ b/include/net/tcp.h
> @@ -961,7 +961,8 @@ struct tcp_skb_cb {
>         __u8            txstamp_ack:1,  /* Record TX timestamp for ack? */
>                         eor:1,          /* Is skb MSG_EOR marked? */
>                         has_rxtstamp:1, /* SKB has a RX timestamp       */
> -                       unused:5;
> +                       sk_lock_capability:1, /* Avoid re-lock flag */
> +                       unused:4;
>         __u32           ack_seq;        /* Sequence number ACK'd        */
>         union {
>                 struct {

Oh the horror, this is completely wrong and unsafe anyway.

TCP listen path MUST be lockless, and stay lockless.

Ask yourself: why would a listener even hold pktoptions in the first place?

Normally, each request socket can hold an ireq->pktopts (see
tcp_v6_init_req()).

The skb_clone_and_charge_r() call happens later, in tcp_v6_syn_recv_sock().

The correct fix is to _not_ call skb_clone_and_charge_r() for a
listener socket; of course, this never made _any_ sense.

The following patch should fix both TCP and DCCP, and as a bonus make
TCP SYN processing faster for listeners requesting these IPV6_PKTOPTIONS
things.

diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index da5dba120bc9a55c5fd9d6feda791b0ffc887423..d6649246188d72b3df6c74750779b7aa5910dcb7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -618,7 +618,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
           by tcp. Feel free to propose better solution.
                                               --ANK (980728)
         */
-       if (np->rxopt.all)
+       if (np->rxopt.all && sk->sk_state != DCCP_LISTEN)
                opt_skb = skb_clone_and_charge_r(skb, sk);

        if (sk->sk_state == DCCP_OPEN) { /* Fast path */
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d71ab4e1efe1c6598cf3d3e4334adf0881064ce9..e643dbaec9ccc92eb2d9103baf185c957ad1dd2e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1605,25 +1605,12 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
         *      is currently called with bh processing disabled.
         */

-       /* Do Stevens' IPV6_PKTOPTIONS.
-
-          Yes, guys, it is the only place in our code, where we
-          may make it not affecting IPv4.
-          The rest of code is protocol independent,
-          and I do not like idea to uglify IPv4.
-
-          Actually, all the idea behind IPV6_PKTOPTIONS
-          looks not very well thought. For now we latch
-          options, received in the last packet, enqueued
-          by tcp. Feel free to propose better solution.
-                                              --ANK (980728)
-        */
-       if (np->rxopt.all)
-               opt_skb = skb_clone_and_charge_r(skb, sk);

        if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
                struct dst_entry *dst;

+               if (np->rxopt.all)
+                       opt_skb = skb_clone_and_charge_r(skb, sk);
                dst = rcu_dereference_protected(sk->sk_rx_dst,
                                                lockdep_sock_is_held(sk));

@@ -1656,13 +1643,13 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
                                if (reason)
                                        goto reset;
                        }
-                       if (opt_skb)
-                               __kfree_skb(opt_skb);
                        return 0;
                }
        } else
                sock_rps_save_rxhash(sk, skb);

+       if (np->rxopt.all)
+               opt_skb = skb_clone_and_charge_r(skb, sk);
        reason = tcp_rcv_state_process(sk, skb);
        if (reason)
                goto reset;

Patch

diff --git a/include/net/tcp.h b/include/net/tcp.h
index d1948d357dad..110a23dda1eb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -961,7 +961,8 @@  struct tcp_skb_cb {
 	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
 			eor:1,		/* Is skb MSG_EOR marked? */
 			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
-			unused:5;
+			sk_lock_capability:1, /* Avoid re-lock flag */
+			unused:4;
 	__u32		ack_seq;	/* Sequence number ACK'd	*/
 	union {
 		struct {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d71ab4e1efe1..a1166035fbce 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1588,6 +1588,7 @@  int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	struct sk_buff *opt_skb = NULL;
 	enum skb_drop_reason reason;
 	struct tcp_sock *tp;
+	bool sk_lock_flag = false;
 
 	/* Imagine: socket is IPv6. IPv4 packet arrives,
 	   goes to IPv4 receive handler and backlogged.
@@ -1618,8 +1619,13 @@  int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	   by tcp. Feel free to propose better solution.
 					       --ANK (980728)
 	 */
-	if (np->rxopt.all)
+	if (np->rxopt.all) {
+		if (TCP_SKB_CB(skb)->sk_lock_capability) {
+			sk_lock_flag = true;
+			bh_lock_sock_nested(sk);
+		}
 		opt_skb = skb_clone_and_charge_r(skb, sk);
+	}
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		struct dst_entry *dst;
@@ -1641,7 +1647,7 @@  int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		tcp_rcv_established(sk, skb);
 		if (opt_skb)
 			goto ipv6_pktoptions;
-		return 0;
+		goto unlock;
 	}
 
 	if (tcp_checksum_complete(skb))
@@ -1658,7 +1664,7 @@  int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 			}
 			if (opt_skb)
 				__kfree_skb(opt_skb);
-			return 0;
+			goto unlock;
 		}
 	} else
 		sock_rps_save_rxhash(sk, skb);
@@ -1668,7 +1674,7 @@  int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 		goto reset;
 	if (opt_skb)
 		goto ipv6_pktoptions;
-	return 0;
+	goto unlock;
 
 reset:
 	tcp_v6_send_reset(sk, skb, sk_rst_convert_drop_reason(reason));
@@ -1676,7 +1682,7 @@  int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	if (opt_skb)
 		__kfree_skb(opt_skb);
 	sk_skb_reason_drop(sk, skb, reason);
-	return 0;
+	goto unlock;
 csum_err:
 	reason = SKB_DROP_REASON_TCP_CSUM;
 	trace_tcp_bad_csum(skb);
@@ -1715,6 +1721,9 @@  int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	}
 
 	consume_skb(opt_skb);
+unlock:
+	if (sk_lock_flag)
+		bh_unlock_sock(sk);
 	return 0;
 }
 
@@ -1900,7 +1909,9 @@  INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 	skb->dev = NULL;
 
 	if (sk->sk_state == TCP_LISTEN) {
+		TCP_SKB_CB(skb)->sk_lock_capability = true;
 		ret = tcp_v6_do_rcv(sk, skb);
+		TCP_SKB_CB(skb)->sk_lock_capability = false;
 		goto put_and_return;
 	}