
[net] Revert "tcp: avoid atomic operations on sk->sk_rmem_alloc"

Message ID 20250331075946.31960-1-edumazet@google.com (mailing list archive)
State Accepted
Commit f278b6d5bb465c7fd66f3d103812947e55b376ed
Delegated to: Netdev Maintainers
Series [net] Revert "tcp: avoid atomic operations on sk->sk_rmem_alloc"

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for net, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated
netdev/fixes_present success Fixes tag present in non-next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 518 this patch: 518
netdev/build_tools success Errors and warnings before: 26 (+1) this patch: 26 (+1)
netdev/cc_maintainers warning 1 maintainers not CCed: dsahern@kernel.org
netdev/build_clang success Errors and warnings before: 966 this patch: 966
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 15128 this patch: 911
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 84 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 3 this patch: 3
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2025-04-01--00-00 (tests: 902)

Commit Message

Eric Dumazet March 31, 2025, 7:59 a.m. UTC
This reverts commit 0de2a5c4b824da2205658ebebb99a55c43cdf60f.

I forgot that a TCP socket could receive messages in its error queue.

sock_queue_err_skb() can be called without the socket lock being held,
and it changes sk->sk_rmem_alloc.
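
Concretely, the reverted tcp_sock_rfree() replaced the atomic_sub() of
sock_rfree() with an atomic_read()/atomic_set() pair done under the socket
lock, which can lose an update made concurrently by the lockless
error-queue path. A minimal sketch of the lost-update race, condensed from
the two kernel paths (illustration only, not meant to compile stand-alone):

    /* CPU 0: reverted tcp_sock_rfree(), socket lock held.
     * Non-atomic read-modify-write of sk->sk_rmem_alloc
     * (len is the truesize of the skb being freed):
     */
    int v = atomic_read(&sk->sk_rmem_alloc);        /* reads, say, 4096 */

    /* CPU 1: sock_queue_err_skb(), no socket lock, atomic update: */
    atomic_add(skb->truesize, &sk->sk_rmem_alloc);  /* 4096 + truesize */

    /* CPU 0 writes back and silently discards CPU 1's addition: */
    atomic_set(&sk->sk_rmem_alloc, v - len);        /* error-queue charge lost */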

The fact that skbs in the error queue are limited by sk->sk_rcvbuf
means that error messages can be dropped when the socket receive
queue is full; that is an orthogonal issue.
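
For reference, the error-queue enqueue path in net/core/skbuff.c looks
roughly like the following (condensed here; exact details vary across
kernel versions). Note both the sk->sk_rcvbuf bound mentioned above and
the lockless atomic_add() that races with the reverted helper:

    int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
    {
            /* Error-queue skbs share the regular receive budget, so a
             * full receive queue also starves error messages.
             */
            if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
                (unsigned int)READ_ONCE(sk->sk_rcvbuf))
                    return -ENOMEM;

            skb_orphan(skb);
            skb->sk = sk;
            skb->destructor = sock_rmem_free;
            /* Lockless: may run concurrently with lock-holding paths. */
            atomic_add(skb->truesize, &sk->sk_rmem_alloc);

            skb_queue_tail(&sk->sk_error_queue, skb);
            if (!sock_flag(sk, SOCK_DEAD))
                    sk_error_report(sk);
            return 0;
    }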

In future kernels, we could use a separate sk->sk_error_mem_alloc
counter specifically for the error queue.
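
A hypothetical shape for that follow-up, purely illustrative (only the
sk_error_mem_alloc name comes from this message; the helpers below are an
assumption, not a proposed patch):

    /* Hypothetical: give the error queue its own atomic counter so that
     * sk->sk_rmem_alloc is only ever touched by lock-holding paths.
     */
    static void sock_rmem_free_err(struct sk_buff *skb)
    {
            atomic_sub(skb->truesize, &skb->sk->sk_error_mem_alloc);
    }

    int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
    {
            if (atomic_read(&sk->sk_error_mem_alloc) + skb->truesize >=
                (unsigned int)READ_ONCE(sk->sk_rcvbuf))
                    return -ENOMEM;

            skb_orphan(skb);
            skb->sk = sk;
            skb->destructor = sock_rmem_free_err;
            atomic_add(skb->truesize, &sk->sk_error_mem_alloc);
            skb_queue_tail(&sk->sk_error_queue, skb);
            return 0;
    }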

Fixes: 0de2a5c4b824 ("tcp: avoid atomic operations on sk->sk_rmem_alloc")
Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 include/net/tcp.h       | 15 ---------------
 net/ipv4/tcp.c          | 18 ++----------------
 net/ipv4/tcp_fastopen.c |  2 +-
 net/ipv4/tcp_input.c    |  6 +++---
 4 files changed, 6 insertions(+), 35 deletions(-)
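
For context, the stock helpers the revert falls back to keep
sk->sk_rmem_alloc fully atomic, so they stay correct against concurrent
error-queue updates. Lightly paraphrased from include/net/sock.h and
net/core/sock.c:

    static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
    {
            skb_orphan(skb);
            skb->sk = sk;
            skb->destructor = sock_rfree;
            atomic_add(skb->truesize, &sk->sk_rmem_alloc); /* atomic charge */
            sk_mem_charge(sk, skb->truesize);
    }

    void sock_rfree(struct sk_buff *skb)
    {
            struct sock *sk = skb->sk;
            unsigned int len = skb->truesize;

            atomic_sub(len, &sk->sk_rmem_alloc);           /* atomic release */
            sk_mem_uncharge(sk, len);
    }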

Comments

patchwork-bot+netdevbpf@kernel.org April 1, 2025, 12:10 a.m. UTC | #1
Hello:

This patch was applied to netdev/net.git (main)
by Jakub Kicinski <kuba@kernel.org>:

On Mon, 31 Mar 2025 07:59:46 +0000 you wrote:
> This reverts commit 0de2a5c4b824da2205658ebebb99a55c43cdf60f.
> 
> I forgot that a TCP socket could receive messages in its error queue.
> 
> sock_queue_err_skb() can be called without the socket lock being held,
> and it changes sk->sk_rmem_alloc.
> 
> [...]

Here is the summary with links:
  - [net] Revert "tcp: avoid atomic operations on sk->sk_rmem_alloc"
    https://git.kernel.org/netdev/net/c/f278b6d5bb46

You are awesome, thank you!

Patch

diff --git a/include/net/tcp.h b/include/net/tcp.h
index df04dc09c519d850579e22a17f49eeec7d22c607..4450c384ef178e860bd76c23653e9ce9d7a7289b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -779,7 +779,6 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 
 /* tcp.c */
 void tcp_get_info(struct sock *, struct tcp_info *);
-void tcp_sock_rfree(struct sk_buff *skb);
 
 /* Read 'sendfile()'-style from a TCP socket */
 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
@@ -2899,18 +2898,4 @@ enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
 		const void *saddr, const void *daddr,
 		int family, int dif, int sdif);
 
-/* version of skb_set_owner_r() avoiding one atomic_add() */
-static inline void tcp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
-{
-	skb_orphan(skb);
-	skb->sk = sk;
-	skb->destructor = tcp_sock_rfree;
-
-	sock_owned_by_me(sk);
-	atomic_set(&sk->sk_rmem_alloc,
-		   atomic_read(&sk->sk_rmem_alloc) + skb->truesize);
-
-	sk_forward_alloc_add(sk, -skb->truesize);
-}
-
 #endif	/* _TCP_H */
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ea8de00f669d059d97766529e3b8c53d5040456d..6edc441b37023de48281aa810aa7a36199fd8bc3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1525,25 +1525,11 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 	__tcp_cleanup_rbuf(sk, copied);
 }
 
-/* private version of sock_rfree() avoiding one atomic_sub() */
-void tcp_sock_rfree(struct sk_buff *skb)
-{
-	struct sock *sk = skb->sk;
-	unsigned int len = skb->truesize;
-
-	sock_owned_by_me(sk);
-	atomic_set(&sk->sk_rmem_alloc,
-		   atomic_read(&sk->sk_rmem_alloc) - len);
-
-	sk_forward_alloc_add(sk, len);
-	sk_mem_reclaim(sk);
-}
-
 static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	__skb_unlink(skb, &sk->sk_receive_queue);
-	if (likely(skb->destructor == tcp_sock_rfree)) {
-		tcp_sock_rfree(skb);
+	if (likely(skb->destructor == sock_rfree)) {
+		sock_rfree(skb);
 		skb->destructor = NULL;
 		skb->sk = NULL;
 		return skb_attempt_defer_free(skb);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index ca40665145c692ce0de518886bb366406606f7ac..1a6b1bc5424514e27a99cbb2fcedf001afd51d98 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -189,7 +189,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
 	tcp_segs_in(tp, skb);
 	__skb_pull(skb, tcp_hdrlen(skb));
 	sk_forced_mem_schedule(sk, skb->truesize);
-	tcp_skb_set_owner_r(skb, sk);
+	skb_set_owner_r(skb, sk);
 
 	TCP_SKB_CB(skb)->seq++;
 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e1f952fbac48dfdc4f4f75a50a85b4904b93bbe2..a35018e2d0ba27b14d0b59d3728f7181b1a51161 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5171,7 +5171,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		if (tcp_is_sack(tp))
 			tcp_grow_window(sk, skb, false);
 		skb_condense(skb);
-		tcp_skb_set_owner_r(skb, sk);
+		skb_set_owner_r(skb, sk);
 	}
 }
 
@@ -5187,7 +5187,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
 	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
 	if (!eaten) {
 		tcp_add_receive_queue(sk, skb);
-		tcp_skb_set_owner_r(skb, sk);
+		skb_set_owner_r(skb, sk);
 	}
 	return eaten;
 }
@@ -5504,7 +5504,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
 			__skb_queue_before(list, skb, nskb);
 		else
 			__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
-		tcp_skb_set_owner_r(nskb, sk);
+		skb_set_owner_r(nskb, sk);
 		mptcp_skb_ext_move(nskb, skb);
 
 		/* Copy data, releasing collapsed skbs. */