
[net-next] tcp: reorder tcp_congestion_ops for better cache locality

Message ID 20210402181037.19736-1-eric.dumazet@gmail.com (mailing list archive)
State Accepted
Commit 82506665179209e43d3c9d39ffa42f8c8ff968bd
Delegated to: Netdev Maintainers
Series: [net-next] tcp: reorder tcp_congestion_ops for better cache locality

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for net-next
netdev/subject_prefix success
netdev/cc_maintainers success CCed 4 of 4 maintainers
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 1373 this patch: 1373
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch warning CHECK: Please don't use multiple blank lines; WARNING: please, no space before tabs
netdev/build_allmodconfig_warn success Errors and warnings before: 1375 this patch: 1375
netdev/header_inline success

Commit Message

Eric Dumazet April 2, 2021, 6:10 p.m. UTC
From: Eric Dumazet <edumazet@google.com>

Group all the often used fields in the first cache line,
to reduce cache line misses.

Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 include/net/tcp.h | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)
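
For readers less familiar with this kind of struct reordering, the sketch below illustrates the technique in self-contained userspace C. It is illustrative only: the struct, its field names, and the 64-byte line size are assumptions for the example, not the kernel definition. Hot function pointers are grouped at the front so they share one cache line, cold registration fields go last, and a static assertion documents the invariant.

/*
 * Minimal userspace sketch of the reordering idea (not the kernel struct).
 * Assumes a 64-byte cache line; __attribute__((aligned(64))) stands in for
 * the kernel's ____cacheline_aligned_in_smp.
 * Build check: cc -std=c11 -c cache_layout_sketch.c
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct hot_cold_ops {
	/* fast path: callbacks invoked on (almost) every ACK */
	uint32_t (*ssthresh)(void *sk);
	void (*cong_avoid)(void *sk, uint32_t ack, uint32_t acked);
	void (*cong_control)(void *sk, const void *rs);

	/* slow path: registration-time bookkeeping only */
	char name[16];
	void *owner;
	uint32_t key;
	uint32_t flags;
} __attribute__((aligned(64)));

/* All fast-path members must start inside the first 64-byte cache line. */
static_assert(offsetof(struct hot_cold_ops, cong_control) < 64,
	      "fast-path callbacks spill past the first cache line");

The patch applies the same ordering to struct tcp_congestion_ops itself and uses ____cacheline_aligned_in_smp so each instance starts on an SMP cache-line boundary.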

Comments

Stephen Hemminger April 2, 2021, 8:21 p.m. UTC | #1
On Fri,  2 Apr 2021 11:10:37 -0700
Eric Dumazet <eric.dumazet@gmail.com> wrote:

> From: Eric Dumazet <edumazet@google.com>
> 
> Group all the often used fields in the first cache line,
> to reduce cache line misses.
> 
> Signed-off-by: Eric Dumazet <edumazet@google.com>

Makes sense.
Acked-by: Stephen Hemminger <stephen@networkplumber.org>
patchwork-bot+netdevbpf@kernel.org April 2, 2021, 9:50 p.m. UTC | #2
Hello:

This patch was applied to netdev/net-next.git (refs/heads/master):

On Fri,  2 Apr 2021 11:10:37 -0700 you wrote:
> From: Eric Dumazet <edumazet@google.com>
> 
> Group all the often used fields in the first cache line,
> to reduce cache line misses.
> 
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> 
> [...]

Here is the summary with links:
  - [net-next] tcp: reorder tcp_congestion_ops for better cache locality
    https://git.kernel.org/netdev/net-next/c/825066651792

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html

Patch

diff --git a/include/net/tcp.h b/include/net/tcp.h
index 075de26f449d27093ec6eeb114d7f53c328b2136..b34b2daf98d90930a7456f147d04e5b936185cbc 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1035,44 +1035,56 @@  struct rate_sample {
 };
 
 struct tcp_congestion_ops {
-	struct list_head	list;
-	u32 key;
-	u32 flags;
-
-	/* initialize private data (optional) */
-	void (*init)(struct sock *sk);
-	/* cleanup private data  (optional) */
-	void (*release)(struct sock *sk);
+/* fast path fields are put first to fill one cache line */
 
 	/* return slow start threshold (required) */
 	u32 (*ssthresh)(struct sock *sk);
+
 	/* do new cwnd calculation (required) */
 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
+
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
+
 	/* call when cwnd event occurs (optional) */
 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
+
 	/* call when ack arrives (optional) */
 	void (*in_ack_event)(struct sock *sk, u32 flags);
-	/* new value of cwnd after loss (required) */
-	u32  (*undo_cwnd)(struct sock *sk);
+
 	/* hook for packet ack accounting (optional) */
 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+
 	/* override sysctl_tcp_min_tso_segs */
 	u32 (*min_tso_segs)(struct sock *sk);
-	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
-	u32 (*sndbuf_expand)(struct sock *sk);
+
 	/* call when packets are delivered to update cwnd and pacing rate,
 	 * after all the ca_state processing. (optional)
 	 */
 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
+
+
+	/* new value of cwnd after loss (required) */
+	u32  (*undo_cwnd)(struct sock *sk);
+	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
+	u32 (*sndbuf_expand)(struct sock *sk);
+
+/* control/slow paths put last */
 	/* get info for inet_diag (optional) */
 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
 			   union tcp_cc_info *info);
 
-	char 		name[TCP_CA_NAME_MAX];
-	struct module 	*owner;
-};
+	char 			name[TCP_CA_NAME_MAX];
+	struct module		*owner;
+	struct list_head	list;
+	u32			key;
+	u32			flags;
+
+	/* initialize private data (optional) */
+	void (*init)(struct sock *sk);
+	/* cleanup private data  (optional) */
+	void (*release)(struct sock *sk);
+} ____cacheline_aligned_in_smp;
 
 int tcp_register_congestion_control(struct tcp_congestion_ops *type);
 void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
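
Not part of the patch, but for anyone who wants to confirm the layout after a rebase: the hypothetical kernel-style guard below assumes 64-bit function pointers and a 64-byte L1 cache line, under which the eight fast-path callbacks (ssthresh through cong_control) occupy bytes 0..63. It would catch a member being slotted into the hot group by accident; running pahole -C tcp_congestion_ops vmlinux shows the same offsets at a glance.

/*
 * Hypothetical compile-time layout guard (not in this patch).  Assumes
 * 64-bit function pointers and a 64-byte cache line; with those
 * assumptions ssthresh..cong_control fill exactly the first line.
 */
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <net/tcp.h>

static inline void tcp_ca_ops_layout_check(void)
{
	/* the last fast-path callback must end within the first 64 bytes */
	BUILD_BUG_ON(offsetofend(struct tcp_congestion_ops, cong_control) > 64);
	/* cold registration fields stay behind the fast-path group */
	BUILD_BUG_ON(offsetof(struct tcp_congestion_ops, list) <
		     offsetofend(struct tcp_congestion_ops, cong_control));
}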