| Field | Value |
|---|---|
| Message ID | 20210719101107.3203943-1-eric.dumazet@gmail.com (mailing list archive) |
| State | Accepted |
| Commit | e93abb840a2c356ed2809c31fcedb058601ac2e4 |
| Delegated to: | Netdev Maintainers |
| Headers | show |
| Series | [net-next] net/tcp_fastopen: remove tcp_fastopen_ctx_lock \| expand |
| Context | Check | Description |
|---|---|---|
| netdev/cover_letter | success | Link |
| netdev/fixes_present | success | Link |
| netdev/patch_count | success | Link |
| netdev/tree_selection | success | Clearly marked for net-next |
| netdev/subject_prefix | success | Link |
| netdev/cc_maintainers | warning | 7 maintainers not CCed: dsahern@kernel.org yoshfuji@linux-ipv6.org idosch@OSS.NVIDIA.COM andreas.a.roeseler@gmail.com kuniyu@amazon.co.jp fw@strlen.de benh@amazon.com |
| netdev/source_inline | success | Was 0 now: 0 |
| netdev/verify_signedoff | success | Link |
| netdev/module_param | success | Was 0 now: 0 |
| netdev/build_32bit | success | Errors and warnings before: 5122 this patch: 5122 |
| netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
| netdev/verify_fixes | success | Link |
| netdev/checkpatch | warning | WARNING: line length of 102 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns |
| netdev/build_allmodconfig_warn | success | Errors and warnings before: 5183 this patch: 5183 |
| netdev/header_inline | success | Link |
On Mon, Jul 19, 2021 at 3:11 AM Eric Dumazet <eric.dumazet@gmail.com> wrote: > > From: Eric Dumazet <edumazet@google.com> > > Remove the (per netns) spinlock in favor of xchg() atomic operations. > > Signed-off-by: Eric Dumazet <edumazet@google.com> > Cc: Wei Wang <weiwan@google.com> > Cc: Yuchung Cheng <ycheng@google.com> > Cc: Neal Cardwell <ncardwell@google.com> > --- Acked-by: Wei Wang <weiwan@google.com> > include/net/netns/ipv4.h | 1 - > net/ipv4/tcp_fastopen.c | 17 +++-------------- > net/ipv4/tcp_ipv4.c | 1 - > 3 files changed, 3 insertions(+), 16 deletions(-) > > diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h > index b8620519eace8191c76c41f37bd51ac0d3788bc2..2f65701a43c953bd3a9a9e3d491882cb7bb11859 100644 > --- a/include/net/netns/ipv4.h > +++ b/include/net/netns/ipv4.h > @@ -174,7 +174,6 @@ struct netns_ipv4 { > int sysctl_tcp_fastopen; > const struct tcp_congestion_ops __rcu *tcp_congestion_control; > struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; > - spinlock_t tcp_fastopen_ctx_lock; > unsigned int sysctl_tcp_fastopen_blackhole_timeout; > atomic_t tfo_active_disable_times; > unsigned long tfo_active_disable_stamp; > diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c > index 47c32604d38fca960d2cd56f3588bfd2e390b789..1a9fbd5448a719bb5407a8d1e8fbfbe54f56f258 100644 > --- a/net/ipv4/tcp_fastopen.c > +++ b/net/ipv4/tcp_fastopen.c > @@ -55,12 +55,7 @@ void tcp_fastopen_ctx_destroy(struct net *net) > { > struct tcp_fastopen_context *ctxt; > > - spin_lock(&net->ipv4.tcp_fastopen_ctx_lock); > - > - ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx, > - lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock)); > - rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL); > - spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock); > + ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL); > > if (ctxt) > call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free); > @@ -89,18 +84,12 @@ int 
tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, > ctx->num = 1; > } > > - spin_lock(&net->ipv4.tcp_fastopen_ctx_lock); > if (sk) { > q = &inet_csk(sk)->icsk_accept_queue.fastopenq; > - octx = rcu_dereference_protected(q->ctx, > - lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock)); > - rcu_assign_pointer(q->ctx, ctx); > + octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx); > } else { > - octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx, > - lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock)); > - rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx); > + octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx); > } > - spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock); > > if (octx) > call_rcu(&octx->rcu, tcp_fastopen_ctx_free); > diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c > index b9dc2d6197be8b8b03a4d052ad1c87987c7a62aa..e9321dd39cdbcb664843d4ada09a21685b93abb7 100644 > --- a/net/ipv4/tcp_ipv4.c > +++ b/net/ipv4/tcp_ipv4.c > @@ -2964,7 +2964,6 @@ static int __net_init tcp_sk_init(struct net *net) > net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC; > net->ipv4.sysctl_tcp_comp_sack_nr = 44; > net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE; > - spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock); > net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60; > atomic_set(&net->ipv4.tfo_active_disable_times, 0); > > -- > 2.32.0.402.g57bb445576-goog >
Hello: This patch was applied to netdev/net-next.git (refs/heads/master): On Mon, 19 Jul 2021 03:11:07 -0700 you wrote: > From: Eric Dumazet <edumazet@google.com> > > Remove the (per netns) spinlock in favor of xchg() atomic operations. > > Signed-off-by: Eric Dumazet <edumazet@google.com> > Cc: Wei Wang <weiwan@google.com> > Cc: Yuchung Cheng <ycheng@google.com> > Cc: Neal Cardwell <ncardwell@google.com> > > [...] Here is the summary with links: - [net-next] net/tcp_fastopen: remove tcp_fastopen_ctx_lock https://git.kernel.org/netdev/net-next/c/e93abb840a2c You are awesome, thank you! -- Deet-doot-dot, I am a bot. https://korg.docs.kernel.org/patchwork/pwbot.html
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index b8620519eace8191c76c41f37bd51ac0d3788bc2..2f65701a43c953bd3a9a9e3d491882cb7bb11859 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -174,7 +174,6 @@ struct netns_ipv4 { int sysctl_tcp_fastopen; const struct tcp_congestion_ops __rcu *tcp_congestion_control; struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; - spinlock_t tcp_fastopen_ctx_lock; unsigned int sysctl_tcp_fastopen_blackhole_timeout; atomic_t tfo_active_disable_times; unsigned long tfo_active_disable_stamp; diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 47c32604d38fca960d2cd56f3588bfd2e390b789..1a9fbd5448a719bb5407a8d1e8fbfbe54f56f258 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -55,12 +55,7 @@ void tcp_fastopen_ctx_destroy(struct net *net) { struct tcp_fastopen_context *ctxt; - spin_lock(&net->ipv4.tcp_fastopen_ctx_lock); - - ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx, - lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock)); - rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL); - spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock); + ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL); if (ctxt) call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free); @@ -89,18 +84,12 @@ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, ctx->num = 1; } - spin_lock(&net->ipv4.tcp_fastopen_ctx_lock); if (sk) { q = &inet_csk(sk)->icsk_accept_queue.fastopenq; - octx = rcu_dereference_protected(q->ctx, - lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock)); - rcu_assign_pointer(q->ctx, ctx); + octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx); } else { - octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx, - lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock)); - rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx); + octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx); } - 
spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock); if (octx) call_rcu(&octx->rcu, tcp_fastopen_ctx_free); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b9dc2d6197be8b8b03a4d052ad1c87987c7a62aa..e9321dd39cdbcb664843d4ada09a21685b93abb7 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2964,7 +2964,6 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC; net->ipv4.sysctl_tcp_comp_sack_nr = 44; net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE; - spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock); net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60; atomic_set(&net->ipv4.tfo_active_disable_times, 0);