[v2,mptcp-next] mptcp: use fast lock for subflows when possible.

Message ID ef0582a975599149d8a127d6f6914760bb64f136.1622131853.git.pabeni@redhat.com (mailing list archive)
State Accepted, archived
Commit 60aec9b8f6456ff25a5b3a12f1d8c764e570c1c1
Delegated to: Matthieu Baerts
Series [v2,mptcp-next] mptcp: use fast lock for subflows when possible.

Commit Message

Paolo Abeni May 27, 2021, 4:11 p.m. UTC
There are a bunch of callsites where the ssk socket
lock is acquired with the full-blown version even though
they are eligible for the fast variant. Let's move to the latter.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
v1 -> v2:
 - add more chunks in protocol.c
---
 net/mptcp/pm_netlink.c | 10 ++++++----
 net/mptcp/protocol.c   | 15 +++++++++------
 2 files changed, 15 insertions(+), 10 deletions(-)
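
For readers less familiar with the socket locking helpers, here is a minimal,
purely illustrative sketch (not part of the patch; the helper name is made up)
of the pattern the conversion moves to: lock_sock_fast() takes the socket
spinlock and only falls back to the full, sleeping lock_sock() path when the
socket is currently owned by process context, returning whether that slow path
was taken so that unlock_sock_fast() can release the matching lock.

#include <net/sock.h>
#include <net/tcp.h>

/* Hypothetical helper, for illustration only: send an ACK on one
 * subflow while holding the fast variant of the socket lock.
 */
static void example_subflow_send_ack(struct sock *ssk)
{
	bool slow;

	/* Fast path keeps the spinlock held with BHs disabled; returns
	 * true only if the slow, full socket lock had to be taken.
	 */
	slow = lock_sock_fast(ssk);

	if (tcp_can_send_ack(ssk))
		tcp_send_ack(ssk);

	/* Pass back the value returned by lock_sock_fast() so the
	 * matching unlock path runs.
	 */
	unlock_sock_fast(ssk, slow);
}

Because the fast path keeps bottom halves disabled, it only suits short
critical sections that never sleep, such as sending a bare ACK; that is
the shape of every call site converted below.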

Comments

Mat Martineau May 28, 2021, 12:05 a.m. UTC | #1
On Thu, 27 May 2021, Paolo Abeni wrote:

> There are a bunch of callsites where the ssk socket
> lock is acquired with the full-blown version even though
> they are eligible for the fast variant. Let's move to the latter.
>
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
> v1 -> v2:
> - add more chunks in protocol.c

Looks good, thanks Paolo.

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>


--
Mat Martineau
Intel
Matthieu Baerts June 7, 2021, 3:07 p.m. UTC | #2
Hi Paolo, Mat,

On 27/05/2021 18:11, Paolo Abeni wrote:
> There are a bunch of callsites where the ssk socket
> lock is acquired with the full-blown version even though
> they are eligible for the fast variant. Let's move to the latter.
> 
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Thank you for the patch and the review!

Now in our tree (without the dot at the end of the commit title ;-) )

- 60aec9b8f645: mptcp: use fast lock for subflows when possible
- Results: b01ac56d4eb3..e6629296aed2

Builds and tests are now in progress:

https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export/20210607T150702
https://github.com/multipath-tcp/mptcp_net-next/actions/workflows/build-validation.yml?query=branch:export/20210607T150702

Cheers,
Matt

Patch

diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 09722598994d..d4732a4f223e 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -540,6 +540,7 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
 	subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
 	if (subflow) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+		bool slow;
 
 		spin_unlock_bh(&msk->pm.lock);
 		pr_debug("send ack for %s%s%s",
@@ -547,9 +548,9 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
 			 mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
 			 mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
 
-		lock_sock(ssk);
+		slow = lock_sock_fast(ssk);
 		tcp_send_ack(ssk);
-		release_sock(ssk);
+		unlock_sock_fast(ssk, slow);
 		spin_lock_bh(&msk->pm.lock);
 	}
 }
@@ -566,6 +567,7 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
 		struct sock *sk = (struct sock *)msk;
 		struct mptcp_addr_info local;
+		bool slow;
 
 		local_address((struct sock_common *)ssk, &local);
 		if (!addresses_equal(&local, addr, addr->port))
@@ -578,9 +580,9 @@ int mptcp_pm_nl_mp_prio_send_ack(struct mptcp_sock *msk,
 
 		spin_unlock_bh(&msk->pm.lock);
 		pr_debug("send ack for mp_prio");
-		lock_sock(ssk);
+		slow = lock_sock_fast(ssk);
 		tcp_send_ack(ssk);
-		release_sock(ssk);
+		unlock_sock_fast(ssk, slow);
 		spin_lock_bh(&msk->pm.lock);
 
 		return 0;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 785e74c13b3c..786f09d83d35 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -433,23 +433,25 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
 
 	mptcp_for_each_subflow(msk, subflow) {
 		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+		bool slow;
 
-		lock_sock(ssk);
+		slow = lock_sock_fast(ssk);
 		if (tcp_can_send_ack(ssk))
 			tcp_send_ack(ssk);
-		release_sock(ssk);
+		unlock_sock_fast(ssk, slow);
 	}
 }
 
 static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk)
 {
+	bool slow;
 	int ret;
 
-	lock_sock(ssk);
+	slow = lock_sock_fast(ssk);
 	ret = tcp_can_send_ack(ssk);
 	if (ret)
 		tcp_cleanup_rbuf(ssk, 1);
-	release_sock(ssk);
+	unlock_sock_fast(ssk, slow);
 	return ret;
 }
 
@@ -2252,13 +2254,14 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 
 	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
 		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
+		bool slow;
 
-		lock_sock(tcp_sk);
+		slow = lock_sock_fast(tcp_sk);
 		if (tcp_sk->sk_state != TCP_CLOSE) {
 			tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
 			tcp_set_state(tcp_sk, TCP_CLOSE);
 		}
-		release_sock(tcp_sk);
+		unlock_sock_fast(tcp_sk, slow);
 	}
 
 	inet_sk_state_store(sk, TCP_CLOSE);