diff --git a/net/mptcp/options.c b/net/mptcp/options.c
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -931,7 +931,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
 		if (TCP_SKB_CB(skb)->seq == subflow->ssn_offset + 1 &&
 		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
 		    subflow->mp_join && (mp_opt->suboptions & OPTIONS_MPTCP_MPJ) &&
-		    READ_ONCE(msk->pm.server_side))
+		    !subflow->request_join)
 			tcp_send_ack(ssk);
 		goto fully_established;
 	}
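
The changed clause swaps a connection-level test (msk->pm.server_side) for a per-subflow one: request_join is set only on the end that actively initiated the MP_JOIN handshake, so !subflow->request_join identifies the passive end of this particular subflow, whichever peer happens to be the MPTCP server. Previously only a server-side msk would retransmit this ACK; now either peer does so when it passively accepted the join. A minimal stand-alone C sketch of just that changed clause (struct and helper names are hypothetical, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical user-space mirror of the two per-subflow flags the
     * rewritten condition reads (subflow->mp_join, subflow->request_join).
     */
    struct subflow_model {
        bool mp_join;      /* subflow was set up via MP_JOIN */
        bool request_join; /* this end initiated the MP_JOIN */
    };

    /* Only the passive end of a join resends the ACK for a bare
     * third-ack retransmission, regardless of which peer is the
     * MPTCP "server".
     */
    static bool should_ack_join(const struct subflow_model *sf)
    {
        return sf->mp_join && !sf->request_join;
    }

    int main(void)
    {
        struct subflow_model passive = { .mp_join = true, .request_join = false };
        struct subflow_model active  = { .mp_join = true, .request_join = true };

        printf("passive end acks: %d\n", should_ack_join(&passive)); /* prints 1 */
        printf("active end acks:  %d\n", should_ack_join(&active));  /* prints 0 */
        return 0;
    }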
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3327,15 +3327,12 @@ bool mptcp_finish_join(struct sock *ssk)
 		return false;
 	}
 
-	if (!msk->pm.server_side)
+	if (!list_empty(&subflow->node))
 		goto out;
 
 	if (!mptcp_pm_allow_new_subflow(msk))
 		goto err_prohibited;
 
-	if (WARN_ON_ONCE(!list_empty(&subflow->node)))
-		goto err_prohibited;
-
 	/* active connections are already on conn_list.
 	 * If we can't acquire msk socket lock here, let the release callback
 	 * handle it
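
mptcp_finish_join() now tells locally initiated subflows apart by their presence on the msk's conn_list instead of by pm.server_side: an actively connected subflow is already linked onto conn_list when it reaches this point, so an empty subflow->node means the subflow was accepted passively and still has to pass mptcp_pm_allow_new_subflow(); the old WARN_ON_ONCE on list membership becomes redundant. A minimal user-space model of that list test, with simplified stand-ins for the kernel's list helpers (not the kernel implementation):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified circular-list helpers modelled after the kernel's
     * list_head API, just enough to show the membership test.
     */
    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static bool list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *entry, struct list_head *head)
    {
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    int main(void)
    {
        struct list_head conn_list, active_node, passive_node;

        INIT_LIST_HEAD(&conn_list);
        INIT_LIST_HEAD(&active_node);
        INIT_LIST_HEAD(&passive_node);

        /* An actively created subflow is linked on conn_list at connect
         * time, before the join-finish path runs.
         */
        list_add_tail(&active_node, &conn_list);

        /* The new test: a non-empty node means "already on conn_list",
         * i.e. locally initiated, so PM admission is skipped.
         */
        printf("active already listed:  %d\n", !list_empty(&active_node));  /* prints 1 */
        printf("passive already listed: %d\n", !list_empty(&passive_node)); /* prints 0 */
        return 0;
    }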
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -911,13 +911,17 @@ static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
 	return false;
 }
 
+static inline bool is_active_ssk(struct mptcp_subflow_context *subflow)
+{
+	return (subflow->request_mptcp || subflow->request_join);
+}
+
 static inline bool subflow_simultaneous_connect(struct sock *sk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-	struct sock *parent = subflow->conn;
 
 	return sk->sk_state == TCP_ESTABLISHED &&
-	       !mptcp_sk(parent)->pm.server_side &&
+	       is_active_ssk(subflow) &&
 	       !subflow->conn_finished;
 }
 
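The new is_active_ssk() helper answers "did this end initiate the subflow?": request_mptcp is set when this peer opened the initial MP_CAPABLE subflow and request_join when it opened an additional MP_JOIN subflow, so subflow_simultaneous_connect() no longer has to reach through subflow->conn to the parent msk's server_side flag. A compilable user-space sketch of the same logic, with hypothetical type and function names (TCP_ESTABLISHED stands in for the kernel constant):

    #include <stdbool.h>
    #include <stdio.h>

    #define TCP_ESTABLISHED 1 /* stand-in for the kernel's state value */

    /* Hypothetical mirror of the mptcp_subflow_context fields consulted
     * by is_active_ssk() and subflow_simultaneous_connect().
     */
    struct subflow_ctx_model {
        bool request_mptcp; /* we opened the initial MP_CAPABLE subflow */
        bool request_join;  /* we opened an additional MP_JOIN subflow */
        bool conn_finished;
    };

    /* "Active" means this end called connect() for the subflow, whether
     * it is the first subflow or a later join.
     */
    static bool is_active_ssk_model(const struct subflow_ctx_model *sf)
    {
        return sf->request_mptcp || sf->request_join;
    }

    /* Mirrors the rewritten subflow_simultaneous_connect(): only an
     * actively opened, not-yet-finished, established subflow qualifies.
     */
    static bool simultaneous_connect_model(int sk_state,
                                           const struct subflow_ctx_model *sf)
    {
        return sk_state == TCP_ESTABLISHED &&
               is_active_ssk_model(sf) &&
               !sf->conn_finished;
    }

    int main(void)
    {
        struct subflow_ctx_model active_join = { .request_join = true };
        struct subflow_ctx_model passive = { false, false, false };

        printf("%d\n", simultaneous_connect_model(TCP_ESTABLISHED, &active_join)); /* prints 1 */
        printf("%d\n", simultaneous_connect_model(TCP_ESTABLISHED, &passive));     /* prints 0 */
        return 0;
    }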
This change updates internal logic to permit subflows to be established
from either the client or server ends of MPTCP connections. This symmetry
and added flexibility may be harnessed by PM implementations running on
either end in creating new subflows.

The essence of this change lies in not relying on the "server_side" flag
(which continues to be available if needed).

Signed-off-by: Kishen Maloor <kishen.maloor@intel.com>
---
 net/mptcp/options.c  | 2 +-
 net/mptcp/protocol.c | 5 +----
 net/mptcp/protocol.h | 8 ++++++--
 3 files changed, 8 insertions(+), 7 deletions(-)