@@ -4,6 +4,76 @@
 #include "protocol.h"
 
+/* NOTE(review): this duplicates struct mptcp_skb_cb/MPTCP_SKB_CB from
+ * protocol.c; better to move the definition to protocol.h than to keep
+ * two copies that can drift apart.
+ */
+struct mptcp_skb_cb {
+	u64 map_seq;
+	u64 end_seq;
+	u32 offset;
+	u8 has_rxtstamp:1;
+};
+
+#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))
+
+/* Called when sending a TFO synack: move the data received with the SYN
+ * from the subflow (ssk) receive queue to the MPTCP-level receive queue,
+ * setting up the MPTCP sequence mapping for it.
+ */
+void subflow_fastopen_send_synack_set_params(struct mptcp_subflow_context *subflow,
+					     struct request_sock *treq)
+{
+	struct tcp_request_sock *tcp_r_sock = tcp_rsk(treq);
+	struct sock *ssk = subflow->tcp_sock;
+	struct sock *sk = subflow->conn;
+	struct mptcp_sock *msk;
+	struct sk_buff *skb;
+	struct tcp_sock *tp;
+	u32 offset;
+
+	msk = mptcp_sk(sk);
+	tp = tcp_sk(ssk);
+
+	/* mark subflow/msk as "mptfo" */
+	msk->is_mptfo = 1;
+
+	/* a TFO synack is only sent after data was queued on the subflow,
+	 * but a NULL peek would be a kernel crash: bail out loudly instead.
+	 */
+	skb = skb_peek(&ssk->sk_receive_queue);
+	if (WARN_ON_ONCE(!skb))
+		return;
+
+	/* dequeue the skb from sk receive queue */
+	__skb_unlink(skb, &ssk->sk_receive_queue);
+	skb_ext_reset(skb);
+	skb_orphan(skb);
+
+	/* set the skb mapping */
+	tp->copied_seq += tp->rcv_nxt - tcp_r_sock->rcv_isn - 1;
+	subflow->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
+	subflow->ssn_offset = tp->copied_seq - 1;
+
+	/* initialize MPTCP_CB; NOTE(review): end_seq is taken from
+	 * msk->ack_seq — confirm it equals map_seq plus the skb payload length.
+	 */
+	offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
+	MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
+	MPTCP_SKB_CB(skb)->end_seq = msk->ack_seq;
+	MPTCP_SKB_CB(skb)->offset = offset;
+	MPTCP_SKB_CB(skb)->has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+
+	mptcp_data_lock(sk);
+
+	mptcp_set_owner_r(skb, sk);
+	__skb_queue_tail(&msk->receive_queue, skb);
+
+	sk->sk_data_ready(sk);
+
+	mptcp_data_unlock(sk);
+}
+
void __mptcp_pre_connect(struct mptcp_sock *msk, struct sock *ssk,
struct msghdr *msg, size_t size)
{
@@ -200,7 +200,7 @@ static void mptcp_rfree(struct sk_buff *skb)
mptcp_rmem_uncharge(sk, len);
}
-static void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
+void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
skb_orphan(skb);
skb->sk = sk;
@@ -846,6 +846,9 @@ void __mptcp_pre_connect(struct mptcp_sock *msk, struct sock *ssk,
struct msghdr *msg, size_t len);
void mptcp_gen_msk_ackseq_fastopen(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
struct mptcp_options_received mp_opt);
+void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk);
+void subflow_fastopen_send_synack_set_params(struct mptcp_subflow_context *subflow,
+ struct request_sock *req);
// Fast Open Mechanism functions end
static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
@@ -307,6 +307,51 @@ static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
 	return NULL;
 }
 
+/* TFO-aware ->send_synack(): on a TCP_SYNACK_FASTOPEN reply, propagate the
+ * data carried by the SYN to the MPTCP socket before the synack is emitted.
+ */
+static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
+				  struct flowi *fl,
+				  struct request_sock *req,
+				  struct tcp_fastopen_cookie *foc,
+				  enum tcp_synack_type synack_type,
+				  struct sk_buff *syn_skb)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+	/* clear tstamp_ok, as needed depending on cookie */
+	if (foc && foc->len > -1)
+		ireq->tstamp_ok = 0;
+
+	if (synack_type == TCP_SYNACK_FASTOPEN)
+		subflow_fastopen_send_synack_set_params(subflow, req);
+
+	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc, synack_type, syn_skb);
+}
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
+				  struct flowi *fl,
+				  struct request_sock *req,
+				  struct tcp_fastopen_cookie *foc,
+				  enum tcp_synack_type synack_type,
+				  struct sk_buff *syn_skb)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+	/* clear tstamp_ok, as needed depending on cookie */
+	if (foc && foc->len > -1)
+		ireq->tstamp_ok = 0;
+
+	if (synack_type == TCP_SYNACK_FASTOPEN)
+		subflow_fastopen_send_synack_set_params(subflow, req);
+
+	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc, synack_type, syn_skb);
+}
+#endif
+
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
struct sk_buff *skb,
@@ -1920,6 +1960,7 @@ void __init mptcp_subflow_init(void)
subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
+ subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;
subflow_specific = ipv4_specific;
subflow_specific.conn_request = subflow_v4_conn_request;
@@ -1933,6 +1974,7 @@ void __init mptcp_subflow_init(void)
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
+ subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
subflow_v6_specific = ipv6_specific;
subflow_v6_specific.conn_request = subflow_v6_conn_request;
In this patch we move the skb carrying the TFO data from the subflow (ssk) receive queue to the msk receive queue, drop its skb extensions and timestamps, and set up its MPTCP data-sequence mapping. Signed-off-by: Dmytro Shytyi <dmytro@shytyi.net> --- net/mptcp/fastopen.c | 55 ++++++++++++++++++++++++++++++++++++++++++++ net/mptcp/protocol.c | 2 +- net/mptcp/protocol.h | 3 +++ net/mptcp/subflow.c | 42 +++++++++++++++++++++++++++++++++ 4 files changed, 101 insertions(+), 1 deletion(-)