Message ID | e32e444b498565f85da14285dfc5a865fedc6e58.1631188109.git.geliangtang@xiaomi.com (mailing list archive) |
---|---|
State | Superseded, archived |
Headers | show |
Series | The infinite mapping support | expand |
On Thu, 2021-09-09 at 19:51 +0800, Geliang Tang wrote: > From: Geliang Tang <geliangtang@xiaomi.com> > > This patch added the infinite mapping sending logic. > > Added a new flag snd_infinite_mapping_enable in mptcp_sock. Set it true > when a single contiguous subflow is in use in mptcp_pm_mp_fail_received. > In mptcp_sendmsg_frag, if this flag is true, call the new function > mptcp_update_infinite_mapping to set the infinite mapping. > > Signed-off-by: Geliang Tang <geliangtang@xiaomi.com> > --- > net/mptcp/pm.c | 6 ++++++ > net/mptcp/protocol.c | 18 ++++++++++++++++++ > net/mptcp/protocol.h | 1 + > 3 files changed, 25 insertions(+) > > diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c > index 6ab386ff3294..2830adf64f79 100644 > --- a/net/mptcp/pm.c > +++ b/net/mptcp/pm.c > @@ -251,7 +251,13 @@ void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup) > > void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq) > { > + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); > + struct mptcp_sock *msk = mptcp_sk(subflow->conn); > + > pr_debug("fail_seq=%llu", fail_seq); > + > + if (!mptcp_has_another_subflow(sk) && !READ_ONCE(msk->noncontiguous)) > + WRITE_ONCE(msk->snd_infinite_mapping_enable, true); > } > > /* path manager helpers */ > diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c > index c7ecd3e3b537..4ebbbc6f1d01 100644 > --- a/net/mptcp/protocol.c > +++ b/net/mptcp/protocol.c > @@ -1278,6 +1278,21 @@ static void mptcp_update_data_checksum(struct sk_buff *skb, int added) > mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset)); > } > > +static void mptcp_update_infinite_mapping(struct mptcp_sock *msk, struct mptcp_ext *mpext) > +{ > + if (!mpext) > + return; > + > + mpext->data_seq = READ_ONCE(msk->start_seq); > + mpext->subflow_seq = 0; > + mpext->data_len = 0; > + mpext->csum = 0; > + > + WRITE_ONCE(msk->snd_infinite_mapping_enable, false); > + pr_infinite(msk); > + __mptcp_do_infinite(msk); > +} If you 
move the 'snd_infinite_mapping_enable' flag to the subflow level (inside struct mptcp_subflow_context), all the _ONCE annotations will not be needed. And the flag will be already initialized to 0 at ctx creation time. Additionally, I'm unsure the MPTCP_INFINITE_DONE bit is required ?!? can we simply rely on FALLBACK ??? /P
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 6ab386ff3294..2830adf64f79 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -251,7 +251,13 @@ void mptcp_pm_mp_prio_received(struct sock *sk, u8 bkup) void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq) { + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + struct mptcp_sock *msk = mptcp_sk(subflow->conn); + pr_debug("fail_seq=%llu", fail_seq); + + if (!mptcp_has_another_subflow(sk) && !READ_ONCE(msk->noncontiguous)) + WRITE_ONCE(msk->snd_infinite_mapping_enable, true); } /* path manager helpers */ diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index c7ecd3e3b537..4ebbbc6f1d01 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1278,6 +1278,21 @@ static void mptcp_update_data_checksum(struct sk_buff *skb, int added) mpext->csum = csum_fold(csum_block_add(csum, skb_checksum(skb, offset, added, 0), offset)); } +static void mptcp_update_infinite_mapping(struct mptcp_sock *msk, struct mptcp_ext *mpext) +{ + if (!mpext) + return; + + mpext->data_seq = READ_ONCE(msk->start_seq); + mpext->subflow_seq = 0; + mpext->data_len = 0; + mpext->csum = 0; + + WRITE_ONCE(msk->snd_infinite_mapping_enable, false); + pr_infinite(msk); + __mptcp_do_infinite(msk); +} + static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, struct mptcp_data_frag *dfrag, struct mptcp_sendmsg_info *info) @@ -1410,6 +1425,8 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, out: if (READ_ONCE(msk->csum_enabled)) mptcp_update_data_checksum(skb, copy); + if (READ_ONCE(msk->snd_infinite_mapping_enable)) + mptcp_update_infinite_mapping(msk, mpext); mptcp_subflow_ctx(ssk)->rel_write_seq += copy; return copy; } @@ -2881,6 +2898,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk, if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) WRITE_ONCE(msk->csum_enabled, true); WRITE_ONCE(msk->noncontiguous, false); + WRITE_ONCE(msk->snd_infinite_mapping_enable, false); msk->write_seq = 
subflow_req->idsn + 1; msk->snd_nxt = msk->write_seq; diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 77af55171ded..b4a7c54f0d78 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -253,6 +253,7 @@ struct mptcp_sock { bool use_64bit_ack; /* Set when we received a 64-bit DSN */ bool csum_enabled; bool noncontiguous; + bool snd_infinite_mapping_enable; spinlock_t join_list_lock; struct work_struct work; struct sk_buff *ooo_last_skb;