@@ -642,6 +642,10 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
}
}
+ /* over limit? can't append more skbs to msk */
+ if (__mptcp_rmem(sk) > sk_rbuf)
+ return true;
+
pr_debug("msk=%p ssk=%p", msk, ssk);
tp = tcp_sk(ssk);
do {
@@ -786,8 +790,6 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
- struct mptcp_sock *msk = mptcp_sk(sk);
- int sk_rbuf, ssk_rbuf;
/* The peer can send data while we are shutting down this
* subflow at msk destruction time, but we must avoid enqueuing
@@ -796,20 +798,9 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
if (unlikely(subflow->disposable))
return;
- ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
- sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
- if (unlikely(ssk_rbuf > sk_rbuf))
- sk_rbuf = ssk_rbuf;
-
- /* over limit? can't append more skbs to msk, Also, no need to wake-up*/
- if (__mptcp_rmem(sk) > sk_rbuf) {
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
- return;
- }
-
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
- if (move_skbs_to_msk(msk, ssk))
+ if (move_skbs_to_msk(mptcp_sk(sk), ssk))
sk->sk_data_ready(sk);
mptcp_data_unlock(sk);
This cleans up the code a bit and avoids skipping the msk receive buffer update in some weird corner cases. Signed-off-by: Paolo Abeni <pabeni@redhat.com> --- net/mptcp/protocol.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-)