Message ID | a552af9d09a972538976a1b5ae3159707baa24a8.1666349129.git.geliang.tang@suse.com (mailing list archive) |
---|---|
State | Superseded, archived |
Delegated to: | Mat Martineau |
Headers | show |
Series | BPF redundant scheduler | expand |
Context | Check | Description |
---|---|---|
matttbe/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 106 lines checked |
matttbe/build | success | Build and static analysis OK |
matttbe/KVM_Validation__normal | warning | Unstable: 3 failed test(s): packetdrill_add_addr selftest_mptcp_join selftest_simult_flows |
On Fri, 21 Oct 2022, Geliang Tang wrote: > Drop the useless msk->last_snd and macro MPTCP_RESET_SCHEDULER. Move > last_snd from struct mptcp_sock into struct mptcp_sched_ops. > > Signed-off-by: Geliang Tang <geliang.tang@suse.com> > --- > include/net/mptcp.h | 3 +++ > net/mptcp/pm.c | 9 +-------- > net/mptcp/pm_netlink.c | 3 --- > net/mptcp/protocol.c | 13 ++++--------- > net/mptcp/protocol.h | 2 -- > net/mptcp/sched.c | 3 +++ > 6 files changed, 11 insertions(+), 22 deletions(-) > > diff --git a/include/net/mptcp.h b/include/net/mptcp.h > index 0f386d805957..a45e00bf2f3e 100644 > --- a/include/net/mptcp.h > +++ b/include/net/mptcp.h > @@ -110,6 +110,9 @@ struct mptcp_sched_ops { > int (*get_subflow)(const struct mptcp_sock *msk, > struct mptcp_sched_data *data); > > + /* round-robin scheduler */ > + struct sock *last_snd; > + Hi Geliang - There's one mptcp_sched_ops per scheduler, not one per socket. So it doesn't work to add per-socket data (like last_snd and snd_burst) to this structure. 
- Mat > char name[MPTCP_SCHED_NAME_MAX]; > struct module *owner; > struct list_head list; > diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c > index 45e2a48397b9..cdeb7280ac76 100644 > --- a/net/mptcp/pm.c > +++ b/net/mptcp/pm.c > @@ -282,15 +282,8 @@ void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup) > > pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup); > msk = mptcp_sk(sk); > - if (subflow->backup != bkup) { > + if (subflow->backup != bkup) > subflow->backup = bkup; > - mptcp_data_lock(sk); > - if (!sock_owned_by_user(sk)) > - msk->last_snd = NULL; > - else > - __set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags); > - mptcp_data_unlock(sk); > - } > > mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC); > } > diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c > index 9813ed0fde9b..1f2da4aedcb4 100644 > --- a/net/mptcp/pm_netlink.c > +++ b/net/mptcp/pm_netlink.c > @@ -475,9 +475,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con > > slow = lock_sock_fast(ssk); > if (prio) { > - if (subflow->backup != backup) > - msk->last_snd = NULL; > - > subflow->send_mp_prio = 1; > subflow->backup = backup; > subflow->request_bkup = backup; > diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c > index 03312e762606..a9d33dea2a8a 100644 > --- a/net/mptcp/protocol.c > +++ b/net/mptcp/protocol.c > @@ -1469,16 +1469,13 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) > > burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); > wmem = READ_ONCE(ssk->sk_wmem_queued); > - if (!burst) { > - msk->last_snd = NULL; > + if (!burst) > return ssk; > - } > > subflow = mptcp_subflow_ctx(ssk); > subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + > READ_ONCE(ssk->sk_pacing_rate) * burst, > burst + wmem); > - msk->last_snd = ssk; > msk->snd_burst = burst; > return ssk; > } > @@ -2378,8 +2375,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, > 
msk->first = NULL; > > out: > - if (ssk == msk->last_snd) > - msk->last_snd = NULL; > + if (msk->sched && ssk == msk->sched->last_snd) > + msk->sched->last_snd = NULL; > > if (need_push) > __mptcp_push_pending(sk, 0); > @@ -3013,7 +3010,7 @@ static int mptcp_disconnect(struct sock *sk, int flags) > * subflow > */ > mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); > - msk->last_snd = NULL; > + msk->sched->last_snd = NULL; > WRITE_ONCE(msk->flags, 0); > msk->cb_flags = 0; > msk->push_pending = 0; > @@ -3274,8 +3271,6 @@ static void mptcp_release_cb(struct sock *sk) > __mptcp_set_connected(sk); > if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) > __mptcp_error_report(sk); > - if (__test_and_clear_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags)) > - msk->last_snd = NULL; > } > > __mptcp_update_rmem(sk); > diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h > index 8f48f881adf8..a58fa261f487 100644 > --- a/net/mptcp/protocol.h > +++ b/net/mptcp/protocol.h > @@ -124,7 +124,6 @@ > #define MPTCP_RETRANSMIT 4 > #define MPTCP_FLUSH_JOIN_LIST 5 > #define MPTCP_CONNECTED 6 > -#define MPTCP_RESET_SCHEDULER 7 > > static inline bool before64(__u64 seq1, __u64 seq2) > { > @@ -258,7 +257,6 @@ struct mptcp_sock { > atomic64_t rcv_wnd_sent; > u64 rcv_data_fin_seq; > int rmem_fwd_alloc; > - struct sock *last_snd; > int snd_burst; > int old_wspace; > u64 recovery_snd_nxt; /* in recovery mode accept up to this seq; > diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c > index 6324a8a76382..f6c622e15584 100644 > --- a/net/mptcp/sched.c > +++ b/net/mptcp/sched.c > @@ -68,6 +68,8 @@ int mptcp_init_sched(struct mptcp_sock *msk, > if (msk->sched->init) > msk->sched->init(msk); > > + msk->sched->last_snd = NULL; > + > pr_debug("sched=%s", msk->sched->name); > > out: > @@ -81,6 +83,7 @@ void mptcp_release_sched(struct mptcp_sock *msk) > if (!sched) > return; > > + msk->sched->last_snd = NULL; > msk->sched = NULL; > if (sched->release) > sched->release(msk); > -- > 2.35.3 > > > -- Mat 
Martineau, Intel
diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 0f386d805957..a45e00bf2f3e 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -110,6 +110,9 @@ struct mptcp_sched_ops { int (*get_subflow)(const struct mptcp_sock *msk, struct mptcp_sched_data *data); + /* round-robin scheduler */ + struct sock *last_snd; + char name[MPTCP_SCHED_NAME_MAX]; struct module *owner; struct list_head list; diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 45e2a48397b9..cdeb7280ac76 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -282,15 +282,8 @@ void mptcp_pm_mp_prio_received(struct sock *ssk, u8 bkup) pr_debug("subflow->backup=%d, bkup=%d\n", subflow->backup, bkup); msk = mptcp_sk(sk); - if (subflow->backup != bkup) { + if (subflow->backup != bkup) subflow->backup = bkup; - mptcp_data_lock(sk); - if (!sock_owned_by_user(sk)) - msk->last_snd = NULL; - else - __set_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags); - mptcp_data_unlock(sk); - } mptcp_event(MPTCP_EVENT_SUB_PRIORITY, msk, ssk, GFP_ATOMIC); } diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 9813ed0fde9b..1f2da4aedcb4 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -475,9 +475,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con slow = lock_sock_fast(ssk); if (prio) { - if (subflow->backup != backup) - msk->last_snd = NULL; - subflow->send_mp_prio = 1; subflow->backup = backup; subflow->request_bkup = backup; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 03312e762606..a9d33dea2a8a 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1469,16 +1469,13 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); wmem = READ_ONCE(ssk->sk_wmem_queued); - if (!burst) { - msk->last_snd = NULL; + if (!burst) return ssk; - } subflow = mptcp_subflow_ctx(ssk); subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + 
READ_ONCE(ssk->sk_pacing_rate) * burst, burst + wmem); - msk->last_snd = ssk; msk->snd_burst = burst; return ssk; } @@ -2378,8 +2375,8 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, msk->first = NULL; out: - if (ssk == msk->last_snd) - msk->last_snd = NULL; + if (msk->sched && ssk == msk->sched->last_snd) + msk->sched->last_snd = NULL; if (need_push) __mptcp_push_pending(sk, 0); @@ -3013,7 +3010,7 @@ static int mptcp_disconnect(struct sock *sk, int flags) * subflow */ mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); - msk->last_snd = NULL; + msk->sched->last_snd = NULL; WRITE_ONCE(msk->flags, 0); msk->cb_flags = 0; msk->push_pending = 0; @@ -3274,8 +3271,6 @@ static void mptcp_release_cb(struct sock *sk) __mptcp_set_connected(sk); if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) __mptcp_error_report(sk); - if (__test_and_clear_bit(MPTCP_RESET_SCHEDULER, &msk->cb_flags)) - msk->last_snd = NULL; } __mptcp_update_rmem(sk); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 8f48f881adf8..a58fa261f487 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -124,7 +124,6 @@ #define MPTCP_RETRANSMIT 4 #define MPTCP_FLUSH_JOIN_LIST 5 #define MPTCP_CONNECTED 6 -#define MPTCP_RESET_SCHEDULER 7 static inline bool before64(__u64 seq1, __u64 seq2) { @@ -258,7 +257,6 @@ struct mptcp_sock { atomic64_t rcv_wnd_sent; u64 rcv_data_fin_seq; int rmem_fwd_alloc; - struct sock *last_snd; int snd_burst; int old_wspace; u64 recovery_snd_nxt; /* in recovery mode accept up to this seq; diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c index 6324a8a76382..f6c622e15584 100644 --- a/net/mptcp/sched.c +++ b/net/mptcp/sched.c @@ -68,6 +68,8 @@ int mptcp_init_sched(struct mptcp_sock *msk, if (msk->sched->init) msk->sched->init(msk); + msk->sched->last_snd = NULL; + pr_debug("sched=%s", msk->sched->name); out: @@ -81,6 +83,7 @@ void mptcp_release_sched(struct mptcp_sock *msk) if (!sched) return; + msk->sched->last_snd = NULL; msk->sched = NULL; 
if (sched->release) sched->release(msk);
Drop the useless msk->last_snd and macro MPTCP_RESET_SCHEDULER. Move last_snd from struct mptcp_sock into struct mptcp_sched_ops. Signed-off-by: Geliang Tang <geliang.tang@suse.com> --- include/net/mptcp.h | 3 +++ net/mptcp/pm.c | 9 +-------- net/mptcp/pm_netlink.c | 3 --- net/mptcp/protocol.c | 13 ++++--------- net/mptcp/protocol.h | 2 -- net/mptcp/sched.c | 3 +++ 6 files changed, 11 insertions(+), 22 deletions(-)