@@ -2726,6 +2726,7 @@ static void __mptcp_init_sock(struct sock *sk)
 	msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
 	WRITE_ONCE(msk->first, NULL);
+	WRITE_ONCE(msk->bpf_iter_task, NULL);
 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
 	WRITE_ONCE(msk->allow_infinite_fallback, true);
@@ -327,6 +327,7 @@ struct mptcp_sock {
 	struct list_head conn_list;
 	struct list_head rtx_queue;
 	struct mptcp_data_frag *first_pending;
+	struct task_struct *bpf_iter_task;
 	struct list_head join_list;
 	struct sock	*first; /* The mptcp ops can safely dereference, using suitable
				 * ONCE annotation, the subflow outside the socket
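
The new bpf_iter_task pointer appears intended to record which task, if any, is currently driving a BPF iterator over this msk; the WRITE_ONCE() store at init time and the READ_ONCE() in the accessor below allow it to be read locklessly. As a minimal illustration (not part of the patch, and the helper name is hypothetical), a caller could compare the recorded task against current like this:

/*
 * Illustrative sketch only: lockless check of whether the running
 * task is the one recorded as the BPF iterator owner.  The
 * READ_ONCE() pairs with the WRITE_ONCE() stores in the patch.
 */
static inline bool mptcp_bpf_iter_task_is_current(const struct mptcp_sock *msk)
{
	return READ_ONCE(msk->bpf_iter_task) == current;
}
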
@@ -1291,4 +1292,19 @@ mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_re
 static inline void mptcp_join_cookie_init(void) {}
 #endif
 
+static inline void mptcp_set_bpf_iter_task(struct mptcp_sock *msk)
+{
+	WRITE_ONCE(msk->bpf_iter_task, current);
+}
+
+static inline void mptcp_clear_bpf_iter_task(struct mptcp_sock *msk)
+{
+	WRITE_ONCE(msk->bpf_iter_task, NULL);
+}
+
+static inline struct task_struct *mptcp_get_bpf_iter_task(struct mptcp_sock *msk)
+{
+	return READ_ONCE(msk->bpf_iter_task);
+}
+
 #endif /* __MPTCP_PROTOCOL_H */
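
For context, a hypothetical user of these helpers: a BPF subflow iterator running under the msk socket lock could bracket its walk with mptcp_set_bpf_iter_task()/mptcp_clear_bpf_iter_task(), so that code reached from the BPF program can recognize that current owns the iteration. The function name below is illustrative and not taken from the patch; msk_owned_by_me() and mptcp_for_each_subflow() are existing MPTCP helpers.

/*
 * Hypothetical usage sketch, not from the patch: bracket a subflow
 * walk with the new helpers.  Assumes the iterator runs with the msk
 * socket lock held, as asserted by msk_owned_by_me().
 */
static int bpf_iter_mptcp_subflow_walk(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	msk_owned_by_me(msk);

	mptcp_set_bpf_iter_task(msk);		/* record the iterating task */
	mptcp_for_each_subflow(msk, subflow) {
		/* ... run the BPF program on each subflow ... */
	}
	mptcp_clear_bpf_iter_task(msk);		/* walk finished */

	return 0;
}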