diff mbox series

[mptcp-next,v2,3/5] bpf: Add mptcp_address bpf_iter

Message ID d000583ad69a4f122552e3b06641d25f5200437d.1729582332.git.tanggeliang@kylinos.cn (mailing list archive)
State New
Headers show
Series add mptcp_address bpf_iter | expand

Checks

Context Check Description
matttbe/KVM_Validation__normal success Success! ✅
matttbe/KVM_Validation__debug success Success! ✅
matttbe/KVM_Validation__btf-normal__only_bpftest_all_ success Success! ✅
matttbe/KVM_Validation__btf-debug__only_bpftest_all_ success Success! ✅
matttbe/checkpatch success total: 0 errors, 0 warnings, 0 checks, 63 lines checked
matttbe/shellcheck success MPTCP selftests files have not been modified
matttbe/build success Build and static analysis OK

Commit Message

Geliang Tang Oct. 22, 2024, 7:47 a.m. UTC
From: Geliang Tang <tanggeliang@kylinos.cn>

Just like the mptcp_subflow bpf_iter used to implement the MPTCP BPF
packet scheduler, another bpf_iter is also needed, named mptcp_address,
to traverse all address entries on userspace_pm_local_addr_list of an
MPTCP socket for implementing the MPTCP BPF path manager.

In kernel space, we walk this list like this:

 list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list)
	 kfunc(entry);

With the mptcp_address bpf_iter, bpf_for_each() can be used to do the
same thing in BPF program:

 bpf_for_each(mptcp_address, entry, msk)
	 kfunc(entry);

This bpf_iter must be invoked while holding the msk pm lock, so use
lockdep_assert_held() to assert that the lock is held.

Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
---
 net/mptcp/bpf.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)
diff mbox series

Patch

diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index e9db856972cb..8889e5351897 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -214,6 +214,15 @@  struct bpf_iter_mptcp_subflow_kern {
 	struct list_head *pos;
 } __aligned(8);
 
+struct bpf_iter_mptcp_address {
+	__u64 __opaque[2];
+} __aligned(8);
+
+struct bpf_iter_mptcp_address_kern {
+	struct mptcp_sock *msk;
+	struct list_head *pos;
+} __aligned(8);
+
 __bpf_kfunc_start_defs();
 
 __bpf_kfunc static struct mptcp_sock *bpf_mptcp_sk(struct sock *sk)
@@ -266,6 +275,39 @@  bpf_iter_mptcp_subflow_destroy(struct bpf_iter_mptcp_subflow *it)
 {
 }
 
+__bpf_kfunc static int
+bpf_iter_mptcp_address_new(struct bpf_iter_mptcp_address *it,
+			   struct mptcp_sock *msk)
+{
+	struct bpf_iter_mptcp_address_kern *kit = (void *)it;
+
+	kit->msk = msk;
+	if (!msk)
+		return -EINVAL;
+
+	lockdep_assert_held(&msk->pm.lock);
+
+	kit->pos = &msk->pm.userspace_pm_local_addr_list;
+	return 0;
+}
+
+__bpf_kfunc static struct mptcp_pm_addr_entry *
+bpf_iter_mptcp_address_next(struct bpf_iter_mptcp_address *it)
+{
+	struct bpf_iter_mptcp_address_kern *kit = (void *)it;
+
+	if (!kit->msk || list_is_last(kit->pos, &kit->msk->pm.userspace_pm_local_addr_list))
+		return NULL;
+
+	kit->pos = kit->pos->next;
+	return list_entry(kit->pos, struct mptcp_pm_addr_entry, list);
+}
+
+__bpf_kfunc static void
+bpf_iter_mptcp_address_destroy(struct bpf_iter_mptcp_address *it)
+{
+}
+
 __bpf_kfunc static struct
 mptcp_sock *bpf_mptcp_sock_acquire(struct mptcp_sock *msk)
 {
@@ -305,6 +347,9 @@  BTF_ID_FLAGS(func, bpf_mptcp_subflow_tcp_sock)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_address_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_address_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_address_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_mptcp_sock_acquire, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_mptcp_sock_release, KF_RELEASE)
 BTF_KFUNCS_END(bpf_mptcp_common_kfunc_ids)