diff mbox series

[mptcp-next,v4,3/5] bpf: Add mptcp_userspace_pm_addr bpf_iter

Message ID 10551d58b505f20754a13302ce2340e220a3e7a5.1736325184.git.tanggeliang@kylinos.cn (mailing list archive)
State Needs ACK
Headers show
Series add mptcp_address bpf_iter | expand

Checks

Context Check Description
matttbe/build success Build and static analysis OK
matttbe/checkpatch warning total: 0 errors, 11 warnings, 0 checks, 70 lines checked
matttbe/shellcheck success MPTCP selftests files have not been modified
matttbe/KVM_Validation__normal success Success! ✅
matttbe/KVM_Validation__debug success Success! ✅
matttbe/KVM_Validation__btf-normal__only_bpftest_all_ success Success! ✅
matttbe/KVM_Validation__btf-debug__only_bpftest_all_ success Success! ✅

Commit Message

Geliang Tang Jan. 8, 2025, 8:38 a.m. UTC
From: Geliang Tang <tanggeliang@kylinos.cn>

Just like the mptcp_subflow bpf_iter used to implement the MPTCP
BPF packet scheduler, another bpf_iter is also needed, named
mptcp_userspace_pm_addr, to traverse all address entries on
userspace_pm_local_addr_list of an MPTCP socket for implementing
the MPTCP BPF path manager.

In kernel space, we walk this list like this:

	mptcp_for_each_userspace_pm_addr(msk, entry)
		kfunc(entry);

With the mptcp_userspace_pm_addr bpf_iter, bpf_for_each() can be
used to do the same thing in BPF program:

	bpf_for_each(mptcp_userspace_pm_addr, entry, msk)
		kfunc(entry);

This bpf_iter should be invoked while holding the msk pm lock, so
use spin_is_locked() to check whether the lock is held.

Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
---
 net/mptcp/bpf.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

Comments

Geliang Tang Jan. 8, 2025, 8:43 a.m. UTC | #1
On Wed, 2025-01-08 at 16:38 +0800, Geliang Tang wrote:
> From: Geliang Tang <tanggeliang@kylinos.cn>
> 
> Just like the mptcp_subflow bpf_iter used to implement the MPTCP
> BPF packet scheduler, another bpf_iter is also needed, named
> mptcp_userspace_pm_addr, to traverse all address entries on
> userspace_pm_local_addr_list of an MPTCP socket for implementing
> the MPTCP BPF path manager.
> 
> In kernel space, we walk this list like this:
> 
> 	mptcp_for_each_userspace_pm_addr(msk, entry)
> 		kfunc(entry);
> 
> With the mptcp_userspace_pm_addr bpf_iter, bpf_for_each() can be
> used to do the same thing in BPF program:
> 
> 	bpf_for_each(mptcp_userspace_pm_addr, entry, msk)
> 		kfunc(entry);
> 
> This bpf_iter should be invoked under holding the msk pm lock, so
> use lockdep_assert_held() to assert the lock is holding.

Please update the line as:

use spin_is_locked() to check whether the lock is held.

Thanks,
-Geliang

> 
> Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
> ---
>  net/mptcp/bpf.c | 52
> +++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 52 insertions(+)
> 
> diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
> index 923895322b2c..c7f5d208f6cc 100644
> --- a/net/mptcp/bpf.c
> +++ b/net/mptcp/bpf.c
> @@ -220,6 +220,15 @@ struct bpf_iter_mptcp_subflow_kern {
>  	struct list_head *pos;
>  } __aligned(8);
>  
> +struct bpf_iter_mptcp_userspace_pm_addr {
> +	__u64 __opaque[2];
> +} __aligned(8);
> +
> +struct bpf_iter_mptcp_userspace_pm_addr_kern {
> +	struct mptcp_sock *msk;
> +	struct list_head *pos;
> +} __aligned(8);
> +
>  __bpf_kfunc_start_defs();
>  
>  __bpf_kfunc static struct mptcp_subflow_context *
> @@ -273,6 +282,46 @@ bpf_iter_mptcp_subflow_destroy(struct
> bpf_iter_mptcp_subflow *it)
>  {
>  }
>  
> +__bpf_kfunc static int
> +bpf_iter_mptcp_userspace_pm_addr_new(struct
> bpf_iter_mptcp_userspace_pm_addr *it,
> +				     struct mptcp_sock *msk)
> +{
> +	struct bpf_iter_mptcp_userspace_pm_addr_kern *kit = (void
> *)it;
> +
> +	BUILD_BUG_ON(sizeof(struct
> bpf_iter_mptcp_userspace_pm_addr_kern) >
> +		     sizeof(struct
> bpf_iter_mptcp_userspace_pm_addr));
> +	BUILD_BUG_ON(__alignof__(struct
> bpf_iter_mptcp_userspace_pm_addr_kern) !=
> +		     __alignof__(struct
> bpf_iter_mptcp_userspace_pm_addr));
> +
> +	kit->msk = msk;
> +	if (!msk)
> +		return -EINVAL;
> +
> +	if (!spin_is_locked(&msk->pm.lock))
> +		return -EINVAL;
> +
> +	kit->pos = &msk->pm.userspace_pm_local_addr_list;
> +	return 0;
> +}
> +
> +__bpf_kfunc static struct mptcp_pm_addr_entry *
> +bpf_iter_mptcp_userspace_pm_addr_next(struct
> bpf_iter_mptcp_userspace_pm_addr *it)
> +{
> +	struct bpf_iter_mptcp_userspace_pm_addr_kern *kit = (void
> *)it;
> +
> +	if (!kit->msk || list_is_last(kit->pos,
> +				      &kit->msk-
> >pm.userspace_pm_local_addr_list))
> +		return NULL;
> +
> +	kit->pos = kit->pos->next;
> +	return list_entry(kit->pos, struct mptcp_pm_addr_entry,
> list);
> +}
> +
> +__bpf_kfunc static void
> +bpf_iter_mptcp_userspace_pm_addr_destroy(struct
> bpf_iter_mptcp_userspace_pm_addr *it)
> +{
> +}
> +
>  __bpf_kfunc static struct
>  mptcp_sock *bpf_mptcp_sock_acquire(struct mptcp_sock *msk)
>  {
> @@ -310,6 +359,9 @@ BTF_ID_FLAGS(func, bpf_mptcp_subflow_ctx,
> KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_new, KF_ITER_NEW |
> KF_TRUSTED_ARGS)
>  BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_next, KF_ITER_NEXT |
> KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_destroy, KF_ITER_DESTROY)
> +BTF_ID_FLAGS(func, bpf_iter_mptcp_userspace_pm_addr_new, KF_ITER_NEW
> | KF_TRUSTED_ARGS)
> +BTF_ID_FLAGS(func, bpf_iter_mptcp_userspace_pm_addr_next,
> KF_ITER_NEXT | KF_RET_NULL)
> +BTF_ID_FLAGS(func, bpf_iter_mptcp_userspace_pm_addr_destroy,
> KF_ITER_DESTROY)
>  BTF_ID_FLAGS(func, bpf_mptcp_sock_acquire, KF_ACQUIRE | KF_RET_NULL)
>  BTF_ID_FLAGS(func, bpf_mptcp_sock_release, KF_RELEASE)
>  BTF_KFUNCS_END(bpf_mptcp_common_kfunc_ids)
diff mbox series

Patch

diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index 923895322b2c..c7f5d208f6cc 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -220,6 +220,15 @@  struct bpf_iter_mptcp_subflow_kern {
 	struct list_head *pos;
 } __aligned(8);
 
+struct bpf_iter_mptcp_userspace_pm_addr {
+	__u64 __opaque[2];
+} __aligned(8);
+
+struct bpf_iter_mptcp_userspace_pm_addr_kern {
+	struct mptcp_sock *msk;
+	struct list_head *pos;
+} __aligned(8);
+
 __bpf_kfunc_start_defs();
 
 __bpf_kfunc static struct mptcp_subflow_context *
@@ -273,6 +282,46 @@  bpf_iter_mptcp_subflow_destroy(struct bpf_iter_mptcp_subflow *it)
 {
 }
 
+__bpf_kfunc static int
+bpf_iter_mptcp_userspace_pm_addr_new(struct bpf_iter_mptcp_userspace_pm_addr *it,
+				     struct mptcp_sock *msk)
+{
+	struct bpf_iter_mptcp_userspace_pm_addr_kern *kit = (void *)it;
+
+	BUILD_BUG_ON(sizeof(struct bpf_iter_mptcp_userspace_pm_addr_kern) >
+		     sizeof(struct bpf_iter_mptcp_userspace_pm_addr));
+	BUILD_BUG_ON(__alignof__(struct bpf_iter_mptcp_userspace_pm_addr_kern) !=
+		     __alignof__(struct bpf_iter_mptcp_userspace_pm_addr));
+
+	kit->msk = msk;
+	if (!msk)
+		return -EINVAL;
+
+	if (!spin_is_locked(&msk->pm.lock))
+		return -EINVAL;
+
+	kit->pos = &msk->pm.userspace_pm_local_addr_list;
+	return 0;
+}
+
+__bpf_kfunc static struct mptcp_pm_addr_entry *
+bpf_iter_mptcp_userspace_pm_addr_next(struct bpf_iter_mptcp_userspace_pm_addr *it)
+{
+	struct bpf_iter_mptcp_userspace_pm_addr_kern *kit = (void *)it;
+
+	if (!kit->msk || list_is_last(kit->pos,
+				      &kit->msk->pm.userspace_pm_local_addr_list))
+		return NULL;
+
+	kit->pos = kit->pos->next;
+	return list_entry(kit->pos, struct mptcp_pm_addr_entry, list);
+}
+
+__bpf_kfunc static void
+bpf_iter_mptcp_userspace_pm_addr_destroy(struct bpf_iter_mptcp_userspace_pm_addr *it)
+{
+}
+
 __bpf_kfunc static struct
 mptcp_sock *bpf_mptcp_sock_acquire(struct mptcp_sock *msk)
 {
@@ -310,6 +359,9 @@  BTF_ID_FLAGS(func, bpf_mptcp_subflow_ctx, KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_mptcp_subflow_destroy, KF_ITER_DESTROY)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_userspace_pm_addr_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_userspace_pm_addr_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_mptcp_userspace_pm_addr_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_mptcp_sock_acquire, KF_ACQUIRE | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_mptcp_sock_release, KF_RELEASE)
 BTF_KFUNCS_END(bpf_mptcp_common_kfunc_ids)