Context |
Check |
Description |
bpf/vmtest-bpf-next-PR |
success
|
PR summary
|
netdev/tree_selection |
success
|
Guessing tree name failed - patch did not apply, async
|
bpf/vmtest-bpf-next-VM_Test-0 |
success
|
Logs for Lint
|
bpf/vmtest-bpf-next-VM_Test-3 |
success
|
Logs for Validate matrix.py
|
bpf/vmtest-bpf-next-VM_Test-2 |
success
|
Logs for Unittests
|
bpf/vmtest-bpf-next-VM_Test-1 |
success
|
Logs for ShellCheck
|
bpf/vmtest-bpf-next-VM_Test-5 |
success
|
Logs for aarch64-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-6 |
success
|
Logs for aarch64-gcc / test
|
bpf/vmtest-bpf-next-VM_Test-7 |
success
|
Logs for aarch64-gcc / veristat
|
bpf/vmtest-bpf-next-VM_Test-9 |
success
|
Logs for s390x-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-4 |
fail
|
Logs for aarch64-gcc / build / build for aarch64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-26 |
success
|
Logs for x86_64-llvm-17 / veristat
|
bpf/vmtest-bpf-next-VM_Test-34 |
success
|
Logs for x86_64-llvm-18 / veristat
|
bpf/vmtest-bpf-next-VM_Test-8 |
fail
|
Logs for s390x-gcc / build / build for s390x with gcc
|
bpf/vmtest-bpf-next-VM_Test-10 |
success
|
Logs for s390x-gcc / test
|
bpf/vmtest-bpf-next-VM_Test-11 |
success
|
Logs for s390x-gcc / veristat
|
bpf/vmtest-bpf-next-VM_Test-12 |
success
|
Logs for set-matrix
|
bpf/vmtest-bpf-next-VM_Test-13 |
success
|
Logs for x86_64-gcc / build / build for x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-14 |
success
|
Logs for x86_64-gcc / build-release
|
bpf/vmtest-bpf-next-VM_Test-17 |
pending
|
Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-22 |
success
|
Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-25 |
pending
|
Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-28 |
success
|
Logs for x86_64-llvm-17 / veristat
|
bpf/vmtest-bpf-next-VM_Test-29 |
success
|
Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-30 |
success
|
Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
|
bpf/vmtest-bpf-next-VM_Test-32 |
pending
|
Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-33 |
pending
|
Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-36 |
success
|
Logs for x86_64-llvm-18 / veristat
|
bpf/vmtest-bpf-next-VM_Test-15 |
success
|
Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-16 |
success
|
Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-18 |
success
|
Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-19 |
success
|
Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-20 |
success
|
Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-21 |
success
|
Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
|
bpf/vmtest-bpf-next-VM_Test-23 |
success
|
Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
|
bpf/vmtest-bpf-next-VM_Test-24 |
success
|
Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-27 |
success
|
Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
|
bpf/vmtest-bpf-next-VM_Test-31 |
success
|
Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
|
bpf/vmtest-bpf-next-VM_Test-35 |
success
|
Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
|
@@ -452,6 +452,10 @@ static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
.set = &hid_bpf_syscall_kfunc_ids,
};
+BTF_ID_LIST(hid_bpf_dtor_id_list)
+BTF_ID(struct, hid_bpf_ctx)
+BTF_ID(func, hid_bpf_release_context)
+
int hid_bpf_connect_device(struct hid_device *hdev)
{
struct hid_bpf_prog_list *prog_list;
@@ -496,6 +500,13 @@ EXPORT_SYMBOL_GPL(hid_bpf_device_init);
static int __init hid_bpf_init(void)
{
+ const struct btf_id_dtor_kfunc dtors[] = {
+ {
+ .btf_id = hid_bpf_dtor_id_list[0],
+ .kfunc_btf_id = hid_bpf_dtor_id_list[1],
+ .flags = BPF_DTOR_CLEANUP,
+ },
+ };
int err;
/* Note: if we exit with an error any time here, we would entirely break HID, which
@@ -505,6 +516,12 @@ static int __init hid_bpf_init(void)
* will not be available, so nobody will be able to use the functionality.
*/
+ err = register_btf_id_dtor_kfuncs(dtors, ARRAY_SIZE(dtors), THIS_MODULE);
+ if (err) {
+ pr_warn("error while registering hid_bpf cleanup dtors: %d", err);
+ return 0;
+ }
+
err = register_btf_fmodret_id_set(&hid_bpf_fmodret_set);
if (err) {
pr_warn("error while registering fmodret entrypoints: %d", err);
@@ -120,9 +120,15 @@ struct btf_kfunc_id_set {
btf_kfunc_filter_t filter;
};
+enum {
+ BPF_DTOR_KPTR = (1 << 0),
+ BPF_DTOR_CLEANUP = (1 << 1),
+};
+
struct btf_id_dtor_kfunc {
u32 btf_id;
u32 kfunc_btf_id;
+ u32 flags;
};
struct btf_struct_meta {
@@ -521,7 +527,7 @@ u32 *btf_kfunc_is_modify_return(const struct btf *btf, u32 kfunc_btf_id,
int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
const struct btf_kfunc_id_set *s);
int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset);
-s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id);
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id, u32 flags);
int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt,
struct module *owner);
struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id);
@@ -555,7 +561,7 @@ static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type,
{
return 0;
}
-static inline s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+static inline s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id, u32 flags)
{
return -ENOENT;
}
@@ -3657,7 +3657,7 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
* can be used as a referenced pointer and be stored in a map at
* the same time.
*/
- dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
+ dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id, BPF_DTOR_KPTR);
if (dtor_btf_id < 0) {
ret = dtor_btf_id;
goto end_btf;
@@ -8144,7 +8144,7 @@ int register_btf_fmodret_id_set(const struct btf_kfunc_id_set *kset)
}
EXPORT_SYMBOL_GPL(register_btf_fmodret_id_set);
-s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
+s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id, u32 flags)
{
struct btf_id_dtor_kfunc_tab *tab = btf->dtor_kfunc_tab;
struct btf_id_dtor_kfunc *dtor;
@@ -8156,7 +8156,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id)
*/
BUILD_BUG_ON(offsetof(struct btf_id_dtor_kfunc, btf_id) != 0);
dtor = bsearch(&btf_id, tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func);
- if (!dtor)
+ if (!dtor || !(dtor->flags & flags))
return -ENOENT;
return dtor->kfunc_btf_id;
}
@@ -8171,6 +8171,11 @@ static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc
for (i = 0; i < cnt; i++) {
dtor_btf_id = dtors[i].kfunc_btf_id;
+ if (!dtors[i].flags) {
+ pr_err("missing flag for btf_id_dtor_kfunc entry\n");
+ return -EINVAL;
+ }
+
dtor_func = btf_type_by_id(btf, dtor_btf_id);
if (!dtor_func || !btf_type_is_func(dtor_func))
return -EINVAL;
@@ -467,7 +467,8 @@ static int __init cpumask_kfunc_init(void)
const struct btf_id_dtor_kfunc cpumask_dtors[] = {
{
.btf_id = cpumask_dtor_ids[0],
- .kfunc_btf_id = cpumask_dtor_ids[1]
+ .kfunc_btf_id = cpumask_dtor_ids[1],
+ .flags = BPF_DTOR_KPTR | BPF_DTOR_CLEANUP,
},
};
@@ -2685,9 +2685,19 @@ static const struct btf_kfunc_id_set generic_kfunc_set = {
BTF_ID_LIST(generic_dtor_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, bpf_task_release_dtor)
+BTF_ID(struct, bpf_iter_num)
+BTF_ID(func, bpf_iter_num_destroy)
+BTF_ID(struct, bpf_iter_task)
+BTF_ID(func, bpf_iter_task_destroy)
+BTF_ID(struct, bpf_iter_task_vma)
+BTF_ID(func, bpf_iter_task_vma_destroy)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
BTF_ID(func, bpf_cgroup_release_dtor)
+BTF_ID(struct, bpf_iter_css)
+BTF_ID(func, bpf_iter_css_destroy)
+BTF_ID(struct, bpf_iter_css_task)
+BTF_ID(func, bpf_iter_css_task_destroy)
#endif
BTF_KFUNCS_START(common_btf_ids)
@@ -2732,12 +2742,39 @@ static int __init kfunc_init(void)
const struct btf_id_dtor_kfunc generic_dtors[] = {
{
.btf_id = generic_dtor_ids[0],
- .kfunc_btf_id = generic_dtor_ids[1]
+ .kfunc_btf_id = generic_dtor_ids[1],
+ .flags = BPF_DTOR_KPTR | BPF_DTOR_CLEANUP,
},
-#ifdef CONFIG_CGROUPS
{
.btf_id = generic_dtor_ids[2],
- .kfunc_btf_id = generic_dtor_ids[3]
+ .kfunc_btf_id = generic_dtor_ids[3],
+ .flags = BPF_DTOR_CLEANUP,
+ },
+ {
+ .btf_id = generic_dtor_ids[4],
+ .kfunc_btf_id = generic_dtor_ids[5],
+ .flags = BPF_DTOR_CLEANUP,
+ },
+ {
+ .btf_id = generic_dtor_ids[6],
+ .kfunc_btf_id = generic_dtor_ids[7],
+ .flags = BPF_DTOR_CLEANUP,
+ },
+#ifdef CONFIG_CGROUPS
+ {
+ .btf_id = generic_dtor_ids[8],
+ .kfunc_btf_id = generic_dtor_ids[9],
+ .flags = BPF_DTOR_KPTR | BPF_DTOR_CLEANUP,
+ },
+ {
+ .btf_id = generic_dtor_ids[10],
+ .kfunc_btf_id = generic_dtor_ids[11],
+ .flags = BPF_DTOR_CLEANUP,
+ },
+ {
+ .btf_id = generic_dtor_ids[12],
+ .kfunc_btf_id = generic_dtor_ids[13],
+ .flags = BPF_DTOR_CLEANUP,
},
#endif
};
@@ -1426,8 +1426,24 @@ static const struct btf_kfunc_id_set bpf_key_sig_kfunc_set = {
.set = &key_sig_kfunc_set,
};
+BTF_ID_LIST(bpf_key_dtor_id_list)
+BTF_ID(struct, bpf_key)
+BTF_ID(func, bpf_key_put)
+
static int __init bpf_key_sig_kfuncs_init(void)
{
+ const struct btf_id_dtor_kfunc dtors[] = {
+ {
+ .btf_id = bpf_key_dtor_id_list[0],
+ .kfunc_btf_id = bpf_key_dtor_id_list[1],
+ .flags = BPF_DTOR_CLEANUP,
+ },
+ };
+ int ret;
+
+ ret = register_btf_id_dtor_kfuncs(dtors, ARRAY_SIZE(dtors), THIS_MODULE);
+ if (ret < 0)
+ return 0;
return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
&bpf_key_sig_kfunc_set);
}
@@ -1691,11 +1691,13 @@ static int __init bpf_prog_test_run_init(void)
const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
{
.btf_id = bpf_prog_test_dtor_kfunc_ids[0],
- .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
+ .kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1],
+ .flags = BPF_DTOR_KPTR,
},
{
.btf_id = bpf_prog_test_dtor_kfunc_ids[2],
.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
+ .flags = BPF_DTOR_KPTR,
},
};
int ret;
@@ -485,11 +485,23 @@ static const struct btf_kfunc_id_set nf_conntrack_kfunc_set = {
.set = &nf_ct_kfunc_set,
};
+BTF_ID_LIST(nf_dtor_id_list)
+BTF_ID(struct, nf_conn)
+BTF_ID(func, bpf_ct_release)
+
int register_nf_conntrack_bpf(void)
{
+ const struct btf_id_dtor_kfunc dtors[] = {
+ {
+ .btf_id = nf_dtor_id_list[0],
+ .kfunc_btf_id = nf_dtor_id_list[1],
+ .flags = BPF_DTOR_CLEANUP,
+ },
+ };
int ret;
- ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &nf_conntrack_kfunc_set);
+ ret = register_btf_id_dtor_kfuncs(dtors, ARRAY_SIZE(dtors), THIS_MODULE);
+ ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &nf_conntrack_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &nf_conntrack_kfunc_set);
if (!ret) {
mutex_lock(&nf_conn_btf_access_lock);
@@ -127,8 +127,24 @@ static const struct btf_kfunc_id_set xfrm_state_xdp_kfunc_set = {
.set = &xfrm_state_kfunc_set,
};
+BTF_ID_LIST(dtor_id_list)
+BTF_ID(struct, xfrm_state)
+BTF_ID(func, bpf_xdp_xfrm_state_release)
+
int __init register_xfrm_state_bpf(void)
{
+ const struct btf_id_dtor_kfunc dtors[] = {
+ {
+ .btf_id = dtor_id_list[0],
+ .kfunc_btf_id = dtor_id_list[1],
+ .flags = BPF_DTOR_CLEANUP,
+ },
+ };
+ int ret;
+
+ ret = register_btf_id_dtor_kfuncs(dtors, ARRAY_SIZE(dtors), THIS_MODULE);
+ if (ret < 0)
+ return ret;
return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP,
&xfrm_state_xdp_kfunc_set);
}
Reuse existing BTF dtor infrastructure to also include dtor kfuncs that can be used to release PTR_TO_BTF_ID pointers and other BTF objects (iterators). For this purpose, we extend btf_id_dtor_kfunc object with a flags field, and ensure that entries that cannot work as kptrs are not allowed to be embedded in map values. Prior to this change, btf_id_dtor_kfunc served a dual role of allow list of kptrs and finding their dtors. To separate this role, we must now explicitly pass only BPF_DTOR_KPTR to ensure we don't look up other cleanup kfuncs in the dtor table. Finally, set up iterator and other objects that can be acquired to be released by adding their cleanup kfunc dtor entries and registering them with the BTF. Cc: Jiri Kosina <jikos@kernel.org> Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Florian Westphal <fw@strlen.de> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com> --- drivers/hid/bpf/hid_bpf_dispatch.c | 17 ++++++++++++ include/linux/btf.h | 10 +++++-- kernel/bpf/btf.c | 11 +++++--- kernel/bpf/cpumask.c | 3 ++- kernel/bpf/helpers.c | 43 +++++++++++++++++++++++++++--- kernel/trace/bpf_trace.c | 16 +++++++++++ net/bpf/test_run.c | 4 ++- net/netfilter/nf_conntrack_bpf.c | 14 +++++++++- net/xfrm/xfrm_state_bpf.c | 16 +++++++++++ 9 files changed, 123 insertions(+), 11 deletions(-)