
[bpf-next,v6,4/5] bpf: Make fs kfuncs available for SYSCALL program type

Message ID AM6PR03MB5080E0DFE4F9BAFFDB9D113B99042@AM6PR03MB5080.eurprd03.prod.outlook.com (mailing list archive)
State: Changes Requested
Delegated to: BPF
Series: bpf: Add open-coded style process file iterator and bpf_fget_task() kfunc

Checks

Context Check Description
bpf/vmtest-bpf-next-VM_Test-49 success Logs for x86_64-llvm-18 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-50 success Logs for x86_64-llvm-18 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-48 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-47 fail Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1 this patch: 1
netdev/build_tools success Errors and warnings before: 0 (+0) this patch: 0 (+0)
netdev/cc_maintainers warning 6 maintainers not CCed: jack@suse.cz mattbobrowski@google.com linux-kselftest@vger.kernel.org viro@zeniv.linux.org.uk mykolal@fb.com shuah@kernel.org
netdev/build_clang success Errors and warnings before: 6 this patch: 6
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 7 this patch: 7
netdev/checkpatch warning WARNING: line length of 90 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 1 this patch: 1
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-11 success Logs for aarch64-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-19 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-17 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-17 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-18 success Logs for s390x-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-45 success Logs for x86_64-llvm-18 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-46 success Logs for x86_64-llvm-18 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-7 fail Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-gcc / veristat-meta / x86_64-gcc veristat_meta
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-gcc / veristat-kernel / x86_64-gcc veristat_kernel
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-44 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 fail Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 fail Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 fail Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-41 fail Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-42 fail Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-43 fail Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18

Commit Message

Juntong Deng Dec. 17, 2024, 11:37 p.m. UTC
Currently, fs kfuncs are only available for the LSM program type, but
they are generic and useful in scenarios other than LSM.

This patch makes fs kfuncs available for the SYSCALL program type.

Signed-off-by: Juntong Deng <juntong.deng@outlook.com>
---
 fs/bpf_fs_kfuncs.c                            | 20 ++++---------------
 .../selftests/bpf/progs/verifier_vfs_reject.c | 10 ----------
 2 files changed, 4 insertions(+), 26 deletions(-)
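
[Editor's note: for context, a minimal sketch of the kind of syscall program this change is meant to enable. This is illustrative only and not part of the series; the __ksym prototypes mirror fs/bpf_fs_kfuncs.c, and the availability of bpf_get_current_task_btf() to syscall programs (via the tracing helper fallback) is an assumption.]

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only: a BPF_PROG_TYPE_SYSCALL program using the fs
 * kfuncs once they are registered for that program type.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct file *bpf_get_task_exe_file(struct task_struct *task) __ksym;
void bpf_put_file(struct file *file) __ksym;
int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz) __ksym;

char buf[256];

SEC("syscall")
int dump_current_exe_path(void *ctx)
{
        struct task_struct *task = bpf_get_current_task_btf();
        struct file *exe_file;

        exe_file = bpf_get_task_exe_file(task);
        if (!exe_file)
                return 0;

        /* f_path of a trusted file is itself usable as a trusted argument,
         * as exercised by the existing verifier_vfs_accept.c selftests.
         */
        bpf_path_d_path(&exe_file->f_path, buf, sizeof(buf));
        bpf_put_file(exe_file);
        return 0;
}

char _license[] SEC("license") = "GPL";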

Comments

Alexei Starovoitov Dec. 19, 2024, 4:41 p.m. UTC | #1
On Tue, Dec 17, 2024 at 3:45 PM Juntong Deng <juntong.deng@outlook.com> wrote:
>
> -static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
> -{
> -       if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) ||
> -           prog->type == BPF_PROG_TYPE_LSM)
> -               return 0;
> -       return -EACCES;
> -}
> -
>  static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
>         .owner = THIS_MODULE,
>         .set = &bpf_fs_kfunc_set_ids,
> -       .filter = bpf_fs_kfuncs_filter,
>  };
>
>  static int __init bpf_fs_kfuncs_init(void)
>  {
> -       return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
> +       int ret;
> +
> +       ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
> +       return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_fs_kfunc_set);
>  }
>
>  late_initcall(bpf_fs_kfuncs_init);
> diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
> index d6d3f4fcb24c..5aab75fd2fa5 100644
> --- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
> +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
> @@ -148,14 +148,4 @@ int BPF_PROG(path_d_path_kfunc_invalid_buf_sz, struct file *file)
>         return 0;
>  }
>
> -SEC("fentry/vfs_open")
> -__failure __msg("calling kernel function bpf_path_d_path is not allowed")

This is incorrect.
You have to keep bpf_fs_kfuncs_filter() and prog->type == BPF_PROG_TYPE_LSM
check because bpf_prog_type_to_kfunc_hook() aliases LSM and fentry
into BTF_KFUNC_HOOK_TRACING category. It's been an annoying quirk.
We're figuring out details for significant refactoring of
register_btf_kfunc_id_set() and the whole registration process.

Maybe you would be interested in working on it?

The main goal is to get rid of run-time mask check in SCX_CALL_OP() and
make it static by the verifier. To make that happen scx_kf_mask flags
would need to become KF_* flags while each struct-ops callback will
specify the expected mask.
Then at struct-ops prog attach time the verifier will see the expected mask
and can check that all kfuncs calls of this particular program
satisfy the mask. Then all of the runtime overhead of
current->scx.kf_mask and scx_kf_allowed() will go away.
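
[Editor's note: following this advice means keeping bpf_fs_kfuncs_filter() and extending it rather than deleting it. A sketch of such a filter, based on the code removed in this patch (not the posted patch itself):]

static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
{
        /* Keep rejecting fentry/fexit and other tracing programs that share
         * the BTF_KFUNC_HOOK_TRACING hook with LSM, while additionally
         * allowing syscall programs.
         */
        if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) ||
            prog->type == BPF_PROG_TYPE_LSM ||
            prog->type == BPF_PROG_TYPE_SYSCALL)
                return 0;
        return -EACCES;
}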
Juntong Deng Dec. 24, 2024, 12:51 a.m. UTC | #2
On 2024/12/19 16:41, Alexei Starovoitov wrote:
> On Tue, Dec 17, 2024 at 3:45 PM Juntong Deng <juntong.deng@outlook.com> wrote:
>>
>> -static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
>> -{
>> -       if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) ||
>> -           prog->type == BPF_PROG_TYPE_LSM)
>> -               return 0;
>> -       return -EACCES;
>> -}
>> -
>>   static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
>>          .owner = THIS_MODULE,
>>          .set = &bpf_fs_kfunc_set_ids,
>> -       .filter = bpf_fs_kfuncs_filter,
>>   };
>>
>>   static int __init bpf_fs_kfuncs_init(void)
>>   {
>> -       return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
>> +       int ret;
>> +
>> +       ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
>> +       return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_fs_kfunc_set);
>>   }
>>
>>   late_initcall(bpf_fs_kfuncs_init);
>> diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
>> index d6d3f4fcb24c..5aab75fd2fa5 100644
>> --- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
>> +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
>> @@ -148,14 +148,4 @@ int BPF_PROG(path_d_path_kfunc_invalid_buf_sz, struct file *file)
>>          return 0;
>>   }
>>
>> -SEC("fentry/vfs_open")
>> -__failure __msg("calling kernel function bpf_path_d_path is not allowed")
> 
> This is incorrect.
> You have to keep bpf_fs_kfuncs_filter() and prog->type == BPF_PROG_TYPE_LSM
> check because bpf_prog_type_to_kfunc_hook() aliases LSM and fentry
> into BTF_KFUNC_HOOK_TRACING category. It's been an annoying quirk.
> We're figuring out details for significant refactoring of
> register_btf_kfunc_id_set() and the whole registration process.
> 
> Maybe you would be interested in working on it?
> 
> The main goal is to get rid of run-time mask check in SCX_CALL_OP() and
> make it static by the verifier. To make that happen scx_kf_mask flags
> would need to become KF_* flags while each struct-ops callback will
> specify the expected mask.
> Then at struct-ops prog attach time the verifier will see the expected mask
> and can check that all kfuncs calls of this particular program
> satisfy the mask. Then all of the runtime overhead of
> current->scx.kf_mask and scx_kf_allowed() will go away.

Thanks for pointing this out.

Yes, I am interested in working on it.

I will try to solve this problem in a separate patch series.


The following are my thoughts:

Should we really use KF_* to do this? I think KF_* is currently more
like declaring that a kfunc has some kind of attribute, e.g.
KF_TRUSTED_ARGS means that the kfunc only accepts trusted arguments,
rather than being used to categorise kfuncs.

It is not sustainable to restrict the kfuncs that can be used based on
program types, which are coarse-grained. This problem will get worse as
the number of kfuncs grows.

In my opinion, managing the kfuncs available to bpf programs should be
implemented as capabilities. Capabilities are a mature permission model.
We can treat a set of kfuncs as a capability (like the various current
kfunc_sets, but the current kfunc_sets did not carefully divide
permissions).

We should use separate BPF_CAP_XXX flags to manage these capabilities.
For example, SCX may define BPF_CAP_SCX_DISPATCH.

For program types, we should divide them into two levels, types and
subtypes. Types are used to register common capabilities and subtypes
are used to register specific capabilities. The verifier can check if
the used kfuncs are allowed based on the type and subtype of the bpf
program.

I understand that we need to maintain backward compatibility with
userspace, but capabilities would be an internal kernel change.
Perhaps we can treat the current program types as subtypes and
add 'types' that are used only internally; more subtypes
(program types) can be added in the future.
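
[Editor's note: a purely hypothetical sketch of the capability-based registration proposed above. None of these identifiers exist in the kernel today; only struct btf_id_set8 is real.]

#include <linux/btf_ids.h>

/* Hypothetical: a named capability grouping one or more kfunc ID sets. */
enum bpf_kfunc_cap {
        BPF_CAP_FS_QUERY,       /* e.g. bpf_get_task_exe_file, bpf_path_d_path */
        BPF_CAP_FS_MODIFY,
        BPF_CAP_SCX_DISPATCH,
};

/* Hypothetical: registration binds a kfunc ID set to a capability. */
struct bpf_kfunc_cap_set {
        enum bpf_kfunc_cap cap;
        const struct btf_id_set8 *ids;
};

/* Hypothetical: a program type/subtype would declare the capabilities it
 * grants, and the verifier would reject any kfunc call whose set is not
 * covered by the capabilities granted to the program being loaded.
 */
int bpf_register_kfunc_cap(const struct bpf_kfunc_cap_set *cap_set);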
Juntong Deng Dec. 24, 2024, 12:10 p.m. UTC | #3
On 2024/12/24 00:51, Juntong Deng wrote:
> On 2024/12/19 16:41, Alexei Starovoitov wrote:
>> On Tue, Dec 17, 2024 at 3:45 PM Juntong Deng 
>> <juntong.deng@outlook.com> wrote:
>>>
>>> -static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 
>>> kfunc_id)
>>> -{
>>> -       if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) ||
>>> -           prog->type == BPF_PROG_TYPE_LSM)
>>> -               return 0;
>>> -       return -EACCES;
>>> -}
>>> -
>>>   static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
>>>          .owner = THIS_MODULE,
>>>          .set = &bpf_fs_kfunc_set_ids,
>>> -       .filter = bpf_fs_kfuncs_filter,
>>>   };
>>>
>>>   static int __init bpf_fs_kfuncs_init(void)
>>>   {
>>> -       return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, 
>>> &bpf_fs_kfunc_set);
>>> +       int ret;
>>> +
>>> +       ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, 
>>> &bpf_fs_kfunc_set);
>>> +       return ret ?: 
>>> register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_fs_kfunc_set);
>>>   }
>>>
>>>   late_initcall(bpf_fs_kfuncs_init);
>>> diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c 
>>> b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
>>> index d6d3f4fcb24c..5aab75fd2fa5 100644
>>> --- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
>>> +++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
>>> @@ -148,14 +148,4 @@ int BPF_PROG(path_d_path_kfunc_invalid_buf_sz, 
>>> struct file *file)
>>>          return 0;
>>>   }
>>>
>>> -SEC("fentry/vfs_open")
>>> -__failure __msg("calling kernel function bpf_path_d_path is not 
>>> allowed")
>>
>> This is incorrect.
>> You have to keep bpf_fs_kfuncs_filter() and prog->type == 
>> BPF_PROG_TYPE_LSM
>> check because bpf_prog_type_to_kfunc_hook() aliases LSM and fentry
>> into BTF_KFUNC_HOOK_TRACING category. It's been an annoying quirk.
>> We're figuring out details for significant refactoring of
>> register_btf_kfunc_id_set() and the whole registration process.
>>
>> Maybe you would be interested in working on it?
>>
>> The main goal is to get rid of run-time mask check in SCX_CALL_OP() and
>> make it static by the verifier. To make that happen scx_kf_mask flags
>> would need to become KF_* flags while each struct-ops callback will
>> specify the expected mask.
>> Then at struct-ops prog attach time the verifier will see the expected 
>> mask
>> and can check that all kfuncs calls of this particular program
>> satisfy the mask. Then all of the runtime overhead of
>> current->scx.kf_mask and scx_kf_allowed() will go away.
> 
> Thanks for pointing this out.
> 
> Yes, I am interested in working on it.
> 
> I will try to solve this problem in a separate patch series.
> 
> 
> The following are my thoughts:
> 
> Should we really use KF_* to do this? I think KF_* is currently more
> like declaring that a kfunc has some kind of attribute, e.g.
> KF_TRUSTED_ARGS means that the kfunc only accepts trusted arguments,
> rather than being used to categorise kfuncs.
> 
> It is not sustainable to restrict the kfuncs that can be used based on
> program types, which are coarse-grained. This problem will get worse as
> the number of kfuncs grows.
> 
> In my opinion, managing the kfuncs available to bpf programs should be
> implemented as capabilities. Capabilities are a mature permission model.
> We can treat a set of kfuncs as a capability (like the various current
> kfunc_sets, but the current kfunc_sets did not carefully divide
> permissions).
> 
> We should use separate BPF_CAP_XXX flags to manage these capabilities.
> For example, SCX may define BPF_CAP_SCX_DISPATCH.
> 
> For program types, we should divide them into two levels, types and
> subtypes. Types are used to register common capabilities and subtypes
> are used to register specific capabilities. The verifier can check if
> the used kfuncs are allowed based on the type and subtype of the bpf
> program.
> 
> I understand that we need to maintain backward compatibility with
> userspace, but capabilities would be an internal kernel change.
> Perhaps we can treat the current program types as subtypes and
> add 'types' that are used only internally; more subtypes
> (program types) can be added in the future.

Sorry, this email was sent at midnight before I went to bed, and yes,
it looks a bit radical.

But in the long run, as eBPF is used in more and more scenarios
(as a better kernel module) and we gain more and more kfuncs
(as a better EXPORT_SYMBOL_GPL), managing (restricting) the kfuncs
that can be used in different contexts will become more and more
complex.

Therefore it might be better for eBPF to transition to fine-grained
permission management (capabilities).

Maybe we can have more discussion.

Many thanks.

Patch

diff --git a/fs/bpf_fs_kfuncs.c b/fs/bpf_fs_kfuncs.c
index 4a810046dcf3..6010fccc9db8 100644
--- a/fs/bpf_fs_kfuncs.c
+++ b/fs/bpf_fs_kfuncs.c
@@ -26,8 +26,6 @@  __bpf_kfunc_start_defs();
  * acquired by this BPF kfunc will result in the BPF program being rejected by
  * the BPF verifier.
  *
- * This BPF kfunc may only be called from BPF LSM programs.
- *
  * Internally, this BPF kfunc leans on get_task_exe_file(), such that calling
  * bpf_get_task_exe_file() would be analogous to calling get_task_exe_file()
  * directly in kernel context.
@@ -49,8 +47,6 @@  __bpf_kfunc struct file *bpf_get_task_exe_file(struct task_struct *task)
  * passed to this BPF kfunc. Attempting to pass an unreferenced file pointer, or
  * any other arbitrary pointer for that matter, will result in the BPF program
  * being rejected by the BPF verifier.
- *
- * This BPF kfunc may only be called from BPF LSM programs.
  */
 __bpf_kfunc void bpf_put_file(struct file *file)
 {
@@ -70,8 +66,6 @@  __bpf_kfunc void bpf_put_file(struct file *file)
  * reference, or else the BPF program will be outright rejected by the BPF
  * verifier.
  *
- * This BPF kfunc may only be called from BPF LSM programs.
- *
  * Return: A positive integer corresponding to the length of the resolved
  * pathname in *buf*, including the NUL termination character. On error, a
  * negative integer is returned.
@@ -181,23 +175,17 @@  BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_fget_task, KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
 BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)
 
-static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
-{
-	if (!btf_id_set8_contains(&bpf_fs_kfunc_set_ids, kfunc_id) ||
-	    prog->type == BPF_PROG_TYPE_LSM)
-		return 0;
-	return -EACCES;
-}
-
 static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
 	.owner = THIS_MODULE,
 	.set = &bpf_fs_kfunc_set_ids,
-	.filter = bpf_fs_kfuncs_filter,
 };
 
 static int __init bpf_fs_kfuncs_init(void)
 {
-	return register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
+	int ret;
+
+	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_LSM, &bpf_fs_kfunc_set);
+	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_fs_kfunc_set);
 }
 
 late_initcall(bpf_fs_kfuncs_init);
diff --git a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
index d6d3f4fcb24c..5aab75fd2fa5 100644
--- a/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
+++ b/tools/testing/selftests/bpf/progs/verifier_vfs_reject.c
@@ -148,14 +148,4 @@  int BPF_PROG(path_d_path_kfunc_invalid_buf_sz, struct file *file)
 	return 0;
 }
 
-SEC("fentry/vfs_open")
-__failure __msg("calling kernel function bpf_path_d_path is not allowed")
-int BPF_PROG(path_d_path_kfunc_non_lsm, struct path *path, struct file *f)
-{
-	/* Calling bpf_path_d_path() from a non-LSM BPF program isn't permitted.
-	 */
-	bpf_path_d_path(path, buf, sizeof(buf));
-	return 0;
-}
-
 char _license[] SEC("license") = "GPL";