diff mbox series

[1/2] bpf: add kfunc for populating cpumask bits

Message ID 20250228003321.1409285-2-emil@etsalapatis.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series bpf: introduce helper for populating bpf_cpumask | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-49 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-50 success Logs for x86_64-llvm-18 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / GCC BPF
bpf/vmtest-bpf-next-VM_Test-12 success Logs for aarch64-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for s390x-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for aarch64-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-19 success Logs for s390x-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / GCC BPF
bpf/vmtest-bpf-next-VM_Test-43 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-17 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-17 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-44 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-51 success Logs for x86_64-llvm-18 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-21 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-47 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-gcc / veristat-kernel / x86_64-gcc veristat_kernel
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-gcc / veristat-meta / x86_64-gcc veristat_meta
bpf/vmtest-bpf-next-VM_Test-48 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-45 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-46 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Guessed tree name to be net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 7 maintainers not CCed: haoluo@google.com song@kernel.org kpsingh@kernel.org sdf@fomichev.me martin.lau@linux.dev john.fastabend@gmail.com jolsa@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 26 this patch: 27
netdev/checkpatch warning CHECK: From:/Signed-off-by: email comments mismatch: 'From: Emil Tsalapatis <emil@etsalapatis.com>' != 'Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com>' WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc fail Errors and warnings before: 6 this patch: 7
netdev/source_inline success Was 0 now: 0

Commit Message

Emil Tsalapatis Feb. 28, 2025, 12:33 a.m. UTC
Add a helper kfunc that sets the bitmap of a bpf_cpumask from BPF
memory.

Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com>
---
 kernel/bpf/cpumask.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

Comments

Hou Tao March 1, 2025, 12:56 a.m. UTC | #1
Hi,

On 2/28/2025 8:33 AM, Emil Tsalapatis wrote:
> Add a helper kfunc that sets the bitmap of a bpf_cpumask from BPF
> memory.
>
> Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com>
> ---
>  kernel/bpf/cpumask.c | 21 +++++++++++++++++++++
>  1 file changed, 21 insertions(+)
>
> diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
> index cfa1c18e3a48..a13839b3595f 100644
> --- a/kernel/bpf/cpumask.c
> +++ b/kernel/bpf/cpumask.c
> @@ -420,6 +420,26 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
>  	return cpumask_weight(cpumask);
>  }
>  
> +/**
> + * bpf_cpumask_fill() - Populate the CPU mask from the contents of
> + * a BPF memory region.
> + *
> + * @cpumask: The cpumask being populated.
> + * @src: The BPF memory holding the bit pattern.
> + * @src__sz: Length of the BPF memory region in bytes.
> + *
> + */
> +__bpf_kfunc int bpf_cpumask_fill(struct cpumask *cpumask, void *src, size_t src__sz)
> +{
> +	/* The memory region must be large enough to populate the entire CPU mask. */
> +	if (src__sz < BITS_TO_BYTES(nr_cpu_ids))
> +		return -EACCES;
> +
> +	bitmap_copy(cpumask_bits(cpumask), src, nr_cpu_ids);

Should we use src__sz < bitmap_size(nr_cpu_ids) instead ? Because in
bitmap_copy(), it assumes the size of src should be bitmap_size(nr_cpu_ids).
> +
> +	return 0;
> +}
> +
>  __bpf_kfunc_end_defs();
>  
>  BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
> @@ -448,6 +468,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
>  BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
>  BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
>  BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
> +BTF_ID_FLAGS(func, bpf_cpumask_fill, KF_RCU)
>  BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
>  
>  static const struct btf_kfunc_id_set cpumask_kfunc_set = {
Emil Tsalapatis March 4, 2025, 3:18 a.m. UTC | #2
Hi,

On Fri, Feb 28, 2025 at 7:56 PM Hou Tao <houtao@huaweicloud.com> wrote:
>
> Hi,
>
> On 2/28/2025 8:33 AM, Emil Tsalapatis wrote:
> > Add a helper kfunc that sets the bitmap of a bpf_cpumask from BPF
> > memory.
> >
> > Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com>
> > ---
> >  kernel/bpf/cpumask.c | 21 +++++++++++++++++++++
> >  1 file changed, 21 insertions(+)
> >
> > diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
> > index cfa1c18e3a48..a13839b3595f 100644
> > --- a/kernel/bpf/cpumask.c
> > +++ b/kernel/bpf/cpumask.c
> > @@ -420,6 +420,26 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
> >       return cpumask_weight(cpumask);
> >  }
> >
> > +/**
> > + * bpf_cpumask_fill() - Populate the CPU mask from the contents of
> > + * a BPF memory region.
> > + *
> > + * @cpumask: The cpumask being populated.
> > + * @src: The BPF memory holding the bit pattern.
> > + * @src__sz: Length of the BPF memory region in bytes.
> > + *
> > + */
> > +__bpf_kfunc int bpf_cpumask_fill(struct cpumask *cpumask, void *src, size_t src__sz)
> > +{
> > +     /* The memory region must be large enough to populate the entire CPU mask. */
> > +     if (src__sz < BITS_TO_BYTES(nr_cpu_ids))
> > +             return -EACCES;
> > +
> > +     bitmap_copy(cpumask_bits(cpumask), src, nr_cpu_ids);
>
> Should we use src__sz < bitmap_size(nr_cpu_ids) instead ? Because in
> bitmap_copy(), it assumes the size of src should be bitmap_size(nr_cpu_ids).

This is a great catch, thank you. Comparing with
BITS_TO_BYTES(nr_cpu_ids) allows byte-aligned
masks through, even though bitmap_copy assumes all masks are long-aligned.

> > +
> > +     return 0;
> > +}
> > +
> >  __bpf_kfunc_end_defs();
> >
> >  BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
> > @@ -448,6 +468,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
> >  BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
> >  BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
> >  BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
> > +BTF_ID_FLAGS(func, bpf_cpumask_fill, KF_RCU)
> >  BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
> >
> >  static const struct btf_kfunc_id_set cpumask_kfunc_set = {
>
Hou Tao March 4, 2025, 2:04 p.m. UTC | #3
Hi,

On 3/4/2025 11:18 AM, Emil Tsalapatis wrote:
> Hi,
>
> On Fri, Feb 28, 2025 at 7:56 PM Hou Tao <houtao@huaweicloud.com> wrote:
>> Hi,
>>
>> On 2/28/2025 8:33 AM, Emil Tsalapatis wrote:
>>> Add a helper kfunc that sets the bitmap of a bpf_cpumask from BPF
>>> memory.
>>>
>>> Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com>
>>> ---
>>>  kernel/bpf/cpumask.c | 21 +++++++++++++++++++++
>>>  1 file changed, 21 insertions(+)
>>>
>>> diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
>>> index cfa1c18e3a48..a13839b3595f 100644
>>> --- a/kernel/bpf/cpumask.c
>>> +++ b/kernel/bpf/cpumask.c
>>> @@ -420,6 +420,26 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
>>>       return cpumask_weight(cpumask);
>>>  }
>>>
>>> +/**
>>> + * bpf_cpumask_fill() - Populate the CPU mask from the contents of
>>> + * a BPF memory region.
>>> + *
>>> + * @cpumask: The cpumask being populated.
>>> + * @src: The BPF memory holding the bit pattern.
>>> + * @src__sz: Length of the BPF memory region in bytes.
>>> + *
>>> + */
>>> +__bpf_kfunc int bpf_cpumask_fill(struct cpumask *cpumask, void *src, size_t src__sz)
>>> +{
>>> +     /* The memory region must be large enough to populate the entire CPU mask. */
>>> +     if (src__sz < BITS_TO_BYTES(nr_cpu_ids))
>>> +             return -EACCES;
>>> +
>>> +     bitmap_copy(cpumask_bits(cpumask), src, nr_cpu_ids);
>> Should we use src__sz < bitmap_size(nr_cpu_ids) instead ? Because in
>> bitmap_copy(), it assumes the size of src should be bitmap_size(nr_cpu_ids).
> This is a great catch, thank you. Comparing with
> BITS_TO_BYTES(nr_cpu_ids) allows byte-aligned
> masks through, even though bitmap_copy assumes all masks are long-aligned.

Er, the long-aligned assumption raises another problem. Do we need to
make the src pointer be long-aligned because bitmap_copy() may use "*dst
= *src" to dereference the src pointer ? Or would it be better to use
memcpy() to copy the cpumask directly ?
>>> +
>>> +     return 0;
>>> +}
>>> +
>>>  __bpf_kfunc_end_defs();
>>>
>>>  BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
>>> @@ -448,6 +468,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
>>>  BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
>>>  BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
>>>  BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
>>> +BTF_ID_FLAGS(func, bpf_cpumask_fill, KF_RCU)
>>>  BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
>>>
>>>  static const struct btf_kfunc_id_set cpumask_kfunc_set = {
Emil Tsalapatis March 4, 2025, 9:57 p.m. UTC | #4
Hi,


On Tue, Mar 4, 2025 at 9:04 AM Hou Tao <houtao@huaweicloud.com> wrote:
>
> Hi,
>
> On 3/4/2025 11:18 AM, Emil Tsalapatis wrote:
> > Hi,
> >
> > On Fri, Feb 28, 2025 at 7:56 PM Hou Tao <houtao@huaweicloud.com> wrote:
> >> Hi,
> >>
> >> On 2/28/2025 8:33 AM, Emil Tsalapatis wrote:
> >>> Add a helper kfunc that sets the bitmap of a bpf_cpumask from BPF
> >>> memory.
> >>>
> >>> Signed-off-by: Emil Tsalapatis (Meta) <emil@etsalapatis.com>
> >>> ---
> >>>  kernel/bpf/cpumask.c | 21 +++++++++++++++++++++
> >>>  1 file changed, 21 insertions(+)
> >>>
> >>> diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
> >>> index cfa1c18e3a48..a13839b3595f 100644
> >>> --- a/kernel/bpf/cpumask.c
> >>> +++ b/kernel/bpf/cpumask.c
> >>> @@ -420,6 +420,26 @@ __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
> >>>       return cpumask_weight(cpumask);
> >>>  }
> >>>
> >>> +/**
> >>> + * bpf_cpumask_fill() - Populate the CPU mask from the contents of
> >>> + * a BPF memory region.
> >>> + *
> >>> + * @cpumask: The cpumask being populated.
> >>> + * @src: The BPF memory holding the bit pattern.
> >>> + * @src__sz: Length of the BPF memory region in bytes.
> >>> + *
> >>> + */
> >>> +__bpf_kfunc int bpf_cpumask_fill(struct cpumask *cpumask, void *src, size_t src__sz)
> >>> +{
> >>> +     /* The memory region must be large enough to populate the entire CPU mask. */
> >>> +     if (src__sz < BITS_TO_BYTES(nr_cpu_ids))
> >>> +             return -EACCES;
> >>> +
> >>> +     bitmap_copy(cpumask_bits(cpumask), src, nr_cpu_ids);
> >> Should we use src__sz < bitmap_size(nr_cpu_ids) instead ? Because in
> >> bitmap_copy(), it assumes the size of src should be bitmap_size(nr_cpu_ids).
> > This is a great catch, thank you. Comparing with
> > BITS_TO_BYTES(nr_cpu_ids) allows byte-aligned
> > masks through, even though bitmap_copy assumes all masks are long-aligned.
>
> Er, the long-aligned assumption raises another problem. Do we need to
> make the src pointer be long-aligned because bitmap_copy() may use "*dst
> = *src" to dereference the src pointer ? Or would it be better to use
> memcpy() to copy the cpumask directly ?

I would be fine with either, IMO the former is preferable. We are
rounding up the
size of the BPF-side CPU mask to the nearest long anyway, so it makes
sense for the
memory region to be long-aligned. The alternative would make the copy
slightly slower
on machines with nr_cpu_ids <= 64, though at least for sched_ext this
function should
be rare enough that the performance impact should be minimal.

If that makes sense, I will add an alignment check and an associated selftest.




> >>> +
> >>> +     return 0;
> >>> +}
> >>> +
> >>>  __bpf_kfunc_end_defs();
> >>>
> >>>  BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
> >>> @@ -448,6 +468,7 @@ BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
> >>>  BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
> >>>  BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
> >>>  BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
> >>> +BTF_ID_FLAGS(func, bpf_cpumask_fill, KF_RCU)
> >>>  BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
> >>>
> >>>  static const struct btf_kfunc_id_set cpumask_kfunc_set = {
>
diff mbox series

Patch

diff --git a/kernel/bpf/cpumask.c b/kernel/bpf/cpumask.c
index cfa1c18e3a48..a13839b3595f 100644
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -420,6 +420,26 @@  __bpf_kfunc u32 bpf_cpumask_weight(const struct cpumask *cpumask)
 	return cpumask_weight(cpumask);
 }
 
+/**
+ * bpf_cpumask_fill() - Populate the CPU mask from the contents of
+ * a BPF memory region.
+ *
+ * @cpumask: The cpumask being populated.
+ * @src: The BPF memory holding the bit pattern.
+ * @src__sz: Length of the BPF memory region in bytes.
+ *
+ */
+__bpf_kfunc int bpf_cpumask_fill(struct cpumask *cpumask, void *src, size_t src__sz)
+{
+	/* The memory region must be large enough to populate the entire CPU mask. */
+	if (src__sz < BITS_TO_BYTES(nr_cpu_ids))
+		return -EACCES;
+
+	bitmap_copy(cpumask_bits(cpumask), src, nr_cpu_ids);
+
+	return 0;
+}
+
 __bpf_kfunc_end_defs();
 
 BTF_KFUNCS_START(cpumask_kfunc_btf_ids)
@@ -448,6 +468,7 @@  BTF_ID_FLAGS(func, bpf_cpumask_copy, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_any_distribute, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_any_and_distribute, KF_RCU)
 BTF_ID_FLAGS(func, bpf_cpumask_weight, KF_RCU)
+BTF_ID_FLAGS(func, bpf_cpumask_fill, KF_RCU)
 BTF_KFUNCS_END(cpumask_kfunc_btf_ids)
 
 static const struct btf_kfunc_id_set cpumask_kfunc_set = {