diff mbox series

[bpf-next,v5,7/9] selftests/bpf: Test kptr arrays and kptrs in nested struct fields.

Message ID 20240510011312.1488046-8-thinker.li@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series Enable BPF programs to declare arrays of kptr, bpf_rb_root, and bpf_list_head. | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc

Commit Message

Kui-Feng Lee May 10, 2024, 1:13 a.m. UTC
Make sure that BPF programs can declare global kptr arrays and kptr fields
in struct types that are the type of a global variable or the type of a
nested descendant field in a global variable.

An array with only one element is a special case: it is treated like a
non-array kptr field. Nested arrays are also tested to ensure they
are handled properly.

Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
---
 .../selftests/bpf/prog_tests/cpumask.c        |   5 +
 .../selftests/bpf/progs/cpumask_success.c     | 133 ++++++++++++++++++
 2 files changed, 138 insertions(+)

Comments

Eduard Zingerman May 10, 2024, 10:03 a.m. UTC | #1
On Thu, 2024-05-09 at 18:13 -0700, Kui-Feng Lee wrote:
> Make sure that BPF programs can declare global kptr arrays and kptr fields
> in struct types that is the type of a global variable or the type of a
> nested descendant field in a global variable.
> 
> An array with only one element is special case, that it treats the element
> like a non-array kptr field. Nested arrays are also tested to ensure they
> are handled properly.
> 
> Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
> ---
>  .../selftests/bpf/prog_tests/cpumask.c        |   5 +
>  .../selftests/bpf/progs/cpumask_success.c     | 133 ++++++++++++++++++
>  2 files changed, 138 insertions(+)
> 
> diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
> index ecf89df78109..2570bd4b0cb2 100644
> --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
> +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
> @@ -18,6 +18,11 @@ static const char * const cpumask_success_testcases[] = {
>  	"test_insert_leave",
>  	"test_insert_remove_release",
>  	"test_global_mask_rcu",
> +	"test_global_mask_array_one_rcu",
> +	"test_global_mask_array_rcu",
> +	"test_global_mask_array_l2_rcu",
> +	"test_global_mask_nested_rcu",
> +	"test_global_mask_nested_deep_rcu",
>  	"test_cpumask_weight",
>  };
>  
> diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
> index 7a1e64c6c065..0b6383fa9958 100644
> --- a/tools/testing/selftests/bpf/progs/cpumask_success.c
> +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
> @@ -12,6 +12,25 @@ char _license[] SEC("license") = "GPL";
>  
>  int pid, nr_cpus;
>  
> +struct kptr_nested {
> +	struct bpf_cpumask __kptr * mask;
> +};
> +
> +struct kptr_nested_mid {
> +	int dummy;
> +	struct kptr_nested m;
> +};
> +
> +struct kptr_nested_deep {
> +	struct kptr_nested_mid ptrs[2];
> +};

For the sake of completeness, would it be possible to create a test
case where there are several struct arrays following each other?
E.g. as below:

struct foo {
  ... __kptr *a;
  ... __kptr *b;
}

struct bar {
  ... __kptr *c;
}

struct {
  struct foo foos[3];
  struct bar bars[2];
}

Just to check that offset is propagated correctly.

Also, in the tests below you check that a pointer to some object could
be put into an array at different indexes. Tbh, I find it not very
interesting if we want to check that offsets are correct.
Would it be possible to create an array of object kptrs,
put specific references at specific indexes and somehow check which
object ended up where? (not necessarily 'bpf_cpumask').

> +
> +private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
> +private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
> +private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
> +private(MASK) static struct kptr_nested global_mask_nested[2];
> +private(MASK) static struct kptr_nested_deep global_mask_nested_deep;
> +
>  static bool is_test_task(void)
>  {
>  	int cur_pid = bpf_get_current_pid_tgid() >> 32;
> @@ -460,6 +479,120 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
>  	return 0;
>  }
>  
> +SEC("tp_btf/task_newtask")
> +int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
> +{
> +	struct bpf_cpumask *local, *prev;
> +
> +	if (!is_test_task())
> +		return 0;
> +
> +	/* Kptr arrays with one element are special cased, being treated
> +	 * just like a single pointer.
> +	 */
> +
> +	local = create_cpumask();
> +	if (!local)
> +		return 0;
> +
> +	prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
> +	if (prev) {
> +		bpf_cpumask_release(prev);
> +		err = 3;
> +		return 0;
> +	}
> +
> +	bpf_rcu_read_lock();
> +	local = global_mask_array_one[0];
> +	if (!local) {
> +		err = 4;
> +		bpf_rcu_read_unlock();
> +		return 0;
> +	}
> +
> +	bpf_rcu_read_unlock();
> +
> +	return 0;
> +}
> +
> +static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
> +				  struct bpf_cpumask **mask1)
> +{
> +	struct bpf_cpumask *local;
> +
> +	if (!is_test_task())
> +		return 0;
> +
> +	/* Check if two kptrs in the array work and independently */
> +
> +	local = create_cpumask();
> +	if (!local)
> +		return 0;
> +
> +	bpf_rcu_read_lock();
> +
> +	local = bpf_kptr_xchg(mask0, local);
> +	if (local) {
> +		err = 1;
> +		goto err_exit;
> +	}
> +
> +	/* [<mask 0>, NULL] */
> +	if (!*mask0 || *mask1) {
> +		err = 2;
> +		goto err_exit;
> +	}
> +
> +	local = create_cpumask();
> +	if (!local) {
> +		err = 9;
> +		goto err_exit;
> +	}
> +
> +	local = bpf_kptr_xchg(mask1, local);
> +	if (local) {
> +		err = 10;
> +		goto err_exit;
> +	}
> +
> +	/* [<mask 0>, <mask 1>] */
> +	if (!*mask0 || !*mask1 || *mask0 == *mask1) {
> +		err = 11;
> +		goto err_exit;
> +	}
> +
> +err_exit:
> +	if (local)
> +		bpf_cpumask_release(local);
> +	bpf_rcu_read_unlock();
> +	return 0;
> +}
> +
> +SEC("tp_btf/task_newtask")
> +int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
> +{
> +	return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
> +}
> +
> +SEC("tp_btf/task_newtask")
> +int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
> +{
> +	return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
> +}
> +
> +SEC("tp_btf/task_newtask")
> +int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
> +{
> +	return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
> +}
> +
> +SEC("tp_btf/task_newtask")
> +int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
> +{
> +	return _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
> +				      &global_mask_nested_deep.ptrs[1].m.mask);
> +}
> +
>  SEC("tp_btf/task_newtask")
>  int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
>  {
Kui-Feng Lee May 10, 2024, 9:59 p.m. UTC | #2
On 5/10/24 03:03, Eduard Zingerman wrote:
> On Thu, 2024-05-09 at 18:13 -0700, Kui-Feng Lee wrote:
>> Make sure that BPF programs can declare global kptr arrays and kptr fields
>> in struct types that is the type of a global variable or the type of a
>> nested descendant field in a global variable.
>>
>> An array with only one element is special case, that it treats the element
>> like a non-array kptr field. Nested arrays are also tested to ensure they
>> are handled properly.
>>
>> Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
>> ---
>>   .../selftests/bpf/prog_tests/cpumask.c        |   5 +
>>   .../selftests/bpf/progs/cpumask_success.c     | 133 ++++++++++++++++++
>>   2 files changed, 138 insertions(+)
>>
>> diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
>> index ecf89df78109..2570bd4b0cb2 100644
>> --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
>> +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
>> @@ -18,6 +18,11 @@ static const char * const cpumask_success_testcases[] = {
>>   	"test_insert_leave",
>>   	"test_insert_remove_release",
>>   	"test_global_mask_rcu",
>> +	"test_global_mask_array_one_rcu",
>> +	"test_global_mask_array_rcu",
>> +	"test_global_mask_array_l2_rcu",
>> +	"test_global_mask_nested_rcu",
>> +	"test_global_mask_nested_deep_rcu",
>>   	"test_cpumask_weight",
>>   };
>>   
>> diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
>> index 7a1e64c6c065..0b6383fa9958 100644
>> --- a/tools/testing/selftests/bpf/progs/cpumask_success.c
>> +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
>> @@ -12,6 +12,25 @@ char _license[] SEC("license") = "GPL";
>>   
>>   int pid, nr_cpus;
>>   
>> +struct kptr_nested {
>> +	struct bpf_cpumask __kptr * mask;
>> +};
>> +
>> +struct kptr_nested_mid {
>> +	int dummy;
>> +	struct kptr_nested m;
>> +};
>> +
>> +struct kptr_nested_deep {
>> +	struct kptr_nested_mid ptrs[2];
>> +};
> 
> For the sake of completeness, would it be possible to create a test
> case where there are several struct arrays following each other?
> E.g. as below:
> 
> struct foo {
>    ... __kptr *a;
>    ... __kptr *b;
> }
> 
> struct bar {
>    ... __kptr *c;
> }
> 
> struct {
>    struct foo foos[3];
>    struct bar bars[2];
> }
> 
> Just to check that offset is propagated correctly.

Sure!

> 
> Also, in the tests below you check that a pointer to some object could
> be put into an array at different indexes. Tbh, I find it not very
> interesting if we want to check that offsets are correct.
> Would it be possible to create an array of object kptrs,
> put specific references at specific indexes and somehow check which
> object ended up where? (not necessarily 'bpf_cpumask').

Do you mean checking index in the way like the following code?

  if (array[0] != ref0 || array[1] != ref1 || array[2] != ref2 ....)
    return err;

> 
>> +
>> +private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
>> +private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
>> +private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
>> +private(MASK) static struct kptr_nested global_mask_nested[2];
>> +private(MASK) static struct kptr_nested_deep global_mask_nested_deep;
>> +
>>   static bool is_test_task(void)
>>   {
>>   	int cur_pid = bpf_get_current_pid_tgid() >> 32;
>> @@ -460,6 +479,120 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
>>   	return 0;
>>   }
>>   
>> +SEC("tp_btf/task_newtask")
>> +int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
>> +{
>> +	struct bpf_cpumask *local, *prev;
>> +
>> +	if (!is_test_task())
>> +		return 0;
>> +
>> +	/* Kptr arrays with one element are special cased, being treated
>> +	 * just like a single pointer.
>> +	 */
>> +
>> +	local = create_cpumask();
>> +	if (!local)
>> +		return 0;
>> +
>> +	prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
>> +	if (prev) {
>> +		bpf_cpumask_release(prev);
>> +		err = 3;
>> +		return 0;
>> +	}
>> +
>> +	bpf_rcu_read_lock();
>> +	local = global_mask_array_one[0];
>> +	if (!local) {
>> +		err = 4;
>> +		bpf_rcu_read_unlock();
>> +		return 0;
>> +	}
>> +
>> +	bpf_rcu_read_unlock();
>> +
>> +	return 0;
>> +}
>> +
>> +static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
>> +				  struct bpf_cpumask **mask1)
>> +{
>> +	struct bpf_cpumask *local;
>> +
>> +	if (!is_test_task())
>> +		return 0;
>> +
>> +	/* Check if two kptrs in the array work and independently */
>> +
>> +	local = create_cpumask();
>> +	if (!local)
>> +		return 0;
>> +
>> +	bpf_rcu_read_lock();
>> +
>> +	local = bpf_kptr_xchg(mask0, local);
>> +	if (local) {
>> +		err = 1;
>> +		goto err_exit;
>> +	}
>> +
>> +	/* [<mask 0>, NULL] */
>> +	if (!*mask0 || *mask1) {
>> +		err = 2;
>> +		goto err_exit;
>> +	}
>> +
>> +	local = create_cpumask();
>> +	if (!local) {
>> +		err = 9;
>> +		goto err_exit;
>> +	}
>> +
>> +	local = bpf_kptr_xchg(mask1, local);
>> +	if (local) {
>> +		err = 10;
>> +		goto err_exit;
>> +	}
>> +
>> +	/* [<mask 0>, <mask 1>] */
>> +	if (!*mask0 || !*mask1 || *mask0 == *mask1) {
>> +		err = 11;
>> +		goto err_exit;
>> +	}
>> +
>> +err_exit:
>> +	if (local)
>> +		bpf_cpumask_release(local);
>> +	bpf_rcu_read_unlock();
>> +	return 0;
>> +}
>> +
>> +SEC("tp_btf/task_newtask")
>> +int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
>> +{
>> +	return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
>> +}
>> +
>> +SEC("tp_btf/task_newtask")
>> +int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
>> +{
>> +	return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
>> +}
>> +
>> +SEC("tp_btf/task_newtask")
>> +int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
>> +{
>> +	return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
>> +}
>> +
>> +SEC("tp_btf/task_newtask")
>> +int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
>> +{
>> +	return _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
>> +				      &global_mask_nested_deep.ptrs[1].m.mask);
>> +}
>> +
>>   SEC("tp_btf/task_newtask")
>>   int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
>>   {
>
Eduard Zingerman May 10, 2024, 10:08 p.m. UTC | #3
On Fri, 2024-05-10 at 14:59 -0700, Kui-Feng Lee wrote:
> 
> For the sake of completeness, would it be possible to create a test
> > case where there are several struct arrays following each other?
> > E.g. as below:
> > 
> > struct foo {
> >    ... __kptr *a;
> >    ... __kptr *b;
> > }
> > 
> > struct bar {
> >    ... __kptr *c;
> > }
> > 
> > struct {
> >    struct foo foos[3];
> >    struct bar bars[2];
> > }
> > 
> > Just to check that offset is propagated correctly.
> 
> Sure!

Great, thank you

> > Also, in the tests below you check that a pointer to some object could
> > be put into an array at different indexes. Tbh, I find it not very
> > interesting if we want to check that offsets are correct.
> > Would it be possible to create an array of object kptrs,
> > put specific references at specific indexes and somehow check which
> > object ended up where? (not necessarily 'bpf_cpumask').
> 
> Do you mean checking index in the way like the following code?
> 
>   if (array[0] != ref0 || array[1] != ref1 || array[2] != ref2 ....)
>     return err;

Probably, but I'd need your help here.
The goal is to verify that offsets of __kptr's in the 'info' array
have been set correctly. Where is this information used later on?
E.g. I'd like to trigger some action that "touches" __kptr at index N
and verify that all others had not been "touched".
But this "touch" action has to use offset stored in the 'info'.

[...]
Kui-Feng Lee May 10, 2024, 10:25 p.m. UTC | #4
On 5/10/24 15:08, Eduard Zingerman wrote:
> On Fri, 2024-05-10 at 14:59 -0700, Kui-Feng Lee wrote:
>>
>> For the sake of completeness, would it be possible to create a test
>>> case where there are several struct arrays following each other?
>>> E.g. as below:
>>>
>>> struct foo {
>>>     ... __kptr *a;
>>>     ... __kptr *b;
>>> }
>>>
>>> struct bar {
>>>     ... __kptr *c;
>>> }
>>>
>>> struct {
>>>     struct foo foos[3];
>>>     struct bar bars[2];
>>> }
>>>
>>> Just to check that offset is propagated correctly.
>>
>> Sure!
> 
> Great, thank you
> 
>>> Also, in the tests below you check that a pointer to some object could
>>> be put into an array at different indexes. Tbh, I find it not very
>>> interesting if we want to check that offsets are correct.
>>> Would it be possible to create an array of object kptrs,
>>> put specific references at specific indexes and somehow check which
>>> object ended up where? (not necessarily 'bpf_cpumask').
>>
>> Do you mean checking index in the way like the following code?
>>
>>    if (array[0] != ref0 || array[1] != ref1 || array[2] != ref2 ....)
>>      return err;
> 
> Probably, but I'd need your help here.
> There goal is to verify that offsets of __kptr's in the 'info' array
> had been set correctly. Where is this information is used later on?
> E.g. I'd like to trigger some action that "touches" __kptr at index N
> and verify that all others had not been "touched".
> But this "touch" action has to use offset stored in the 'info'.


They are used for verifying the offset of instructions.
Let's assume we have an array of size 10.
Then, we have 10 infos with 10 different offsets.
And, we have a program that includes one instruction for each element, 10 in
total, to access the corresponding element.
Each instruction has an offset different from the others, generated by the
compiler. That means the verifier will fail to find an info for some of the
instructions if there are one or more infos with a wrong offset.



> 
> [...]
Eduard Zingerman May 10, 2024, 10:31 p.m. UTC | #5
On Fri, 2024-05-10 at 15:25 -0700, Kui-Feng Lee wrote:

> > > > Also, in the tests below you check that a pointer to some object could
> > > > be put into an array at different indexes. Tbh, I find it not very
> > > > interesting if we want to check that offsets are correct.
> > > > Would it be possible to create an array of object kptrs,
> > > > put specific references at specific indexes and somehow check which
> > > > object ended up where? (not necessarily 'bpf_cpumask').
> > > 
> > > Do you mean checking index in the way like the following code?
> > > 
> > >    if (array[0] != ref0 || array[1] != ref1 || array[2] != ref2 ....)
> > >      return err;
> > 
> > Probably, but I'd need your help here.
> > There goal is to verify that offsets of __kptr's in the 'info' array
> > had been set correctly. Where is this information is used later on?
> > E.g. I'd like to trigger some action that "touches" __kptr at index N
> > and verify that all others had not been "touched".
> > But this "touch" action has to use offset stored in the 'info'.
> 
> They are used for verifying the offset of instructions.
> Let's assume we have an array of size 10.
> Then, we have 10 infos with 10 different offsets.
> And, we have a program includes one instruction for each element, 10 in
> total, to access the corresponding element.
> Each instruction has an offset different from others, generated by the 
> compiler. That means the verifier will fail to find an info for some of
> instructions if there is one or more info having wrong offset.

That's a bit depressing, as there would be no way to check if e.g. all
10 refer to the same offset. Is it possible to trigger printing of the
'info.offset' to verifier log? E.g. via some 'illegal' action.
Kui-Feng Lee May 10, 2024, 10:53 p.m. UTC | #6
On 5/10/24 15:31, Eduard Zingerman wrote:
> On Fri, 2024-05-10 at 15:25 -0700, Kui-Feng Lee wrote:
> 
>>>>> Also, in the tests below you check that a pointer to some object could
>>>>> be put into an array at different indexes. Tbh, I find it not very
>>>>> interesting if we want to check that offsets are correct.
>>>>> Would it be possible to create an array of object kptrs,
>>>>> put specific references at specific indexes and somehow check which
>>>>> object ended up where? (not necessarily 'bpf_cpumask').
>>>>
>>>> Do you mean checking index in the way like the following code?
>>>>
>>>>     if (array[0] != ref0 || array[1] != ref1 || array[2] != ref2 ....)
>>>>       return err;
>>>
>>> Probably, but I'd need your help here.
>>> There goal is to verify that offsets of __kptr's in the 'info' array
>>> had been set correctly. Where is this information is used later on?
>>> E.g. I'd like to trigger some action that "touches" __kptr at index N
>>> and verify that all others had not been "touched".
>>> But this "touch" action has to use offset stored in the 'info'.
>>
>> They are used for verifying the offset of instructions.
>> Let's assume we have an array of size 10.
>> Then, we have 10 infos with 10 different offsets.
>> And, we have a program includes one instruction for each element, 10 in
>> total, to access the corresponding element.
>> Each instruction has an offset different from others, generated by the
>> compiler. That means the verifier will fail to find an info for some of
>> instructions if there is one or more info having wrong offset.
> 
> That's a bit depressing, as there would be no way to check if e.g. all
> 10 refer to the same offset. Is it possible to trigger printing of the
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
How can that happen? Do you mean the compiler does it wrong?


> 'info.offset' to verifier log? E.g. via some 'illegal' action.
Yes if necessary!
Eduard Zingerman May 10, 2024, 10:57 p.m. UTC | #7
On Fri, 2024-05-10 at 15:53 -0700, Kui-Feng Lee wrote:

[...]

> > > > > Do you mean checking index in the way like the following code?
> > > > > 
> > > > >     if (array[0] != ref0 || array[1] != ref1 || array[2] != ref2 ....)
> > > > >       return err;
> > > > 
> > > > Probably, but I'd need your help here.
> > > > There goal is to verify that offsets of __kptr's in the 'info' array
> > > > had been set correctly. Where is this information is used later on?
> > > > E.g. I'd like to trigger some action that "touches" __kptr at index N
> > > > and verify that all others had not been "touched".
> > > > But this "touch" action has to use offset stored in the 'info'.
> > > 
> > > They are used for verifying the offset of instructions.
> > > Let's assume we have an array of size 10.
> > > Then, we have 10 infos with 10 different offsets.
> > > And, we have a program includes one instruction for each element, 10 in
> > > total, to access the corresponding element.
> > > Each instruction has an offset different from others, generated by the
> > > compiler. That means the verifier will fail to find an info for some of
> > > instructions if there is one or more info having wrong offset.
> > 
> > That's a bit depressing, as there would be no way to check if e.g. all
> > 10 refer to the same offset. Is it possible to trigger printing of the
> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> How can that happen? Do you mean the compiler does it wrong?

No, suppose that 'info.offset' is computed incorrectly because of some
bug in arrays handling. E.g. all .off fields in the infos have the
same value.

What is the shape of the test that could catch such bug?

> > 'info.offset' to verifier log? E.g. via some 'illegal' action.
> Yes if necessary!
Kui-Feng Lee May 10, 2024, 11:04 p.m. UTC | #8
On 5/10/24 15:57, Eduard Zingerman wrote:
> On Fri, 2024-05-10 at 15:53 -0700, Kui-Feng Lee wrote:
> 
> [...]
> 
>>>>>> Do you mean checking index in the way like the following code?
>>>>>>
>>>>>>      if (array[0] != ref0 || array[1] != ref1 || array[2] != ref2 ....)
>>>>>>        return err;
>>>>>
>>>>> Probably, but I'd need your help here.
>>>>> There goal is to verify that offsets of __kptr's in the 'info' array
>>>>> had been set correctly. Where is this information is used later on?
>>>>> E.g. I'd like to trigger some action that "touches" __kptr at index N
>>>>> and verify that all others had not been "touched".
>>>>> But this "touch" action has to use offset stored in the 'info'.
>>>>
>>>> They are used for verifying the offset of instructions.
>>>> Let's assume we have an array of size 10.
>>>> Then, we have 10 infos with 10 different offsets.
>>>> And, we have a program includes one instruction for each element, 10 in
>>>> total, to access the corresponding element.
>>>> Each instruction has an offset different from others, generated by the
>>>> compiler. That means the verifier will fail to find an info for some of
>>>> instructions if there is one or more info having wrong offset.
>>>
>>> That's a bit depressing, as there would be no way to check if e.g. all
>>> 10 refer to the same offset. Is it possible to trigger printing of the
>> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>> How can that happen? Do you mean the compiler does it wrong?
> 
> No, suppose that 'info.offset' is computed incorrectly because of some
> bug in arrays handling. E.g. all .off fields in the infos have the
> same value.
> 
> What is the shape of the test that could catch such bug?
> 

I am not sure if I read you question correctly.

For example, we have 3 correct info.

  [info(offset=0x8), info(offset=0x10), info(offset=0x18)]

And we have a program that includes 3 instructions to access the offsets
0x8, 0x10, and 0x18. (Let's assume these load instructions would be
checked against the infos.)

  load r1, [0x8]
  load r1, [0x10]
  load r1, [0x18]

If everything works as expected, the verifier would accept the program.

Otherwise, like you said, all 3 info are pointing to the same offset.

  [info(offset=0x8), info(offset=0x8), info(offset=0x8)]

Then, the later two instructions should fail the check.


>>> 'info.offset' to verifier log? E.g. via some 'illegal' action.
>> Yes if necessary!
>
Eduard Zingerman May 10, 2024, 11:17 p.m. UTC | #9
On Fri, 2024-05-10 at 16:04 -0700, Kui-Feng Lee wrote:

[...]


> I am not sure if I read you question correctly.
> 
> For example, we have 3 correct info.
> 
>   [info(offset=0x8), info(offset=0x10), info(offset=0x18)]
> 
> And We have program that includes 3 instructions to access the offset 
> 0x8, 0x10, and 0x18. (let's assume these load instructions would be 
> checked against infos)
> 
>   load r1, [0x8]
>   load r1, [0x10]
>   load r1, [0x18]
> 
> If everything works as expected, the verifier would accept the program.
> 
> Otherwise, like you said, all 3 info are pointing to the same offset.
> 
>   [info(0offset=0x8), info(offset=0x8), info(offset=0x8)]
> 
> Then, the later two instructions should fail the check.

I think it would be in reverse.
If for some offset there is no record of special semantics,
the verifier would treat the load as a regular memory access.

However, there is a btf.c:btf_struct_access(), which would report
an error if offset within a special field is accessed directly:

int btf_struct_access(struct bpf_verifier_log *log,
		      const struct bpf_reg_state *reg,
		      int off, int size, enum bpf_access_type atype __maybe_unused,
		      u32 *next_btf_id, enum bpf_type_flag *flag,
		      const char **field_name)
{
	...
	struct btf_struct_meta *meta;
	struct btf_record *rec;
	int i;

	meta = btf_find_struct_meta(btf, id);
	if (!meta)
		break;
	rec = meta->record;
	for (i = 0; i < rec->cnt; i++) {
		struct btf_field *field = &rec->fields[i];
		u32 offset = field->offset;
		if (off < offset + btf_field_type_size(field->type) && offset < off + size) {
			bpf_log(log,
				"direct access to %s is disallowed\n",
				btf_field_type_name(field->type));
			return -EACCES;
		}
	}
	break;
}

So it looks like we need a test with a following structure:

- global definition using an array, e.g. with a size of 3
- program #1 doing a direct access at offset of element #1, expect load time error message
- program #2 doing a direct access at offset of element #2, expect load time error message
- program #3 doing a direct access at offset of element #3, expect load time error message
If any of the offsets is computed incorrectly, the error message will not be printed.

(And these could be packed as progs/verifier_*.c tests)
And some similar tests with different levels of nested arrays and structures.
But this looks a bit ugly/bulky.
Wdyt?
>
Eduard Zingerman May 10, 2024, 11:29 p.m. UTC | #10
On Fri, 2024-05-10 at 16:17 -0700, Eduard Zingerman wrote:
> On Fri, 2024-05-10 at 16:04 -0700, Kui-Feng Lee wrote:
> 
> [...]
> 
> 
> > I am not sure if I read you question correctly.
> > 
> > For example, we have 3 correct info.
> > 
> >   [info(offset=0x8), info(offset=0x10), info(offset=0x18)]
> > 
> > And We have program that includes 3 instructions to access the offset 
> > 0x8, 0x10, and 0x18. (let's assume these load instructions would be 
> > checked against infos)
> > 
> >   load r1, [0x8]
> >   load r1, [0x10]
> >   load r1, [0x18]
> > 
> > If everything works as expected, the verifier would accept the program.
> > 
> > Otherwise, like you said, all 3 info are pointing to the same offset.
> > 
> >   [info(0offset=0x8), info(offset=0x8), info(offset=0x8)]
> > 
> > Then, the later two instructions should fail the check.

Ok, what you are saying is possible not with load but with some kfunc
that accepts a special pointer. E.g. when verifier.c:check_kfunc_args()
expects an argument of KF_ARG_PTR_TO_LIST_HEAD type it would report an
error if special field is not found.

So the structure of the test would be:
- define a nested data structure with list head at some leafs;
- in the BPF program call a kfunc accessing each of the list heads;
- if all offsets are computed correctly there would be no load time error;
- this is a load time test, no need to actually run the BPF program.

[...]
Kui-Feng Lee May 20, 2024, 3:55 p.m. UTC | #11
On 5/10/24 16:29, Eduard Zingerman wrote:
> On Fri, 2024-05-10 at 16:17 -0700, Eduard Zingerman wrote:
>> On Fri, 2024-05-10 at 16:04 -0700, Kui-Feng Lee wrote:
>>
>> [...]
>>
>>
>>> I am not sure if I read you question correctly.
>>>
>>> For example, we have 3 correct info.
>>>
>>>    [info(offset=0x8), info(offset=0x10), info(offset=0x18)]
>>>
>>> And We have program that includes 3 instructions to access the offset
>>> 0x8, 0x10, and 0x18. (let's assume these load instructions would be
>>> checked against infos)
>>>
>>>    load r1, [0x8]
>>>    load r1, [0x10]
>>>    load r1, [0x18]
>>>
>>> If everything works as expected, the verifier would accept the program.
>>>
>>> Otherwise, like you said, all 3 info are pointing to the same offset.
>>>
>>>    [info(0offset=0x8), info(offset=0x8), info(offset=0x8)]
>>>
>>> Then, the later two instructions should fail the check.
> 
> Ok, what you are saying is possible not with load but with some kfunc
> that accepts a special pointer. E.g. when verifier.c:check_kfunc_args()
> expects an argument of KF_ARG_PTR_TO_LIST_HEAD type it would report an
> error if special field is not found.
> 
> So the structure of the test would be:
> - define a nested data structure with list head at some leafs;
> - in the BPF program call a kfunc accessing each of the list heads;
> - if all offsets are computed correctly there would be no load time error;
> - this is a load time test, no need to actually run the BPF program.
> 
> [...]

Yes, that is what I meant.
Sorry for replying late.
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c
index ecf89df78109..2570bd4b0cb2 100644
--- a/tools/testing/selftests/bpf/prog_tests/cpumask.c
+++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c
@@ -18,6 +18,11 @@  static const char * const cpumask_success_testcases[] = {
 	"test_insert_leave",
 	"test_insert_remove_release",
 	"test_global_mask_rcu",
+	"test_global_mask_array_one_rcu",
+	"test_global_mask_array_rcu",
+	"test_global_mask_array_l2_rcu",
+	"test_global_mask_nested_rcu",
+	"test_global_mask_nested_deep_rcu",
 	"test_cpumask_weight",
 };
 
diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c
index 7a1e64c6c065..0b6383fa9958 100644
--- a/tools/testing/selftests/bpf/progs/cpumask_success.c
+++ b/tools/testing/selftests/bpf/progs/cpumask_success.c
@@ -12,6 +12,25 @@  char _license[] SEC("license") = "GPL";
 
 int pid, nr_cpus;
 
+struct kptr_nested {
+	struct bpf_cpumask __kptr * mask;
+};
+
+struct kptr_nested_mid {
+	int dummy;
+	struct kptr_nested m;
+};
+
+struct kptr_nested_deep {
+	struct kptr_nested_mid ptrs[2];
+};
+
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1];
+private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1];
+private(MASK) static struct kptr_nested global_mask_nested[2];
+private(MASK) static struct kptr_nested_deep global_mask_nested_deep;
+
 static bool is_test_task(void)
 {
 	int cur_pid = bpf_get_current_pid_tgid() >> 32;
@@ -460,6 +479,120 @@  int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags)
 	return 0;
 }
 
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags)
+{
+	struct bpf_cpumask *local, *prev;
+
+	if (!is_test_task())
+		return 0;
+
+	/* Kptr arrays with one element are special cased, being treated
+	 * just like a single pointer.
+	 */
+
+	local = create_cpumask();
+	if (!local)
+		return 0;
+
+	prev = bpf_kptr_xchg(&global_mask_array_one[0], local);
+	if (prev) {
+		bpf_cpumask_release(prev);
+		err = 3;
+		return 0;
+	}
+
+	bpf_rcu_read_lock();
+	local = global_mask_array_one[0];
+	if (!local) {
+		err = 4;
+		bpf_rcu_read_unlock();
+		return 0;
+	}
+
+	bpf_rcu_read_unlock();
+
+	return 0;
+}
+
+static int _global_mask_array_rcu(struct bpf_cpumask **mask0,
+				  struct bpf_cpumask **mask1)
+{
+	struct bpf_cpumask *local;
+
+	if (!is_test_task())
+		return 0;
+
+	/* Check if two kptrs in the array work and independently */
+
+	local = create_cpumask();
+	if (!local)
+		return 0;
+
+	bpf_rcu_read_lock();
+
+	local = bpf_kptr_xchg(mask0, local);
+	if (local) {
+		err = 1;
+		goto err_exit;
+	}
+
+	/* [<mask 0>, NULL] */
+	if (!*mask0 || *mask1) {
+		err = 2;
+		goto err_exit;
+	}
+
+	local = create_cpumask();
+	if (!local) {
+		err = 9;
+		goto err_exit;
+	}
+
+	local = bpf_kptr_xchg(mask1, local);
+	if (local) {
+		err = 10;
+		goto err_exit;
+	}
+
+	/* [<mask 0>, <mask 1>] */
+	if (!*mask0 || !*mask1 || *mask0 == *mask1) {
+		err = 11;
+		goto err_exit;
+	}
+
+err_exit:
+	if (local)
+		bpf_cpumask_release(local);
+	bpf_rcu_read_unlock();
+	return 0;
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags)
+{
+	return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags)
+{
+	return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags)
+{
+	return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask);
+}
+
+SEC("tp_btf/task_newtask")
+int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags)
+{
+	return _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask,
+				      &global_mask_nested_deep.ptrs[1].m.mask);
+}
+
 SEC("tp_btf/task_newtask")
 int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags)
 {