diff mbox series

[v1,bpf-next,2/4] bpf: Refactor btf_find_field with btf_field_info_search

Message ID 20231023220030.2556229-3-davemarchevsky@fb.com (mailing list archive)
State Changes Requested
Delegated to: BPF
Headers show
Series Descend into struct, array types when searching for fields | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2862 this patch: 2862
netdev/cc_maintainers warning 8 maintainers not CCed: song@kernel.org yonghong.song@linux.dev jolsa@kernel.org kpsingh@kernel.org john.fastabend@gmail.com sdf@google.com haoluo@google.com martin.lau@linux.dev
netdev/build_clang success Errors and warnings before: 1537 this patch: 1537
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2950 this patch: 2950
netdev/checkpatch fail CHECK: Macro argument 'field_type' may be better as '(field_type)' to avoid precedence issues ERROR: Macros starting with if should be enclosed by a do - while loop to avoid possible if/else logic defects WARNING: Macros with flow control statements should be avoided WARNING: line length of 81 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-16 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-16 / veristat
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-16 / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-16 / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-3 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-llvm-16 / build / build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-llvm-16 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-15 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-9 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-llvm-16 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-18 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-16 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-16 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-16 / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc

Commit Message

Dave Marchevsky Oct. 23, 2023, 10 p.m. UTC
btf_find_field takes (btf_type, special_field_types) and returns info
about the specific special fields in btf_type, in the form of an array
of struct btf_field_info. The meat of this 'search for special fields'
process happens in btf_find_datasec_var and btf_find_struct_field
helpers: each member is examined and, if it's special, a struct
btf_field_info describing it is added to the return array. Indeed, any
function that might add to the output probably also looks at struct
members or datasec vars.
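
For context, the pre-patch entry point takes the output array and its
size explicitly; the call site in btf_parse_fields (removed by this
patch) looks like:

  struct btf_field_info info_arr[BTF_FIELDS_MAX];
  int ret;

  ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));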

Most of the parameters passed around between helpers doing the search
can be grouped into two categories: "info about the output array" and
"info about which fields to search for". This patch joins those together
in struct btf_field_info_search, simplifying the signatures of most
helpers involved in the search, including the array flattening helper that
later patches in the series will add.
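
Concretely, the grouped search state is a single cursor struct
(condensed from the hunk below; comments abbreviated):

  struct btf_field_info_search {
          struct btf_field_info *infos;   /* growable output array */
          int info_cnt;                   /* allocated size of infos */
          int idx;                        /* next btf_field_info to populate */
          u32 field_mask;                 /* btf_field_types to search for */
          u32 seen_mask;                  /* btf_field_types found earlier */
  };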

The aforementioned array flattening logic will greatly increase the
number of btf_field_info's needed to describe some structs, so this
patch also turns the statically-sized struct btf_field_info
info_arr[BTF_FIELDS_MAX] into a growable array with a larger max size.

Implementation notes:
  * BTF_FIELDS_MAX is now max size of growable btf_field_info *infos
    instead of initial (and max) size of static result array
    * Static array before had 10 elems (+1 tmp btf_field_info)
    * growable array starts with 16 and doubles every time it needs to
      grow, up to BTF_FIELDS_MAX of 256 (see the sketch after these notes)

  * __next_field_infos is used with next_cnt > 1 later in the series

  * btf_find_{datasec_var, struct_field} have special logic for an edge
    case where the result array is full but the field being examined
    gets a BTF_FIELD_IGNORE return from btf_find_{struct,kptr,graph_root}
    * If the result wasn't BTF_FIELD_IGNORE, a btf_field_info would have
      to be added to the array. Since it is, we can move on to the next
      field.
    * Before this patch the logic handling this edge case was hard to
      follow and used a tmp btf_field_info. This patch moves the
      add-if-not-ignore logic down into btf_find_{struct, kptr,
      graph_root}, removing the need to opportunistically grab a
      btf_field_info to populate before knowing if it's actually
      necessary. Now a new one is grabbed only if the field shouldn't
      be ignored.

  * Within btf_find_{datasec_var, struct_field}, each member is
    currently examined in two phases: first btf_get_field_type checks
    the member type name, then btf_find_{struct,graph_root,kptr} do
    additional sanity checking and populate struct btf_field_info. Kptr
    fields don't have a specific type name, though, so
    btf_get_field_type assumes that - if we're looking for kptrs - any
    member that fails type name check could be a kptr field.
    * As a result btf_find_kptr effectively does all the pointer
      hopping, sanity checking, and info population. Instead of trying
      to fit kptr handling into this two-phase model, where it's
      unwieldy, handle it in a separate codepath when name matching
      fails.
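
As a sketch, the core of the growth policy described in the notes above
(excerpted from __next_field_infos in this patch; the caller then
populates the reserved slots starting at srch->infos[srch->idx]):

  if (srch->idx + next_cnt > BTF_FIELDS_MAX)
          return ERR_PTR(-E2BIG);

  while (srch->idx + next_cnt >= srch->info_cnt)
          srch->info_cnt = srch->infos ? srch->info_cnt * 2 : 16;

  new_infos = krealloc(srch->infos,
                       srch->info_cnt * sizeof(struct btf_field_info),
                       GFP_KERNEL | __GFP_NOWARN);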

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 include/linux/bpf.h |   4 +-
 kernel/bpf/btf.c    | 331 +++++++++++++++++++++++++++++---------------
 2 files changed, 219 insertions(+), 116 deletions(-)

Comments

Jiri Olsa Oct. 28, 2023, 2:52 p.m. UTC | #1
On Mon, Oct 23, 2023 at 03:00:28PM -0700, Dave Marchevsky wrote:

SNIP

>  
>  #undef field_mask_test_name_check_seen
>  #undef field_mask_test_name
>  
> +static int __struct_member_check_align(u32 off, enum btf_field_type field_type)
> +{
> +	u32 align = btf_field_type_align(field_type);
> +
> +	if (off % align)
> +		return -EINVAL;
> +	return 0;
> +}
> +
>  static int btf_find_struct_field(const struct btf *btf,
> -				 const struct btf_type *t, u32 field_mask,
> -				 struct btf_field_info *info, int info_cnt)
> +				 const struct btf_type *t,
> +				 struct btf_field_info_search *srch)
>  {
> -	int ret, idx = 0, align, sz, field_type;
>  	const struct btf_member *member;
> -	struct btf_field_info tmp;
> -	u32 i, off, seen_mask = 0;
> +	int ret, field_type;
> +	u32 i, off, sz;
>  
>  	for_each_member(i, t, member) {
>  		const struct btf_type *member_type = btf_type_by_id(btf,
>  								    member->type);
> -
> -		field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off),
> -						field_mask, &seen_mask, &align, &sz);
> -		if (field_type == 0)
> -			continue;
> -		if (field_type < 0)
> -			return field_type;
> -
>  		off = __btf_member_bit_offset(t, member);
>  		if (off % 8)
>  			/* valid C code cannot generate such BTF */
>  			return -EINVAL;
>  		off /= 8;
> -		if (off % align)
> +
> +		field_type = btf_get_field_type_by_name(btf, member_type, srch);
> +		if (field_type < 0)
> +			return field_type;
> +
> +		if (field_type == 0) {
> +			/* Maybe it's a kptr. Use BPF_KPTR_REF for align
> +			 * checks, all ptrs have same align.
> +			 * btf_maybe_find_kptr will find actual kptr type
> +			 */
> +			if (__struct_member_check_align(off, BPF_KPTR_REF))
> +				continue;
> +
> +			ret = btf_maybe_find_kptr(btf, member_type, off, srch);
> +			if (ret < 0)
> +				return ret;
> +			continue;
> +		}

would it be possible to split the change into:
  - factor the arguments into 'struct btf_field_info_search *srch' and
    pass it to btf_find_field and all related descendant calls
  - factor out the alignment handling

I can't see these two changes being dependent on each other;
if that's the case, I think the change would be simpler.

jirka

> +
> +		sz = btf_field_type_size(field_type);
> +		if (__struct_member_check_align(off, field_type))
>  			continue;
>  
>  		switch (field_type) {
> @@ -3453,64 +3542,81 @@ static int btf_find_struct_field(const struct btf *btf,
>  		case BPF_RB_NODE:
>  		case BPF_REFCOUNT:
>  			ret = btf_find_struct(btf, member_type, off, sz, field_type,
> -					      idx < info_cnt ? &info[idx] : &tmp);
> -			if (ret < 0)
> -				return ret;
> -			break;
> -		case BPF_KPTR_UNREF:
> -		case BPF_KPTR_REF:
> -		case BPF_KPTR_PERCPU:
> -			ret = btf_find_kptr(btf, member_type, off, sz,
> -					    idx < info_cnt ? &info[idx] : &tmp);
> +					      srch);
>  			if (ret < 0)
>  				return ret;
>  			break;
>  		case BPF_LIST_HEAD:
>  		case BPF_RB_ROOT:
>  			ret = btf_find_graph_root(btf, t, member_type,
> -						  i, off, sz,
> -						  idx < info_cnt ? &info[idx] : &tmp,
> -						  field_type);
> +						  i, off, sz, srch, field_type);
>  			if (ret < 0)
>  				return ret;
>  			break;
> +		/* kptr fields are not handled in this switch, see
> +		 * btf_maybe_find_kptr above
> +		 */
> +		case BPF_KPTR_UNREF:
> +		case BPF_KPTR_REF:
> +		case BPF_KPTR_PERCPU:
>  		default:
>  			return -EFAULT;
>  		}
> -
> -		if (ret == BTF_FIELD_IGNORE)
> -			continue;
> -		if (idx >= info_cnt)
> -			return -E2BIG;
> -		++idx;
>  	}
> -	return idx;
> +	return srch->idx;
> +}
> +
> +static int __datasec_vsi_check_align_sz(const struct btf_var_secinfo *vsi,
> +					enum btf_field_type field_type,
> +					u32 expected_sz)
> +{
> +	u32 off, align;
> +
> +	off = vsi->offset;
> +	align = btf_field_type_align(field_type);
> +
> +	if (vsi->size != expected_sz)
> +		return -EINVAL;
> +	if (off % align)
> +		return -EINVAL;
> +
> +	return 0;
>  }
>  
>  static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
> -				u32 field_mask, struct btf_field_info *info,
> -				int info_cnt)
> +				struct btf_field_info_search *srch)
>  {
> -	int ret, idx = 0, align, sz, field_type;
>  	const struct btf_var_secinfo *vsi;
> -	struct btf_field_info tmp;
> -	u32 i, off, seen_mask = 0;
> +	int ret, field_type;
> +	u32 i, off, sz;
>  
>  	for_each_vsi(i, t, vsi) {
>  		const struct btf_type *var = btf_type_by_id(btf, vsi->type);
>  		const struct btf_type *var_type = btf_type_by_id(btf, var->type);
>  
> -		field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off),
> -						field_mask, &seen_mask, &align, &sz);
> -		if (field_type == 0)
> -			continue;
> +		off = vsi->offset;
> +		field_type = btf_get_field_type_by_name(btf, var_type, srch);
>  		if (field_type < 0)
>  			return field_type;
>  
> -		off = vsi->offset;
> -		if (vsi->size != sz)
> +		if (field_type == 0) {
> +			/* Maybe it's a kptr. Use BPF_KPTR_REF for sz / align
> +			 * checks, all ptrs have same sz / align.
> +			 * btf_maybe_find_kptr will find actual kptr type
> +			 */
> +			sz = btf_field_type_size(BPF_KPTR_REF);
> +			if (__datasec_vsi_check_align_sz(vsi, BPF_KPTR_REF, sz))
> +				continue;
> +
> +			ret = btf_maybe_find_kptr(btf, var_type, off, srch);
> +			if (ret < 0)
> +				return ret;
>  			continue;
> -		if (off % align)
> +		}
> +
> +		sz = btf_field_type_size(field_type);
> +
> +		if (__datasec_vsi_check_align_sz(vsi, field_type, sz))
>  			continue;
>  
>  		switch (field_type) {
> @@ -3520,48 +3626,38 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
>  		case BPF_RB_NODE:
>  		case BPF_REFCOUNT:
>  			ret = btf_find_struct(btf, var_type, off, sz, field_type,
> -					      idx < info_cnt ? &info[idx] : &tmp);
> -			if (ret < 0)
> -				return ret;
> -			break;
> -		case BPF_KPTR_UNREF:
> -		case BPF_KPTR_REF:
> -		case BPF_KPTR_PERCPU:
> -			ret = btf_find_kptr(btf, var_type, off, sz,
> -					    idx < info_cnt ? &info[idx] : &tmp);
> +					      srch);
>  			if (ret < 0)
>  				return ret;
>  			break;
>  		case BPF_LIST_HEAD:
>  		case BPF_RB_ROOT:
>  			ret = btf_find_graph_root(btf, var, var_type,
> -						  -1, off, sz,
> -						  idx < info_cnt ? &info[idx] : &tmp,
> +						  -1, off, sz, srch,
>  						  field_type);
>  			if (ret < 0)
>  				return ret;
>  			break;
> +		/* kptr fields are not handled in this switch, see
> +		 * btf_maybe_find_kptr above
> +		 */
> +		case BPF_KPTR_UNREF:
> +		case BPF_KPTR_REF:
> +		case BPF_KPTR_PERCPU:
>  		default:
>  			return -EFAULT;
>  		}
> -
> -		if (ret == BTF_FIELD_IGNORE)
> -			continue;
> -		if (idx >= info_cnt)
> -			return -E2BIG;
> -		++idx;
>  	}
> -	return idx;
> +	return srch->idx;
>  }
>  
>  static int btf_find_field(const struct btf *btf, const struct btf_type *t,
> -			  u32 field_mask, struct btf_field_info *info,
> -			  int info_cnt)
> +			  struct btf_field_info_search *srch)
>  {
>  	if (__btf_type_is_struct(t))
> -		return btf_find_struct_field(btf, t, field_mask, info, info_cnt);
> +		return btf_find_struct_field(btf, t, srch);
>  	else if (btf_type_is_datasec(t))
> -		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt);
> +		return btf_find_datasec_var(btf, t, srch);
>  	return -EINVAL;
>  }
>  
> @@ -3729,47 +3825,51 @@ static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
>  struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
>  				    u32 field_mask, u32 value_size)
>  {
> -	struct btf_field_info info_arr[BTF_FIELDS_MAX];
> +	struct btf_field_info_search srch;
>  	u32 next_off = 0, field_type_size;
> +	struct btf_field_info *info;
>  	struct btf_record *rec;
>  	int ret, i, cnt;
>  
> -	ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
> -	if (ret < 0)
> -		return ERR_PTR(ret);
> -	if (!ret)
> -		return NULL;
> +	memset(&srch, 0, sizeof(srch));
> +	srch.field_mask = field_mask;
> +	ret = btf_find_field(btf, t, &srch);
> +	if (ret <= 0)
> +		goto end_srch;
>  
>  	cnt = ret;
>  	/* This needs to be kzalloc to zero out padding and unused fields, see
>  	 * comment in btf_record_equal.
>  	 */
>  	rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN);
> -	if (!rec)
> -		return ERR_PTR(-ENOMEM);
> +	if (!rec) {
> +		ret = -ENOMEM;
> +		goto end_srch;
> +	}
>  
>  	rec->spin_lock_off = -EINVAL;
>  	rec->timer_off = -EINVAL;
>  	rec->refcount_off = -EINVAL;
>  	for (i = 0; i < cnt; i++) {
> -		field_type_size = btf_field_type_size(info_arr[i].type);
> -		if (info_arr[i].off + field_type_size > value_size) {
> -			WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
> +		info = &srch.infos[i];
> +		field_type_size = btf_field_type_size(info->type);
> +		if (info->off + field_type_size > value_size) {
> +			WARN_ONCE(1, "verifier bug off %d size %d", info->off, value_size);
>  			ret = -EFAULT;
>  			goto end;
>  		}
> -		if (info_arr[i].off < next_off) {
> +		if (info->off < next_off) {
>  			ret = -EEXIST;
>  			goto end;
>  		}
> -		next_off = info_arr[i].off + field_type_size;
> +		next_off = info->off + field_type_size;
>  
> -		rec->field_mask |= info_arr[i].type;
> -		rec->fields[i].offset = info_arr[i].off;
> -		rec->fields[i].type = info_arr[i].type;
> +		rec->field_mask |= info->type;
> +		rec->fields[i].offset = info->off;
> +		rec->fields[i].type = info->type;
>  		rec->fields[i].size = field_type_size;
>  
> -		switch (info_arr[i].type) {
> +		switch (info->type) {
>  		case BPF_SPIN_LOCK:
>  			WARN_ON_ONCE(rec->spin_lock_off >= 0);
>  			/* Cache offset for faster lookup at runtime */
> @@ -3788,17 +3888,17 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
>  		case BPF_KPTR_UNREF:
>  		case BPF_KPTR_REF:
>  		case BPF_KPTR_PERCPU:
> -			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
> +			ret = btf_parse_kptr(btf, &rec->fields[i], info);
>  			if (ret < 0)
>  				goto end;
>  			break;
>  		case BPF_LIST_HEAD:
> -			ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
> +			ret = btf_parse_list_head(btf, &rec->fields[i], info);
>  			if (ret < 0)
>  				goto end;
>  			break;
>  		case BPF_RB_ROOT:
> -			ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
> +			ret = btf_parse_rb_root(btf, &rec->fields[i], info);
>  			if (ret < 0)
>  				goto end;
>  			break;
> @@ -3828,10 +3928,13 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
>  
>  	sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
>  	       NULL, rec);
> +	kfree(srch.infos);
>  
>  	return rec;
>  end:
>  	btf_record_free(rec);
> +end_srch:
> +	kfree(srch.infos);
>  	return ERR_PTR(ret);
>  }
>  
> -- 
> 2.34.1
> 
>
Yonghong Song Oct. 30, 2023, 7:31 p.m. UTC | #2
On 10/23/23 3:00 PM, Dave Marchevsky wrote:
> btf_find_field takes (btf_type, special_field_types) and returns info
> about the specific special fields in btf_type, in the form of an array
> of struct btf_field info. The meat of this 'search for special fields'

struct btf_field_info.

> process happens in btf_find_datasec_var and btf_find_struct_field
> helpers: each member is examined and, if it's special, a struct
> btf_field_info describing it is added to the return array. Indeed, any
> function that might add to the output probably also looks at struct
> members or datasec vars.
>
> Most of the parameters passed around between helpers doing the search
> can be grouped into two categories: "info about the output array" and
> "info about which fields to search for". This patch joins those together
> in struct btf_field_info_search, simplifying the signatures of most
> helpers involved in the search, including array flattening helper that
> later patches in the series will add.
>
> The aforementioned array flattening logic will greatly increase the
> number of btf_field_info's needed to describe some structs, so this
> patch also turns the statically-sized struct btf_field_info
> info_arr[BTF_FIELDS_MAX] into a growable array with a larger max size.

Since this patch is a 'refactoring' patch, let us delay this patch
until the next one. This patch should be strictly a refactoring
patch so it becomes easier to verify changes.

>
> Implementation notes:
>    * BTF_FIELDS_MAX is now max size of growable btf_field_info *infos
>      instead of initial (and max) size of static result array
>      * Static array before had 10 elems (+1 tmp btf_field_info)
>      * growable array starts with 16 and doubles every time it needs to
>        grow, up to BTF_FIELDS_MAX of 256
>
>    * __next_field_infos is used with next_cnt > 1 later in the series
>
>    * btf_find_{datasec_var, struct_field} have special logic for an edge
>      case where the result array is full but the field being examined
>      gets BTF_FIELD_IGNORE return from btf_find_{struct, kptr,graph_root}
>      * If result wasn't BTF_FIELD_IGNORE, a btf_field_info would have to
>        be added to the array. Since it is we can look at next field.
>      * Before this patch the logic handling this edge case was hard to
>        follow and used a tmp btf_struct_info. This patch moves the
>        add-if-not-ignore logic down into btf_find_{struct, kptr,
>        graph_root}, removing the need to opportunistically grab a
>        btf_field_info to populate before knowing if it's actually
>        necessary. Now a new one is grabbed only if the field shouldn't
>        be ignored.

This extra 'tmp' btf_field_info approach sounds okay to me
in the original code. The previous code had a static size
and there was no realloc. Now you introduced realloc, so
removing the extra 'tmp' does seem to make sense.
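
For readers following along, the pre-patch pattern being removed is
(condensed from the old btf_find_struct_field loop):

  ret = btf_find_struct(btf, member_type, off, sz, field_type,
                        idx < info_cnt ? &info[idx] : &tmp);
  if (ret < 0)
          return ret;
  if (ret == BTF_FIELD_IGNORE)
          continue;       /* any write to tmp is simply discarded */
  if (idx >= info_cnt)
          return -E2BIG;  /* a field that must be recorded no longer fits */
  ++idx;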


>
>    * Within btf_find_{datasec_var, struct_field}, each member is
>      currently examined in two phases: first btf_get_field_type checks
>      the member type name, then btf_find_{struct,graph_root,kptr} do
>      additional sanity checking and populate struct btf_field_info. Kptr
>      fields don't have a specific type name, though, so
>      btf_get_field_type assumes that - if we're looking for kptrs - any
>      member that fails type name check could be a kptr field.
>      * As a result btf_find_kptr effectively does all the pointer
>        hopping, sanity checking, and info population. Instead of trying
>        to fit kptr handling into this two-phase model, where it's
>        unwieldy, handle it in a separate codepath when name matching
>        fails.
>
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
> ---
>   include/linux/bpf.h |   4 +-
>   kernel/bpf/btf.c    | 331 +++++++++++++++++++++++++++++---------------
>   2 files changed, 219 insertions(+), 116 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index b4825d3cdb29..e07cac5cc3cf 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -171,8 +171,8 @@ struct bpf_map_ops {
>   };
>   
>   enum {
> -	/* Support at most 10 fields in a BTF type */
> -	BTF_FIELDS_MAX	   = 10,
> +	/* Support at most 256 fields in a BTF type */
> +	BTF_FIELDS_MAX	   = 256,
>   };
>   
>   enum btf_field_type {
> diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
> index 975ef8e73393..e999ba85c363 100644
> --- a/kernel/bpf/btf.c
> +++ b/kernel/bpf/btf.c
> @@ -3257,25 +3257,94 @@ struct btf_field_info {
>   	};
>   };
>   
> +struct btf_field_info_search {
> +	/* growable array. allocated in __next_field_infos
> +	 * free'd in btf_parse_fields
> +	 */
> +	struct btf_field_info *infos;
> +	/* size of infos */
> +	int info_cnt;
> +	/* index of next btf_field_info to populate */
> +	int idx;
> +
> +	/* btf_field_types to search for */
> +	u32 field_mask;
> +	/* btf_field_types found earlier */
> +	u32 seen_mask;
> +};
> +
> +/* Reserve next_cnt contiguous btf_field_info's for caller to populate
> + * Returns ptr to first reserved btf_field_info
> + */
> +static struct btf_field_info *__next_field_infos(struct btf_field_info_search *srch,
> +						 u32 next_cnt)
> +{
> +	struct btf_field_info *new_infos, *ret;
> +
> +	if (!next_cnt)
> +		return ERR_PTR(-EINVAL);

Looks like next_cnt is never 0.

> +
> +	if (srch->idx + next_cnt < srch->info_cnt)
> +		goto nogrow_out;
> +
> +	/* Need to grow */
> +	if (srch->idx + next_cnt > BTF_FIELDS_MAX)
> +		return ERR_PTR(-E2BIG);
> +
> +	while (srch->idx + next_cnt >= srch->info_cnt)
> +		srch->info_cnt = srch->infos ? srch->info_cnt * 2 : 16;
> +
> +	new_infos = krealloc(srch->infos,
> +			     srch->info_cnt * sizeof(struct btf_field_info),
> +			     GFP_KERNEL | __GFP_NOWARN);
> +	if (!new_infos)
> +		return ERR_PTR(-ENOMEM);
> +	srch->infos = new_infos;
> +
> +nogrow_out:
> +	ret = &srch->infos[srch->idx];
> +	srch->idx += next_cnt;
> +	return ret;
> +}
> +
> +/* Request srch's next free btf_field_info to populate, possibly growing
> + * srch->infos
> + */
> +static struct btf_field_info *__next_field_info(struct btf_field_info_search *srch)
> +{
> +	return __next_field_infos(srch, 1);
> +}
> +
>   static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
>   			   u32 off, int sz, enum btf_field_type field_type,
> -			   struct btf_field_info *info)
> +			   struct btf_field_info_search *srch)
>   {
> +	struct btf_field_info *info;
> +
>   	if (!__btf_type_is_struct(t))
>   		return BTF_FIELD_IGNORE;
>   	if (t->size != sz)
>   		return BTF_FIELD_IGNORE;
> +
> +	info = __next_field_info(srch);
> +	if (IS_ERR_OR_NULL(info))
> +		return PTR_ERR(info);

info cannot be NULL.

> +
>   	info->type = field_type;
>   	info->off = off;
>   	return BTF_FIELD_FOUND;
>   }
>   
> -static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
> -			 u32 off, int sz, struct btf_field_info *info)
> +static int btf_maybe_find_kptr(const struct btf *btf, const struct btf_type *t,
> +			       u32 off, struct btf_field_info_search *srch)
>   {
> +	struct btf_field_info *info;
>   	enum btf_field_type type;
>   	u32 res_id;
>   
> +	if (!(srch->field_mask & BPF_KPTR))
> +		return BTF_FIELD_IGNORE;
> +
>   	/* Permit modifiers on the pointer itself */
>   	if (btf_type_is_volatile(t))
>   		t = btf_type_by_id(btf, t->type);
> @@ -3304,6 +3373,10 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
>   	if (!__btf_type_is_struct(t))
>   		return -EINVAL;
>   
> +	info = __next_field_info(srch);
> +	if (IS_ERR_OR_NULL(info))
> +		return PTR_ERR(info);

info cannot be NULL.

> +
>   	info->type = type;
>   	info->off = off;
>   	info->kptr.type_id = res_id;
> @@ -3340,9 +3413,10 @@ const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type
>   static int
>   btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
>   		    const struct btf_type *t, int comp_idx, u32 off,
> -		    int sz, struct btf_field_info *info,
> +		    int sz, struct btf_field_info_search *srch,
>   		    enum btf_field_type head_type)
>   {
> +	struct btf_field_info *info;
>   	const char *node_field_name;
>   	const char *value_type;
>   	s32 id;
> @@ -3367,6 +3441,11 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
>   	node_field_name++;
>   	if (str_is_empty(node_field_name))
>   		return -EINVAL;
> +
> +	info = __next_field_info(srch);
> +	if (IS_ERR_OR_NULL(info))
> +		return PTR_ERR(info);
> +

ditto.

>   	info->type = head_type;
>   	info->off = off;
>   	info->graph_root.value_btf_id = id;
> @@ -3374,25 +3453,24 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
>   	return BTF_FIELD_FOUND;
>   }
>   
> -#define field_mask_test_name(field_type, field_type_str)		\
> -	if (field_mask & field_type && !strcmp(name, field_type_str)) {	\
> -		type = field_type;					\
> -		goto end;						\
> +#define field_mask_test_name(field_type, field_type_str)			\
> +	if (srch->field_mask & field_type && !strcmp(name, field_type_str)) {	\
> +		return field_type;						\
>   	}
>   
> -#define field_mask_test_name_check_seen(field_type, field_type_str)	\
> -	if (field_mask & field_type && !strcmp(name, field_type_str)) {	\
> -		if (*seen_mask & field_type)				\
> -			return -E2BIG;					\
> -		*seen_mask |= field_type;				\
> -		type = field_type;					\
> -		goto end;						\
> +#define field_mask_test_name_check_seen(field_type, field_type_str)		\
> +	if (srch->field_mask & field_type && !strcmp(name, field_type_str)) {	\
> +		if (srch->seen_mask & field_type)				\
> +			return -E2BIG;						\
> +		srch->seen_mask |= field_type;					\
> +		return field_type;						\
>   	}
>   
> -static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
> -			      int *align, int *sz)

[...]
Yonghong Song Oct. 30, 2023, 7:56 p.m. UTC | #3
On 10/30/23 12:31 PM, Yonghong Song wrote:
>
> On 10/23/23 3:00 PM, Dave Marchevsky wrote:
>> btf_find_field takes (btf_type, special_field_types) and returns info
>> about the specific special fields in btf_type, in the form of an array
>> of struct btf_field info. The meat of this 'search for special fields'
>
> struct btf_field_info.
>
>> process happens in btf_find_datasec_var and btf_find_struct_field
>> helpers: each member is examined and, if it's special, a struct
>> btf_field_info describing it is added to the return array. Indeed, any
>> function that might add to the output probably also looks at struct
>> members or datasec vars.
>>
>> Most of the parameters passed around between helpers doing the search
>> can be grouped into two categories: "info about the output array" and
>> "info about which fields to search for". This patch joins those together
>> in struct btf_field_info_search, simplifying the signatures of most
>> helpers involved in the search, including array flattening helper that
>> later patches in the series will add.
>>
>> The aforementioned array flattening logic will greatly increase the
>> number of btf_field_info's needed to describe some structs, so this
>> patch also turns the statically-sized struct btf_field_info
>> info_arr[BTF_FIELDS_MAX] into a growable array with a larger max size.
>
> Since this patch is a 'refactoring' patch, let us delay this patch


sorry. typo. "this patch" -> "this change".


> until the next one. This patch should be strictly a refactoring
> patch so it becomes easier to verify changes.
>
>>
>> Implementation notes:
>>    * BTF_FIELDS_MAX is now max size of growable btf_field_info *infos
>>      instead of initial (and max) size of static result array
>>      * Static array before had 10 elems (+1 tmp btf_field_info)
>>      * growable array starts with 16 and doubles every time it needs to
>>        grow, up to BTF_FIELDS_MAX of 256
>>
>>    * __next_field_infos is used with next_cnt > 1 later in the series
>>
>>    * btf_find_{datasec_var, struct_field} have special logic for an edge
>>      case where the result array is full but the field being examined
>>      gets BTF_FIELD_IGNORE return from btf_find_{struct, kptr,graph_root}
>>      * If result wasn't BTF_FIELD_IGNORE, a btf_field_info would have to
>>        be added to the array. Since it is we can look at next field.
>>      * Before this patch the logic handling this edge case was hard to
>>        follow and used a tmp btf_struct_info. This patch moves the
>>        add-if-not-ignore logic down into btf_find_{struct, kptr,
>>        graph_root}, removing the need to opportunistically grab a
>>        btf_field_info to populate before knowing if it's actually
>>        necessary. Now a new one is grabbed only if the field shouldn't
>>        be ignored.
>
> This extra 'tmp' btf_field_info approach sounds okay to me
> in the original code. The previous code had a static size
> and there was no realloc. Now you introduced realloc, so
> removing the extra 'tmp' does seem to make sense.
>
>
>>
>>    * Within btf_find_{datasec_var, struct_field}, each member is
>>      currently examined in two phases: first btf_get_field_type checks
>>      the member type name, then btf_find_{struct,graph_root,kptr} do
>>      additional sanity checking and populate struct btf_field_info. Kptr
>>      fields don't have a specific type name, though, so
>>      btf_get_field_type assumes that - if we're looking for kptrs - any
>>      member that fails type name check could be a kptr field.
>>      * As a result btf_find_kptr effectively does all the pointer
>>        hopping, sanity checking, and info population. Instead of trying
>>        to fit kptr handling into this two-phase model, where it's
>>        unwieldy, handle it in a separate codepath when name matching
>>        fails.
>>
>> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
>> ---
>>   include/linux/bpf.h |   4 +-
>>   kernel/bpf/btf.c    | 331 +++++++++++++++++++++++++++++---------------
>>   2 files changed, 219 insertions(+), 116 deletions(-)
>>
>> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
>> index b4825d3cdb29..e07cac5cc3cf 100644
>> --- a/include/linux/bpf.h
>> +++ b/include/linux/bpf.h
>> @@ -171,8 +171,8 @@ struct bpf_map_ops {
>>   };
>>     enum {
>> -    /* Support at most 10 fields in a BTF type */
>> -    BTF_FIELDS_MAX       = 10,
>> +    /* Support at most 256 fields in a BTF type */
>> +    BTF_FIELDS_MAX       = 256,
>>   };
>>     enum btf_field_type {
>> diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
>> index 975ef8e73393..e999ba85c363 100644
>> --- a/kernel/bpf/btf.c
>> +++ b/kernel/bpf/btf.c
>> @@ -3257,25 +3257,94 @@ struct btf_field_info {
>>       };
>>   };
>>   +struct btf_field_info_search {
>> +    /* growable array. allocated in __next_field_infos
>> +     * free'd in btf_parse_fields
>> +     */
>> +    struct btf_field_info *infos;
>> +    /* size of infos */
>> +    int info_cnt;
>> +    /* index of next btf_field_info to populate */
>> +    int idx;
>> +
>> +    /* btf_field_types to search for */
>> +    u32 field_mask;
>> +    /* btf_field_types found earlier */
>> +    u32 seen_mask;
>> +};
>> +
>> +/* Reserve next_cnt contiguous btf_field_info's for caller to populate
>> + * Returns ptr to first reserved btf_field_info
>> + */
>> +static struct btf_field_info *__next_field_infos(struct btf_field_info_search *srch,
>> +                         u32 next_cnt)
>> +{
>> +    struct btf_field_info *new_infos, *ret;
>> +
>> +    if (!next_cnt)
>> +        return ERR_PTR(-EINVAL);
>
> Looks like next_cnt is never 0.
>
>> +
>> +    if (srch->idx + next_cnt < srch->info_cnt)
>> +        goto nogrow_out;
>> +
>> +    /* Need to grow */
>> +    if (srch->idx + next_cnt > BTF_FIELDS_MAX)
>> +        return ERR_PTR(-E2BIG);
>> +
>> +    while (srch->idx + next_cnt >= srch->info_cnt)
>> +        srch->info_cnt = srch->infos ? srch->info_cnt * 2 : 16;
>> +
>> +    new_infos = krealloc(srch->infos,
>> +                 srch->info_cnt * sizeof(struct btf_field_info),
>> +                 GFP_KERNEL | __GFP_NOWARN);
>> +    if (!new_infos)
>> +        return ERR_PTR(-ENOMEM);
>> +    srch->infos = new_infos;
>> +
>> +nogrow_out:
>> +    ret = &srch->infos[srch->idx];
>> +    srch->idx += next_cnt;
>> +    return ret;
>> +}
>> +
>> +/* Request srch's next free btf_field_info to populate, possibly growing
>> + * srch->infos
>> + */
>> +static struct btf_field_info *__next_field_info(struct btf_field_info_search *srch)
>> +{
>> +    return __next_field_infos(srch, 1);
>> +}
>> +
>>   static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
>>                  u32 off, int sz, enum btf_field_type field_type,
>> -               struct btf_field_info *info)
>> +               struct btf_field_info_search *srch)
>>   {
>> +    struct btf_field_info *info;
>> +
>>       if (!__btf_type_is_struct(t))
>>           return BTF_FIELD_IGNORE;
>>       if (t->size != sz)
>>           return BTF_FIELD_IGNORE;
>> +
>> +    info = __next_field_info(srch);
>> +    if (IS_ERR_OR_NULL(info))
>> +        return PTR_ERR(info);
>
> info cannot be NULL.
>
>> +
>>       info->type = field_type;
>>       info->off = off;
>>       return BTF_FIELD_FOUND;
>>   }
>>   -static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
>> -             u32 off, int sz, struct btf_field_info *info)
>> +static int btf_maybe_find_kptr(const struct btf *btf, const struct btf_type *t,
>> +                   u32 off, struct btf_field_info_search *srch)
>>   {
>> +    struct btf_field_info *info;
>>       enum btf_field_type type;
>>       u32 res_id;
>>   +    if (!(srch->field_mask & BPF_KPTR))
>> +        return BTF_FIELD_IGNORE;
>> +
>>       /* Permit modifiers on the pointer itself */
>>       if (btf_type_is_volatile(t))
>>           t = btf_type_by_id(btf, t->type);
>> @@ -3304,6 +3373,10 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
>>       if (!__btf_type_is_struct(t))
>>           return -EINVAL;
>>   +    info = __next_field_info(srch);
>> +    if (IS_ERR_OR_NULL(info))
>> +        return PTR_ERR(info);
>
> info cannot be NULL.
>
>> +
>>       info->type = type;
>>       info->off = off;
>>       info->kptr.type_id = res_id;
>> @@ -3340,9 +3413,10 @@ const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type
>>   static int
>>   btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
>>               const struct btf_type *t, int comp_idx, u32 off,
>> -            int sz, struct btf_field_info *info,
>> +            int sz, struct btf_field_info_search *srch,
>>               enum btf_field_type head_type)
>>   {
>> +    struct btf_field_info *info;
>>       const char *node_field_name;
>>       const char *value_type;
>>       s32 id;
>> @@ -3367,6 +3441,11 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
>>       node_field_name++;
>>       if (str_is_empty(node_field_name))
>>           return -EINVAL;
>> +
>> +    info = __next_field_info(srch);
>> +    if (IS_ERR_OR_NULL(info))
>> +        return PTR_ERR(info);
>> +
>
> ditto.
>
>>       info->type = head_type;
>>       info->off = off;
>>       info->graph_root.value_btf_id = id;
>> @@ -3374,25 +3453,24 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
>>       return BTF_FIELD_FOUND;
>>   }
>>   -#define field_mask_test_name(field_type, field_type_str)        \
>> -    if (field_mask & field_type && !strcmp(name, field_type_str)) {    \
>> -        type = field_type;                    \
>> -        goto end;                        \
>> +#define field_mask_test_name(field_type, field_type_str)            \
>> +    if (srch->field_mask & field_type && !strcmp(name, field_type_str)) {    \
>> +        return field_type;                        \
>>       }
>>   -#define field_mask_test_name_check_seen(field_type, field_type_str)    \
>> -    if (field_mask & field_type && !strcmp(name, field_type_str)) {    \
>> -        if (*seen_mask & field_type)                \
>> -            return -E2BIG;                    \
>> -        *seen_mask |= field_type;                \
>> -        type = field_type;                    \
>> -        goto end;                        \
>> +#define field_mask_test_name_check_seen(field_type, field_type_str)        \
>> +    if (srch->field_mask & field_type && !strcmp(name, field_type_str)) {    \
>> +        if (srch->seen_mask & field_type) \
>> +            return -E2BIG;                        \
>> +        srch->seen_mask |= field_type;                    \
>> +        return field_type;                        \
>>       }
>>   -static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
>> -                  int *align, int *sz)
>
> [...]
>
>
Andrii Nakryiko Nov. 1, 2023, 9:56 p.m. UTC | #4
On Mon, Oct 23, 2023 at 3:00 PM Dave Marchevsky <davemarchevsky@fb.com> wrote:
>
> btf_find_field takes (btf_type, special_field_types) and returns info
> about the specific special fields in btf_type, in the form of an array
> of struct btf_field info. The meat of this 'search for special fields'
> process happens in btf_find_datasec_var and btf_find_struct_field
> helpers: each member is examined and, if it's special, a struct
> btf_field_info describing it is added to the return array. Indeed, any
> function that might add to the output probably also looks at struct
> members or datasec vars.
>
> Most of the parameters passed around between helpers doing the search
> can be grouped into two categories: "info about the output array" and
> "info about which fields to search for". This patch joins those together
> in struct btf_field_info_search, simplifying the signatures of most
> helpers involved in the search, including array flattening helper that
> later patches in the series will add.
>
> The aforementioned array flattening logic will greatly increase the
> number of btf_field_info's needed to describe some structs, so this
> patch also turns the statically-sized struct btf_field_info
> info_arr[BTF_FIELDS_MAX] into a growable array with a larger max size.
>
> Implementation notes:
>   * BTF_FIELDS_MAX is now max size of growable btf_field_info *infos
>     instead of initial (and max) size of static result array
>     * Static array before had 10 elems (+1 tmp btf_field_info)
>     * growable array starts with 16 and doubles every time it needs to
>       grow, up to BTF_FIELDS_MAX of 256
>
>   * __next_field_infos is used with next_cnt > 1 later in the series
>
>   * btf_find_{datasec_var, struct_field} have special logic for an edge
>     case where the result array is full but the field being examined
>     gets BTF_FIELD_IGNORE return from btf_find_{struct, kptr,graph_root}
>     * If result wasn't BTF_FIELD_IGNORE, a btf_field_info would have to
>       be added to the array. Since it is we can look at next field.
>     * Before this patch the logic handling this edge case was hard to
>       follow and used a tmp btf_struct_info. This patch moves the
>       add-if-not-ignore logic down into btf_find_{struct, kptr,
>       graph_root}, removing the need to opportunistically grab a
>       btf_field_info to populate before knowing if it's actually
>       necessary. Now a new one is grabbed only if the field shouldn't
>       be ignored.
>
>   * Within btf_find_{datasec_var, struct_field}, each member is
>     currently examined in two phases: first btf_get_field_type checks
>     the member type name, then btf_find_{struct,graph_root,kptr} do
>     additional sanity checking and populate struct btf_field_info. Kptr
>     fields don't have a specific type name, though, so
>     btf_get_field_type assumes that - if we're looking for kptrs - any
>     member that fails type name check could be a kptr field.
>     * As a result btf_find_kptr effectively does all the pointer
>       hopping, sanity checking, and info population. Instead of trying
>       to fit kptr handling into this two-phase model, where it's
>       unwieldy, handle it in a separate codepath when name matching
>       fails.
>
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
> ---
>  include/linux/bpf.h |   4 +-
>  kernel/bpf/btf.c    | 331 +++++++++++++++++++++++++++++---------------
>  2 files changed, 219 insertions(+), 116 deletions(-)
>
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index b4825d3cdb29..e07cac5cc3cf 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -171,8 +171,8 @@ struct bpf_map_ops {
>  };
>
>  enum {
> -       /* Support at most 10 fields in a BTF type */
> -       BTF_FIELDS_MAX     = 10,
> +       /* Support at most 256 fields in a BTF type */
> +       BTF_FIELDS_MAX     = 256,
>  };
>
>  enum btf_field_type {
> diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
> index 975ef8e73393..e999ba85c363 100644
> --- a/kernel/bpf/btf.c
> +++ b/kernel/bpf/btf.c
> @@ -3257,25 +3257,94 @@ struct btf_field_info {
>         };
>  };
>
> +struct btf_field_info_search {
> +       /* growable array. allocated in __next_field_infos
> +        * free'd in btf_parse_fields
> +        */
> +       struct btf_field_info *infos;
> +       /* size of infos */
> +       int info_cnt;
> +       /* index of next btf_field_info to populate */
> +       int idx;

this seems like pretty unconventional naming; typically info_cnt would be
called "cap" (for capacity) and idx would be "cnt" (number of actually
filled elements)
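
i.e. something like this (illustrative only):

  struct btf_field_info_search {
          struct btf_field_info *infos;
          int cap;        /* allocated capacity of infos */
          int cnt;        /* number of filled elements */
          u32 field_mask;
          u32 seen_mask;
  };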

> +
> +       /* btf_field_types to search for */
> +       u32 field_mask;
> +       /* btf_field_types found earlier */
> +       u32 seen_mask;
> +};
> +
> +/* Reserve next_cnt contiguous btf_field_info's for caller to populate
> + * Returns ptr to first reserved btf_field_info
> + */
> +static struct btf_field_info *__next_field_infos(struct btf_field_info_search *srch,
> +                                                u32 next_cnt)

both next_cnt and __next_field_infos that do allocation are quite
surprising; this is a function to add/append more elements, so why not
add_field_infos() and new_cnt or add_cnt? The terminology around
"next" is confusing, IMO, because you'd expect this to be about
iteration, not memory allocation.

> +{
> +       struct btf_field_info *new_infos, *ret;
> +
> +       if (!next_cnt)
> +               return ERR_PTR(-EINVAL);
> +
> +       if (srch->idx + next_cnt < srch->info_cnt)
> +               goto nogrow_out;

why goto? just update count and return a pointer? Goto makes sense if
there are at least two code paths with the same exit logic.

> +
> +       /* Need to grow */
> +       if (srch->idx + next_cnt > BTF_FIELDS_MAX)
> +               return ERR_PTR(-E2BIG);
> +
> +       while (srch->idx + next_cnt >= srch->info_cnt)
> +               srch->info_cnt = srch->infos ? srch->info_cnt * 2 : 16;

I think krealloc() is smart enough to not allocate the exact number of
bytes, and instead rounds it up to the closest bucket size. So I think
you can just keep it simple and ask for `srch->idx + next_cnt`
elements, and leave the smartness to krealloc(). Not sure why 16 is the
starting size, though? How many elements do we typically have? 1, 2,
3, 5?
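
i.e. an untested sketch of that simplification:

  if (srch->idx + next_cnt > BTF_FIELDS_MAX)
          return ERR_PTR(-E2BIG);

  srch->info_cnt = srch->idx + next_cnt;
  new_infos = krealloc(srch->infos,
                       srch->info_cnt * sizeof(*new_infos),
                       GFP_KERNEL | __GFP_NOWARN);
  if (!new_infos)
          return ERR_PTR(-ENOMEM);
  srch->infos = new_infos;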

> +
> +       new_infos = krealloc(srch->infos,
> +                            srch->info_cnt * sizeof(struct btf_field_info),
> +                            GFP_KERNEL | __GFP_NOWARN);
> +       if (!new_infos)
> +               return ERR_PTR(-ENOMEM);
> +       srch->infos = new_infos;
> +
> +nogrow_out:
> +       ret = &srch->infos[srch->idx];
> +       srch->idx += next_cnt;
> +       return ret;
> +}
> +
> +/* Request srch's next free btf_field_info to populate, possibly growing
> + * srch->infos
> + */
> +static struct btf_field_info *__next_field_info(struct btf_field_info_search *srch)
> +{
> +       return __next_field_infos(srch, 1);
> +}
> +
>  static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
>                            u32 off, int sz, enum btf_field_type field_type,
> -                          struct btf_field_info *info)
> +                          struct btf_field_info_search *srch)
>  {
> +       struct btf_field_info *info;
> +
>         if (!__btf_type_is_struct(t))
>                 return BTF_FIELD_IGNORE;
>         if (t->size != sz)
>                 return BTF_FIELD_IGNORE;
> +
> +       info = __next_field_info(srch);
> +       if (IS_ERR_OR_NULL(info))

Can it return NULL? If not, let's not check for NULL at all, it's misleading.

> +               return PTR_ERR(info);
> +
>         info->type = field_type;
>         info->off = off;
>         return BTF_FIELD_FOUND;
>  }
>
> -static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
> -                        u32 off, int sz, struct btf_field_info *info)
> +static int btf_maybe_find_kptr(const struct btf *btf, const struct btf_type *t,
> +                              u32 off, struct btf_field_info_search *srch)
>  {
> +       struct btf_field_info *info;
>         enum btf_field_type type;
>         u32 res_id;
>
> +       if (!(srch->field_mask & BPF_KPTR))
> +               return BTF_FIELD_IGNORE;
> +
>         /* Permit modifiers on the pointer itself */
>         if (btf_type_is_volatile(t))
>                 t = btf_type_by_id(btf, t->type);
> @@ -3304,6 +3373,10 @@ static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
>         if (!__btf_type_is_struct(t))
>                 return -EINVAL;
>
> +       info = __next_field_info(srch);
> +       if (IS_ERR_OR_NULL(info))
> +               return PTR_ERR(info);
> +

ditto

>         info->type = type;
>         info->off = off;
>         info->kptr.type_id = res_id;

[...]

>  #undef field_mask_test_name_check_seen
>  #undef field_mask_test_name
>
> +static int __struct_member_check_align(u32 off, enum btf_field_type field_type)
> +{
> +       u32 align = btf_field_type_align(field_type);
> +
> +       if (off % align)

maybe guard against division by zero? Hitting the WARN_ON_ONCE() in
btf_field_type_align() shouldn't cause a crash here
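
e.g. something like this (untested):

  static int __struct_member_check_align(u32 off, enum btf_field_type field_type)
  {
          u32 align = btf_field_type_align(field_type);

          if (!align)
                  return -EFAULT; /* don't divide by zero if the WARN path was hit */
          if (off % align)
                  return -EINVAL;
          return 0;
  }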

> +               return -EINVAL;
> +       return 0;
> +}
> +
>  static int btf_find_struct_field(const struct btf *btf,
> -                                const struct btf_type *t, u32 field_mask,
> -                                struct btf_field_info *info, int info_cnt)
> +                                const struct btf_type *t,
> +                                struct btf_field_info_search *srch)
>  {
> -       int ret, idx = 0, align, sz, field_type;
>         const struct btf_member *member;
> -       struct btf_field_info tmp;
> -       u32 i, off, seen_mask = 0;
> +       int ret, field_type;
> +       u32 i, off, sz;
>

[...]

> +
> +static int __datasec_vsi_check_align_sz(const struct btf_var_secinfo *vsi,
> +                                       enum btf_field_type field_type,
> +                                       u32 expected_sz)
> +{
> +       u32 off, align;
> +
> +       off = vsi->offset;
> +       align = btf_field_type_align(field_type);
> +
> +       if (vsi->size != expected_sz)
> +               return -EINVAL;
> +       if (off % align)
> +               return -EINVAL;

same about possible align == 0

> +
> +       return 0;
>  }
>

[...]
diff mbox series

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b4825d3cdb29..e07cac5cc3cf 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -171,8 +171,8 @@  struct bpf_map_ops {
 };
 
 enum {
-	/* Support at most 10 fields in a BTF type */
-	BTF_FIELDS_MAX	   = 10,
+	/* Support at most 256 fields in a BTF type */
+	BTF_FIELDS_MAX	   = 256,
 };
 
 enum btf_field_type {
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 975ef8e73393..e999ba85c363 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3257,25 +3257,94 @@  struct btf_field_info {
 	};
 };
 
+struct btf_field_info_search {
+	/* growable array. allocated in __next_field_infos
+	 * free'd in btf_parse_fields
+	 */
+	struct btf_field_info *infos;
+	/* size of infos */
+	int info_cnt;
+	/* index of next btf_field_info to populate */
+	int idx;
+
+	/* btf_field_types to search for */
+	u32 field_mask;
+	/* btf_field_types found earlier */
+	u32 seen_mask;
+};
+
+/* Reserve next_cnt contiguous btf_field_info's for caller to populate
+ * Returns ptr to first reserved btf_field_info
+ */
+static struct btf_field_info *__next_field_infos(struct btf_field_info_search *srch,
+						 u32 next_cnt)
+{
+	struct btf_field_info *new_infos, *ret;
+
+	if (!next_cnt)
+		return ERR_PTR(-EINVAL);
+
+	if (srch->idx + next_cnt < srch->info_cnt)
+		goto nogrow_out;
+
+	/* Need to grow */
+	if (srch->idx + next_cnt > BTF_FIELDS_MAX)
+		return ERR_PTR(-E2BIG);
+
+	while (srch->idx + next_cnt >= srch->info_cnt)
+		srch->info_cnt = srch->infos ? srch->info_cnt * 2 : 16;
+
+	new_infos = krealloc(srch->infos,
+			     srch->info_cnt * sizeof(struct btf_field_info),
+			     GFP_KERNEL | __GFP_NOWARN);
+	if (!new_infos)
+		return ERR_PTR(-ENOMEM);
+	srch->infos = new_infos;
+
+nogrow_out:
+	ret = &srch->infos[srch->idx];
+	srch->idx += next_cnt;
+	return ret;
+}
+
+/* Request srch's next free btf_field_info to populate, possibly growing
+ * srch->infos
+ */
+static struct btf_field_info *__next_field_info(struct btf_field_info_search *srch)
+{
+	return __next_field_infos(srch, 1);
+}
+
 static int btf_find_struct(const struct btf *btf, const struct btf_type *t,
 			   u32 off, int sz, enum btf_field_type field_type,
-			   struct btf_field_info *info)
+			   struct btf_field_info_search *srch)
 {
+	struct btf_field_info *info;
+
 	if (!__btf_type_is_struct(t))
 		return BTF_FIELD_IGNORE;
 	if (t->size != sz)
 		return BTF_FIELD_IGNORE;
+
+	info = __next_field_info(srch);
+	if (IS_ERR_OR_NULL(info))
+		return PTR_ERR(info);
+
 	info->type = field_type;
 	info->off = off;
 	return BTF_FIELD_FOUND;
 }
 
-static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
-			 u32 off, int sz, struct btf_field_info *info)
+static int btf_maybe_find_kptr(const struct btf *btf, const struct btf_type *t,
+			       u32 off, struct btf_field_info_search *srch)
 {
+	struct btf_field_info *info;
 	enum btf_field_type type;
 	u32 res_id;
 
+	if (!(srch->field_mask & BPF_KPTR))
+		return BTF_FIELD_IGNORE;
+
 	/* Permit modifiers on the pointer itself */
 	if (btf_type_is_volatile(t))
 		t = btf_type_by_id(btf, t->type);
@@ -3304,6 +3373,10 @@  static int btf_find_kptr(const struct btf *btf, const struct btf_type *t,
 	if (!__btf_type_is_struct(t))
 		return -EINVAL;
 
+	info = __next_field_info(srch);
+	if (IS_ERR_OR_NULL(info))
+		return PTR_ERR(info);
+
 	info->type = type;
 	info->off = off;
 	info->kptr.type_id = res_id;
@@ -3340,9 +3413,10 @@  const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type
 static int
 btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
 		    const struct btf_type *t, int comp_idx, u32 off,
-		    int sz, struct btf_field_info *info,
+		    int sz, struct btf_field_info_search *srch,
 		    enum btf_field_type head_type)
 {
+	struct btf_field_info *info;
 	const char *node_field_name;
 	const char *value_type;
 	s32 id;
@@ -3367,6 +3441,11 @@  btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
 	node_field_name++;
 	if (str_is_empty(node_field_name))
 		return -EINVAL;
+
+	info = __next_field_info(srch);
+	if (IS_ERR_OR_NULL(info))
+		return PTR_ERR(info);
+
 	info->type = head_type;
 	info->off = off;
 	info->graph_root.value_btf_id = id;
@@ -3374,25 +3453,24 @@  btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
 	return BTF_FIELD_FOUND;
 }
 
-#define field_mask_test_name(field_type, field_type_str)		\
-	if (field_mask & field_type && !strcmp(name, field_type_str)) {	\
-		type = field_type;					\
-		goto end;						\
+#define field_mask_test_name(field_type, field_type_str)			\
+	if (srch->field_mask & field_type && !strcmp(name, field_type_str)) {	\
+		return field_type;						\
 	}
 
-#define field_mask_test_name_check_seen(field_type, field_type_str)	\
-	if (field_mask & field_type && !strcmp(name, field_type_str)) {	\
-		if (*seen_mask & field_type)				\
-			return -E2BIG;					\
-		*seen_mask |= field_type;				\
-		type = field_type;					\
-		goto end;						\
+#define field_mask_test_name_check_seen(field_type, field_type_str)		\
+	if (srch->field_mask & field_type && !strcmp(name, field_type_str)) {	\
+		if (srch->seen_mask & field_type)				\
+			return -E2BIG;						\
+		srch->seen_mask |= field_type;					\
+		return field_type;						\
 	}
 
-static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
-			      int *align, int *sz)
+static int btf_get_field_type_by_name(const struct btf *btf,
+				      const struct btf_type *t,
+				      struct btf_field_info_search *srch)
 {
-	int type = 0;
+	const char *name = __btf_name_by_offset(btf, t->name_off);
 
 	field_mask_test_name_check_seen(BPF_SPIN_LOCK, "bpf_spin_lock");
 	field_mask_test_name_check_seen(BPF_TIMER,     "bpf_timer");
@@ -3403,47 +3481,58 @@  static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask,
 	field_mask_test_name(BPF_RB_ROOT,   "bpf_rb_root");
 	field_mask_test_name(BPF_RB_NODE,   "bpf_rb_node");
 
-	/* Only return BPF_KPTR when all other types with matchable names fail */
-	if (field_mask & BPF_KPTR) {
-		type = BPF_KPTR_REF;
-		goto end;
-	}
 	return 0;
-end:
-	*sz = btf_field_type_size(type);
-	*align = btf_field_type_align(type);
-	return type;
 }
 
 #undef field_mask_test_name_check_seen
 #undef field_mask_test_name
 
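+/* Returns -EINVAL if @off is not suitably aligned for @field_type. Callers
+ * treat a nonzero return as "misaligned, skip this member" rather than as a
+ * hard error, preserving the pre-refactor behavior.
+ */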
+static int __struct_member_check_align(u32 off, enum btf_field_type field_type)
+{
+	u32 align = btf_field_type_align(field_type);
+
+	if (off % align)
+		return -EINVAL;
+	return 0;
+}
+
 static int btf_find_struct_field(const struct btf *btf,
-				 const struct btf_type *t, u32 field_mask,
-				 struct btf_field_info *info, int info_cnt)
+				 const struct btf_type *t,
+				 struct btf_field_info_search *srch)
 {
-	int ret, idx = 0, align, sz, field_type;
 	const struct btf_member *member;
-	struct btf_field_info tmp;
-	u32 i, off, seen_mask = 0;
+	int ret, field_type;
+	u32 i, off, sz;
 
 	for_each_member(i, t, member) {
 		const struct btf_type *member_type = btf_type_by_id(btf,
 								    member->type);
-
-		field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off),
-						field_mask, &seen_mask, &align, &sz);
-		if (field_type == 0)
-			continue;
-		if (field_type < 0)
-			return field_type;
-
 		off = __btf_member_bit_offset(t, member);
 		if (off % 8)
 			/* valid C code cannot generate such BTF */
 			return -EINVAL;
 		off /= 8;
-		if (off % align)
+
+		field_type = btf_get_field_type_by_name(btf, member_type, srch);
+		if (field_type < 0)
+			return field_type;
+
+		if (field_type == 0) {
+			/* No named special type matched; the member may
+			 * still be a kptr. Use BPF_KPTR_REF for the
+			 * alignment check, since all pointers share the
+			 * same alignment; btf_maybe_find_kptr() determines
+			 * the actual kptr type.
+			 */
+			if (__struct_member_check_align(off, BPF_KPTR_REF))
+				continue;
+
+			ret = btf_maybe_find_kptr(btf, member_type, off, srch);
+			if (ret < 0)
+				return ret;
+			continue;
+		}
+
+		sz = btf_field_type_size(field_type);
+		if (__struct_member_check_align(off, field_type))
 			continue;
 
 		switch (field_type) {
@@ -3453,64 +3542,81 @@  static int btf_find_struct_field(const struct btf *btf,
 		case BPF_RB_NODE:
 		case BPF_REFCOUNT:
 			ret = btf_find_struct(btf, member_type, off, sz, field_type,
-					      idx < info_cnt ? &info[idx] : &tmp);
-			if (ret < 0)
-				return ret;
-			break;
-		case BPF_KPTR_UNREF:
-		case BPF_KPTR_REF:
-		case BPF_KPTR_PERCPU:
-			ret = btf_find_kptr(btf, member_type, off, sz,
-					    idx < info_cnt ? &info[idx] : &tmp);
+					      srch);
 			if (ret < 0)
 				return ret;
 			break;
 		case BPF_LIST_HEAD:
 		case BPF_RB_ROOT:
 			ret = btf_find_graph_root(btf, t, member_type,
-						  i, off, sz,
-						  idx < info_cnt ? &info[idx] : &tmp,
-						  field_type);
+						  i, off, sz, srch, field_type);
 			if (ret < 0)
 				return ret;
 			break;
+		/* kptr fields are not handled in this switch; see
+		 * btf_maybe_find_kptr() above.
+		 */
+		case BPF_KPTR_UNREF:
+		case BPF_KPTR_REF:
+		case BPF_KPTR_PERCPU:
 		default:
 			return -EFAULT;
 		}
-
-		if (ret == BTF_FIELD_IGNORE)
-			continue;
-		if (idx >= info_cnt)
-			return -E2BIG;
-		++idx;
 	}
-	return idx;
+	return srch->idx;
+}
+
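+/* Datasec counterpart of __struct_member_check_align(): additionally checks
+ * that the var's size matches the expected field size. As above, callers
+ * treat a nonzero return as "skip this var".
+ */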
+static int __datasec_vsi_check_align_sz(const struct btf_var_secinfo *vsi,
+					enum btf_field_type field_type,
+					u32 expected_sz)
+{
+	u32 off, align;
+
+	off = vsi->offset;
+	align = btf_field_type_align(field_type);
+
+	if (vsi->size != expected_sz)
+		return -EINVAL;
+	if (off % align)
+		return -EINVAL;
+
+	return 0;
 }
 
 static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
-				u32 field_mask, struct btf_field_info *info,
-				int info_cnt)
+				struct btf_field_info_search *srch)
 {
-	int ret, idx = 0, align, sz, field_type;
 	const struct btf_var_secinfo *vsi;
-	struct btf_field_info tmp;
-	u32 i, off, seen_mask = 0;
+	int ret, field_type;
+	u32 i, off, sz;
 
 	for_each_vsi(i, t, vsi) {
 		const struct btf_type *var = btf_type_by_id(btf, vsi->type);
 		const struct btf_type *var_type = btf_type_by_id(btf, var->type);
 
-		field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off),
-						field_mask, &seen_mask, &align, &sz);
-		if (field_type == 0)
-			continue;
+		off = vsi->offset;
+		field_type = btf_get_field_type_by_name(btf, var_type, srch);
 		if (field_type < 0)
 			return field_type;
 
-		off = vsi->offset;
-		if (vsi->size != sz)
+		if (field_type == 0) {
+			/* No named special type matched; the var may still
+			 * be a kptr. Use BPF_KPTR_REF for the size and
+			 * alignment checks, since all pointers share the
+			 * same size and alignment; btf_maybe_find_kptr()
+			 * determines the actual kptr type.
+			 */
+			sz = btf_field_type_size(BPF_KPTR_REF);
+			if (__datasec_vsi_check_align_sz(vsi, BPF_KPTR_REF, sz))
+				continue;
+
+			ret = btf_maybe_find_kptr(btf, var_type, off, srch);
+			if (ret < 0)
+				return ret;
 			continue;
-		if (off % align)
+		}
+
+		sz = btf_field_type_size(field_type);
+
+		if (__datasec_vsi_check_align_sz(vsi, field_type, sz))
 			continue;
 
 		switch (field_type) {
@@ -3520,48 +3626,38 @@  static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t,
 		case BPF_RB_NODE:
 		case BPF_REFCOUNT:
 			ret = btf_find_struct(btf, var_type, off, sz, field_type,
-					      idx < info_cnt ? &info[idx] : &tmp);
-			if (ret < 0)
-				return ret;
-			break;
-		case BPF_KPTR_UNREF:
-		case BPF_KPTR_REF:
-		case BPF_KPTR_PERCPU:
-			ret = btf_find_kptr(btf, var_type, off, sz,
-					    idx < info_cnt ? &info[idx] : &tmp);
+					      srch);
 			if (ret < 0)
 				return ret;
 			break;
 		case BPF_LIST_HEAD:
 		case BPF_RB_ROOT:
 			ret = btf_find_graph_root(btf, var, var_type,
-						  -1, off, sz,
-						  idx < info_cnt ? &info[idx] : &tmp,
+						  -1, off, sz, srch,
 						  field_type);
 			if (ret < 0)
 				return ret;
 			break;
+		/* kptr fields are not handled in this switch; see
+		 * btf_maybe_find_kptr() above.
+		 */
+		case BPF_KPTR_UNREF:
+		case BPF_KPTR_REF:
+		case BPF_KPTR_PERCPU:
 		default:
 			return -EFAULT;
 		}
-
-		if (ret == BTF_FIELD_IGNORE)
-			continue;
-		if (idx >= info_cnt)
-			return -E2BIG;
-		++idx;
 	}
-	return idx;
+	return srch->idx;
 }
 
 static int btf_find_field(const struct btf *btf, const struct btf_type *t,
-			  u32 field_mask, struct btf_field_info *info,
-			  int info_cnt)
+			  struct btf_field_info_search *srch)
 {
 	if (__btf_type_is_struct(t))
-		return btf_find_struct_field(btf, t, field_mask, info, info_cnt);
+		return btf_find_struct_field(btf, t, srch);
 	else if (btf_type_is_datasec(t))
-		return btf_find_datasec_var(btf, t, field_mask, info, info_cnt);
+		return btf_find_datasec_var(btf, t, srch);
 	return -EINVAL;
 }
 
@@ -3729,47 +3825,51 @@  static int btf_field_cmp(const void *_a, const void *_b, const void *priv)
 struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type *t,
 				    u32 field_mask, u32 value_size)
 {
-	struct btf_field_info info_arr[BTF_FIELDS_MAX];
+	struct btf_field_info_search srch;
 	u32 next_off = 0, field_type_size;
+	struct btf_field_info *info;
 	struct btf_record *rec;
 	int ret, i, cnt;
 
-	ret = btf_find_field(btf, t, field_mask, info_arr, ARRAY_SIZE(info_arr));
-	if (ret < 0)
-		return ERR_PTR(ret);
-	if (!ret)
-		return NULL;
+	memset(&srch, 0, sizeof(srch));
+	srch.field_mask = field_mask;
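+	/* srch.infos starts out NULL (zeroed above); __next_field_info() is
+	 * assumed to allocate it on demand, so both exit paths below must
+	 * kfree() it. kfree(NULL) is a no-op if nothing was ever found.
+	 */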
+	ret = btf_find_field(btf, t, &srch);
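+	/* ERR_PTR(0) is NULL, so ret == 0 (no fields found) still yields a
+	 * NULL return via end_srch, matching the previous behavior.
+	 */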
+	if (ret <= 0)
+		goto end_srch;
 
 	cnt = ret;
 	/* This needs to be kzalloc to zero out padding and unused fields, see
 	 * comment in btf_record_equal.
 	 */
 	rec = kzalloc(offsetof(struct btf_record, fields[cnt]), GFP_KERNEL | __GFP_NOWARN);
-	if (!rec)
-		return ERR_PTR(-ENOMEM);
+	if (!rec) {
+		ret = -ENOMEM;
+		goto end_srch;
+	}
 
 	rec->spin_lock_off = -EINVAL;
 	rec->timer_off = -EINVAL;
 	rec->refcount_off = -EINVAL;
 	for (i = 0; i < cnt; i++) {
-		field_type_size = btf_field_type_size(info_arr[i].type);
-		if (info_arr[i].off + field_type_size > value_size) {
-			WARN_ONCE(1, "verifier bug off %d size %d", info_arr[i].off, value_size);
+		info = &srch.infos[i];
+		field_type_size = btf_field_type_size(info->type);
+		if (info->off + field_type_size > value_size) {
+			WARN_ONCE(1, "verifier bug off %d size %d", info->off, value_size);
 			ret = -EFAULT;
 			goto end;
 		}
-		if (info_arr[i].off < next_off) {
+		if (info->off < next_off) {
 			ret = -EEXIST;
 			goto end;
 		}
-		next_off = info_arr[i].off + field_type_size;
+		next_off = info->off + field_type_size;
 
-		rec->field_mask |= info_arr[i].type;
-		rec->fields[i].offset = info_arr[i].off;
-		rec->fields[i].type = info_arr[i].type;
+		rec->field_mask |= info->type;
+		rec->fields[i].offset = info->off;
+		rec->fields[i].type = info->type;
 		rec->fields[i].size = field_type_size;
 
-		switch (info_arr[i].type) {
+		switch (info->type) {
 		case BPF_SPIN_LOCK:
 			WARN_ON_ONCE(rec->spin_lock_off >= 0);
 			/* Cache offset for faster lookup at runtime */
@@ -3788,17 +3888,17 @@  struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
 		case BPF_KPTR_UNREF:
 		case BPF_KPTR_REF:
 		case BPF_KPTR_PERCPU:
-			ret = btf_parse_kptr(btf, &rec->fields[i], &info_arr[i]);
+			ret = btf_parse_kptr(btf, &rec->fields[i], info);
 			if (ret < 0)
 				goto end;
 			break;
 		case BPF_LIST_HEAD:
-			ret = btf_parse_list_head(btf, &rec->fields[i], &info_arr[i]);
+			ret = btf_parse_list_head(btf, &rec->fields[i], info);
 			if (ret < 0)
 				goto end;
 			break;
 		case BPF_RB_ROOT:
-			ret = btf_parse_rb_root(btf, &rec->fields[i], &info_arr[i]);
+			ret = btf_parse_rb_root(btf, &rec->fields[i], info);
 			if (ret < 0)
 				goto end;
 			break;
@@ -3828,10 +3928,13 @@  struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
 
 	sort_r(rec->fields, rec->cnt, sizeof(struct btf_field), btf_field_cmp,
 	       NULL, rec);
+	kfree(srch.infos);
 
 	return rec;
 end:
 	btf_record_free(rec);
+end_srch:
+	kfree(srch.infos);
 	return ERR_PTR(ret);
 }