diff mbox series

[bpf-next,v2,2/9] bpftool: Honor BPF_CORE_TYPE_MATCHES relocation

Message ID 20220623212205.2805002-3-deso@posteo.net (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series Introduce type match support | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-3 fail Logs for Kernel LATEST on z15 with gcc
bpf/vmtest-bpf-next-VM_Test-1 fail Logs for Kernel LATEST on ubuntu-latest with gcc
bpf/vmtest-bpf-next-VM_Test-2 fail Logs for Kernel LATEST on ubuntu-latest with llvm-15
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 6 maintainers not CCed: netdev@vger.kernel.org songliubraving@fb.com yhs@fb.com john.fastabend@gmail.com kafai@fb.com kpsingh@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 100 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Daniel Müller June 23, 2022, 9:21 p.m. UTC
bpftool needs to know about the newly introduced BPF_CORE_TYPE_MATCHES
relocation for its 'gen min_core_btf' command to work properly in the
presence of this relocation.
Specifically, we need to make sure to mark types and fields so that they
are present in the minimized BTF for "type match" checks to work out.
However, contrary to the existing btfgen_record_field_relo, we need to
rely on the BTF -- and not the spec -- to find fields. With this change
we handle this new variant correctly. The functionality will be tested
with follow on changes to BPF selftests, which already run against a
minimized BTF created with bpftool.

Cc: Quentin Monnet <quentin@isovalent.com>
Signed-off-by: Daniel Müller <deso@posteo.net>
---
 tools/bpf/bpftool/gen.c | 107 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 107 insertions(+)

Comments

Quentin Monnet June 24, 2022, 11:37 a.m. UTC | #1
2022-06-23 21:21 UTC+0000 ~ Daniel Müller <deso@posteo.net>
> bpftool needs to know about the newly introduced BPF_CORE_TYPE_MATCHES
> relocation for its 'gen min_core_btf' command to work properly in the
> present of this relocation.
> Specifically, we need to make sure to mark types and fields so that they
> are present in the minimized BTF for "type match" checks to work out.
> However, contrary to the existing btfgen_record_field_relo, we need to
> rely on the BTF -- and not the spec -- to find fields. With this change
> we handle this new variant correctly. The functionality will be tested
> with follow on changes to BPF selftests, which already run against a
> minimized BTF created with bpftool.
> 
> Cc: Quentin Monnet <quentin@isovalent.com>
> Signed-off-by: Daniel Müller <deso@posteo.net>
> ---
>  tools/bpf/bpftool/gen.c | 107 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 107 insertions(+)
> 
> diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
> index 480cbd8..6cd0ed 100644
> --- a/tools/bpf/bpftool/gen.c
> +++ b/tools/bpf/bpftool/gen.c
> @@ -1856,6 +1856,111 @@ static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_sp
>  	return 0;
>  }
>  
> +/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
> + * this function does not rely on the target spec for inferring members, but
> + * uses the associated BTF.
> + *
> + * The `behind_ptr` argument is used to stop marking of composite types reached
> + * through a pointer. This way, we keep can keep BTF size in check while

Typo, "we keep can keep"

> + * providing reasonable match semantics.
> + */
> +static int btfgen_mark_types_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
> +{
> +	const struct btf_type *btf_type;
> +	struct btf *btf = info->src_btf;
> +	struct btf_type *cloned_type;
> +	int i, err;
> +
> +	if (type_id == 0)
> +		return 0;
> +
> +	btf_type = btf__type_by_id(btf, type_id);
> +	/* mark type on cloned BTF as used */
> +	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
> +	cloned_type->name_off = MARKED;
> +
> +	switch (btf_kind(btf_type)) {
> +	case BTF_KIND_UNKN:
> +	case BTF_KIND_INT:
> +	case BTF_KIND_FLOAT:
> +	case BTF_KIND_ENUM:
> +	case BTF_KIND_ENUM64:
> +		break;
> +	case BTF_KIND_STRUCT:
> +	case BTF_KIND_UNION: {
> +		struct btf_member *m = btf_members(btf_type);
> +		__u16 vlen = btf_vlen(btf_type);
> +
> +		if (behind_ptr)
> +			break;
> +
> +		for (i = 0; i < vlen; i++, m++) {
> +			/* mark member */
> +			btfgen_mark_member(info, type_id, i);
> +
> +			/* mark member's type */
> +			err = btfgen_mark_types_match(info, m->type, false);
> +			if (err)
> +				return err;
> +		}
> +		break;
> +	}
> +	case BTF_KIND_CONST:
> +	case BTF_KIND_FWD:
> +	case BTF_KIND_VOLATILE:
> +	case BTF_KIND_TYPEDEF:
> +		return btfgen_mark_types_match(info, btf_type->type, false);
> +	case BTF_KIND_PTR:
> +		return btfgen_mark_types_match(info, btf_type->type, true);
> +	case BTF_KIND_ARRAY: {
> +		struct btf_array *array;
> +
> +		array = btf_array(btf_type);
> +		/* mark array type */
> +		err = btfgen_mark_types_match(info, array->type, false);
> +		/* mark array's index type */
> +		err = err ? : btfgen_mark_types_match(info, array->index_type, false);
> +		if (err)
> +			return err;
> +		break;
> +	}
> +	case BTF_KIND_FUNC_PROTO: {
> +		__u16 vlen = btf_vlen(btf_type);
> +		struct btf_param *param;
> +
> +		/* mark ret type */
> +		err = btfgen_mark_types_match(info, btf_type->type, false);
> +		if (err)
> +			return err;
> +
> +		/* mark parameters types */
> +		param = btf_params(btf_type);
> +		for (i = 0; i < vlen; i++) {
> +			err = btfgen_mark_types_match(info, param->type, false);
> +			if (err)
> +				return err;
> +			param++;
> +		}
> +		break;
> +	}
> +	/* tells if some other type needs to be handled */
> +	default:
> +		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
> + * this function does not rely on the target spec for inferring members, but
> + * uses the associated BTF.
> + */
> +static int btfgen_record_types_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)

Nit: Maybe btfgen_record_type_match_relo() ("type" singular), for
consistency with btfgen_record_type_relo()?

> +{
> +	return btfgen_mark_types_match(info, targ_spec->root_type_id, false);
> +}
> +
>  static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
>  {
>  	return btfgen_mark_type(info, targ_spec->root_type_id, true);
> @@ -1882,6 +1987,8 @@ static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *r
>  	case BPF_CORE_TYPE_EXISTS:
>  	case BPF_CORE_TYPE_SIZE:
>  		return btfgen_record_type_relo(info, res);
> +	case BPF_CORE_TYPE_MATCHES:
> +		return btfgen_record_types_match_relo(info, res);
>  	case BPF_CORE_ENUMVAL_EXISTS:
>  	case BPF_CORE_ENUMVAL_VALUE:
>  		return btfgen_record_enumval_relo(info, res);

Aside from the minor nits, the patch looks good to me. Thanks!

Acked-by: Quentin Monnet <quentin@isovalent.com>
Andrii Nakryiko June 24, 2022, 9:25 p.m. UTC | #2
On Thu, Jun 23, 2022 at 2:22 PM Daniel Müller <deso@posteo.net> wrote:
>
> bpftool needs to know about the newly introduced BPF_CORE_TYPE_MATCHES
> relocation for its 'gen min_core_btf' command to work properly in the
> present of this relocation.
> Specifically, we need to make sure to mark types and fields so that they
> are present in the minimized BTF for "type match" checks to work out.
> However, contrary to the existing btfgen_record_field_relo, we need to
> rely on the BTF -- and not the spec -- to find fields. With this change
> we handle this new variant correctly. The functionality will be tested
> with follow on changes to BPF selftests, which already run against a
> minimized BTF created with bpftool.
>
> Cc: Quentin Monnet <quentin@isovalent.com>
> Signed-off-by: Daniel Müller <deso@posteo.net>
> ---
>  tools/bpf/bpftool/gen.c | 107 ++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 107 insertions(+)
>
> diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
> index 480cbd8..6cd0ed 100644
> --- a/tools/bpf/bpftool/gen.c
> +++ b/tools/bpf/bpftool/gen.c
> @@ -1856,6 +1856,111 @@ static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_sp
>         return 0;
>  }
>
> +/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
> + * this function does not rely on the target spec for inferring members, but
> + * uses the associated BTF.
> + *
> + * The `behind_ptr` argument is used to stop marking of composite types reached
> + * through a pointer. This way, we keep can keep BTF size in check while
> + * providing reasonable match semantics.
> + */
> +static int btfgen_mark_types_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
> +{
> +       const struct btf_type *btf_type;
> +       struct btf *btf = info->src_btf;
> +       struct btf_type *cloned_type;
> +       int i, err;
> +
> +       if (type_id == 0)
> +               return 0;
> +
> +       btf_type = btf__type_by_id(btf, type_id);
> +       /* mark type on cloned BTF as used */
> +       cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
> +       cloned_type->name_off = MARKED;
> +
> +       switch (btf_kind(btf_type)) {
> +       case BTF_KIND_UNKN:
> +       case BTF_KIND_INT:
> +       case BTF_KIND_FLOAT:
> +       case BTF_KIND_ENUM:
> +       case BTF_KIND_ENUM64:
> +               break;
> +       case BTF_KIND_STRUCT:
> +       case BTF_KIND_UNION: {
> +               struct btf_member *m = btf_members(btf_type);
> +               __u16 vlen = btf_vlen(btf_type);
> +
> +               if (behind_ptr)
> +                       break;
> +
> +               for (i = 0; i < vlen; i++, m++) {
> +                       /* mark member */
> +                       btfgen_mark_member(info, type_id, i);
> +
> +                       /* mark member's type */
> +                       err = btfgen_mark_types_match(info, m->type, false);
> +                       if (err)
> +                               return err;
> +               }
> +               break;
> +       }
> +       case BTF_KIND_CONST:
> +       case BTF_KIND_FWD:
> +       case BTF_KIND_VOLATILE:
> +       case BTF_KIND_TYPEDEF:

BTF_KIND_RESTRICT is missing?


> +               return btfgen_mark_types_match(info, btf_type->type, false);
> +       case BTF_KIND_PTR:
> +               return btfgen_mark_types_match(info, btf_type->type, true);
> +       case BTF_KIND_ARRAY: {
> +               struct btf_array *array;
> +
> +               array = btf_array(btf_type);
> +               /* mark array type */
> +               err = btfgen_mark_types_match(info, array->type, false);
> +               /* mark array's index type */
> +               err = err ? : btfgen_mark_types_match(info, array->index_type, false);
> +               if (err)
> +                       return err;
> +               break;
> +       }

[...]
Daniel Müller June 27, 2022, 4:43 p.m. UTC | #3
On Fri, Jun 24, 2022 at 12:37:09PM +0100, Quentin Monnet wrote:
> 2022-06-23 21:21 UTC+0000 ~ Daniel Müller <deso@posteo.net>
> > bpftool needs to know about the newly introduced BPF_CORE_TYPE_MATCHES
> > relocation for its 'gen min_core_btf' command to work properly in the
> > present of this relocation.
> > Specifically, we need to make sure to mark types and fields so that they
> > are present in the minimized BTF for "type match" checks to work out.
> > However, contrary to the existing btfgen_record_field_relo, we need to
> > rely on the BTF -- and not the spec -- to find fields. With this change
> > we handle this new variant correctly. The functionality will be tested
> > with follow on changes to BPF selftests, which already run against a
> > minimized BTF created with bpftool.
> > 
> > Cc: Quentin Monnet <quentin@isovalent.com>
> > Signed-off-by: Daniel Müller <deso@posteo.net>
> > ---
> >  tools/bpf/bpftool/gen.c | 107 ++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 107 insertions(+)
> > 
> > diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
> > index 480cbd8..6cd0ed 100644
> > --- a/tools/bpf/bpftool/gen.c
> > +++ b/tools/bpf/bpftool/gen.c
> > @@ -1856,6 +1856,111 @@ static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_sp
> >  	return 0;
> >  }
> >  
> > +/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
> > + * this function does not rely on the target spec for inferring members, but
> > + * uses the associated BTF.
> > + *
> > + * The `behind_ptr` argument is used to stop marking of composite types reached
> > + * through a pointer. This way, we keep can keep BTF size in check while
> 
> Typo, "we keep can keep"

Fixed. Thanks!

> > + * providing reasonable match semantics.
> > + */
> > +static int btfgen_mark_types_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
> > +{
> > +	const struct btf_type *btf_type;
> > +	struct btf *btf = info->src_btf;
> > +	struct btf_type *cloned_type;
> > +	int i, err;
> > +
> > +	if (type_id == 0)
> > +		return 0;
> > +
> > +	btf_type = btf__type_by_id(btf, type_id);
> > +	/* mark type on cloned BTF as used */
> > +	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
> > +	cloned_type->name_off = MARKED;
> > +
> > +	switch (btf_kind(btf_type)) {
> > +	case BTF_KIND_UNKN:
> > +	case BTF_KIND_INT:
> > +	case BTF_KIND_FLOAT:
> > +	case BTF_KIND_ENUM:
> > +	case BTF_KIND_ENUM64:
> > +		break;
> > +	case BTF_KIND_STRUCT:
> > +	case BTF_KIND_UNION: {
> > +		struct btf_member *m = btf_members(btf_type);
> > +		__u16 vlen = btf_vlen(btf_type);
> > +
> > +		if (behind_ptr)
> > +			break;
> > +
> > +		for (i = 0; i < vlen; i++, m++) {
> > +			/* mark member */
> > +			btfgen_mark_member(info, type_id, i);
> > +
> > +			/* mark member's type */
> > +			err = btfgen_mark_types_match(info, m->type, false);
> > +			if (err)
> > +				return err;
> > +		}
> > +		break;
> > +	}
> > +	case BTF_KIND_CONST:
> > +	case BTF_KIND_FWD:
> > +	case BTF_KIND_VOLATILE:
> > +	case BTF_KIND_TYPEDEF:
> > +		return btfgen_mark_types_match(info, btf_type->type, false);
> > +	case BTF_KIND_PTR:
> > +		return btfgen_mark_types_match(info, btf_type->type, true);
> > +	case BTF_KIND_ARRAY: {
> > +		struct btf_array *array;
> > +
> > +		array = btf_array(btf_type);
> > +		/* mark array type */
> > +		err = btfgen_mark_types_match(info, array->type, false);
> > +		/* mark array's index type */
> > +		err = err ? : btfgen_mark_types_match(info, array->index_type, false);
> > +		if (err)
> > +			return err;
> > +		break;
> > +	}
> > +	case BTF_KIND_FUNC_PROTO: {
> > +		__u16 vlen = btf_vlen(btf_type);
> > +		struct btf_param *param;
> > +
> > +		/* mark ret type */
> > +		err = btfgen_mark_types_match(info, btf_type->type, false);
> > +		if (err)
> > +			return err;
> > +
> > +		/* mark parameters types */
> > +		param = btf_params(btf_type);
> > +		for (i = 0; i < vlen; i++) {
> > +			err = btfgen_mark_types_match(info, param->type, false);
> > +			if (err)
> > +				return err;
> > +			param++;
> > +		}
> > +		break;
> > +	}
> > +	/* tells if some other type needs to be handled */
> > +	default:
> > +		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
> > +		return -EINVAL;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
> > + * this function does not rely on the target spec for inferring members, but
> > + * uses the associated BTF.
> > + */
> > +static int btfgen_record_types_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
> 
> Nit: Maybe btfgen_record_type_match_relo() ("type" singular), for
> consistency with btfgen_record_type_relo()?

Sure, changed.

> > +{
> > +	return btfgen_mark_types_match(info, targ_spec->root_type_id, false);
> > +}
> > +
> >  static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
> >  {
> >  	return btfgen_mark_type(info, targ_spec->root_type_id, true);
> > @@ -1882,6 +1987,8 @@ static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *r
> >  	case BPF_CORE_TYPE_EXISTS:
> >  	case BPF_CORE_TYPE_SIZE:
> >  		return btfgen_record_type_relo(info, res);
> > +	case BPF_CORE_TYPE_MATCHES:
> > +		return btfgen_record_types_match_relo(info, res);
> >  	case BPF_CORE_ENUMVAL_EXISTS:
> >  	case BPF_CORE_ENUMVAL_VALUE:
> >  		return btfgen_record_enumval_relo(info, res);
> 
> Aside from the minor nits, the patch looks good to me. Thanks!

Thanks for your review!

Daniel

> Acked-by: Quentin Monnet <quentin@isovalent.com>
Daniel Müller June 27, 2022, 4:50 p.m. UTC | #4
On Fri, Jun 24, 2022 at 02:25:50PM -0700, Andrii Nakryiko wrote:
> On Thu, Jun 23, 2022 at 2:22 PM Daniel Müller <deso@posteo.net> wrote:
> >
> > bpftool needs to know about the newly introduced BPF_CORE_TYPE_MATCHES
> > relocation for its 'gen min_core_btf' command to work properly in the
> > present of this relocation.
> > Specifically, we need to make sure to mark types and fields so that they
> > are present in the minimized BTF for "type match" checks to work out.
> > However, contrary to the existing btfgen_record_field_relo, we need to
> > rely on the BTF -- and not the spec -- to find fields. With this change
> > we handle this new variant correctly. The functionality will be tested
> > with follow on changes to BPF selftests, which already run against a
> > minimized BTF created with bpftool.
> >
> > Cc: Quentin Monnet <quentin@isovalent.com>
> > Signed-off-by: Daniel Müller <deso@posteo.net>
> > ---
> >  tools/bpf/bpftool/gen.c | 107 ++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 107 insertions(+)
> >
> > diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
> > index 480cbd8..6cd0ed 100644
> > --- a/tools/bpf/bpftool/gen.c
> > +++ b/tools/bpf/bpftool/gen.c
> > @@ -1856,6 +1856,111 @@ static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_sp
> >         return 0;
> >  }
> >
> > +/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
> > + * this function does not rely on the target spec for inferring members, but
> > + * uses the associated BTF.
> > + *
> > + * The `behind_ptr` argument is used to stop marking of composite types reached
> > + * through a pointer. This way, we keep can keep BTF size in check while
> > + * providing reasonable match semantics.
> > + */
> > +static int btfgen_mark_types_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
> > +{
> > +       const struct btf_type *btf_type;
> > +       struct btf *btf = info->src_btf;
> > +       struct btf_type *cloned_type;
> > +       int i, err;
> > +
> > +       if (type_id == 0)
> > +               return 0;
> > +
> > +       btf_type = btf__type_by_id(btf, type_id);
> > +       /* mark type on cloned BTF as used */
> > +       cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
> > +       cloned_type->name_off = MARKED;
> > +
> > +       switch (btf_kind(btf_type)) {
> > +       case BTF_KIND_UNKN:
> > +       case BTF_KIND_INT:
> > +       case BTF_KIND_FLOAT:
> > +       case BTF_KIND_ENUM:
> > +       case BTF_KIND_ENUM64:
> > +               break;
> > +       case BTF_KIND_STRUCT:
> > +       case BTF_KIND_UNION: {
> > +               struct btf_member *m = btf_members(btf_type);
> > +               __u16 vlen = btf_vlen(btf_type);
> > +
> > +               if (behind_ptr)
> > +                       break;
> > +
> > +               for (i = 0; i < vlen; i++, m++) {
> > +                       /* mark member */
> > +                       btfgen_mark_member(info, type_id, i);
> > +
> > +                       /* mark member's type */
> > +                       err = btfgen_mark_types_match(info, m->type, false);
> > +                       if (err)
> > +                               return err;
> > +               }
> > +               break;
> > +       }
> > +       case BTF_KIND_CONST:
> > +       case BTF_KIND_FWD:
> > +       case BTF_KIND_VOLATILE:
> > +       case BTF_KIND_TYPEDEF:
> 
> BTF_KIND_RESTRICT is missing?

Good catch. It's missing in btfgen_mark_type as well. Will add it.

> > +               return btfgen_mark_types_match(info, btf_type->type, false);
> > +       case BTF_KIND_PTR:
> > +               return btfgen_mark_types_match(info, btf_type->type, true);
> > +       case BTF_KIND_ARRAY: {
> > +               struct btf_array *array;
> > +
> > +               array = btf_array(btf_type);
> > +               /* mark array type */
> > +               err = btfgen_mark_types_match(info, array->type, false);
> > +               /* mark array's index type */
> > +               err = err ? : btfgen_mark_types_match(info, array->index_type, false);
> > +               if (err)
> > +                       return err;
> > +               break;
> > +       }
> 
> [...]
diff mbox series

Patch

diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 480cbd8..6cd0ed 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -1856,6 +1856,111 @@  static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_sp
 	return 0;
 }
 
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ *
+ * The `behind_ptr` argument is used to stop marking of composite types reached
+ * through a pointer. This way, we can keep BTF size in check while
+ * providing reasonable match semantics.
+ */
+static int btfgen_mark_types_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
+{
+	const struct btf_type *btf_type;
+	struct btf *btf = info->src_btf;
+	struct btf_type *cloned_type;
+	int i, err;
+
+	if (type_id == 0)
+		return 0;
+
+	btf_type = btf__type_by_id(btf, type_id);
+	/* mark type on cloned BTF as used */
+	cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
+	cloned_type->name_off = MARKED;
+
+	switch (btf_kind(btf_type)) {
+	case BTF_KIND_UNKN:
+	case BTF_KIND_INT:
+	case BTF_KIND_FLOAT:
+	case BTF_KIND_ENUM:
+	case BTF_KIND_ENUM64:
+		break;
+	case BTF_KIND_STRUCT:
+	case BTF_KIND_UNION: {
+		struct btf_member *m = btf_members(btf_type);
+		__u16 vlen = btf_vlen(btf_type);
+
+		if (behind_ptr)
+			break;
+
+		for (i = 0; i < vlen; i++, m++) {
+			/* mark member */
+			btfgen_mark_member(info, type_id, i);
+
+			/* mark member's type */
+			err = btfgen_mark_types_match(info, m->type, false);
+			if (err)
+				return err;
+		}
+		break;
+	}
+	case BTF_KIND_CONST:
+	case BTF_KIND_FWD:
+	case BTF_KIND_VOLATILE:
+	case BTF_KIND_TYPEDEF:
+		return btfgen_mark_types_match(info, btf_type->type, false);
+	case BTF_KIND_PTR:
+		return btfgen_mark_types_match(info, btf_type->type, true);
+	case BTF_KIND_ARRAY: {
+		struct btf_array *array;
+
+		array = btf_array(btf_type);
+		/* mark array type */
+		err = btfgen_mark_types_match(info, array->type, false);
+		/* mark array's index type */
+		err = err ? : btfgen_mark_types_match(info, array->index_type, false);
+		if (err)
+			return err;
+		break;
+	}
+	case BTF_KIND_FUNC_PROTO: {
+		__u16 vlen = btf_vlen(btf_type);
+		struct btf_param *param;
+
+		/* mark ret type */
+		err = btfgen_mark_types_match(info, btf_type->type, false);
+		if (err)
+			return err;
+
+		/* mark parameters types */
+		param = btf_params(btf_type);
+		for (i = 0; i < vlen; i++) {
+			err = btfgen_mark_types_match(info, param->type, false);
+			if (err)
+				return err;
+			param++;
+		}
+		break;
+	}
+	/* tells if some other type needs to be handled */
+	default:
+		p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ */
+static int btfgen_record_types_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+	return btfgen_mark_types_match(info, targ_spec->root_type_id, false);
+}
+
 static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
 {
 	return btfgen_mark_type(info, targ_spec->root_type_id, true);
@@ -1882,6 +1987,8 @@  static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *r
 	case BPF_CORE_TYPE_EXISTS:
 	case BPF_CORE_TYPE_SIZE:
 		return btfgen_record_type_relo(info, res);
+	case BPF_CORE_TYPE_MATCHES:
+		return btfgen_record_types_match_relo(info, res);
 	case BPF_CORE_ENUMVAL_EXISTS:
 	case BPF_CORE_ENUMVAL_VALUE:
 		return btfgen_record_enumval_relo(info, res);