diff mbox series

[bpf-next,1/2] bpftool: improve skeleton backwards compat with old buggy libbpfs

Message ID 20240704001527.754710-2-andrii@kernel.org (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series Fix libbpf BPF skeleton forward/backward compat | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 10 maintainers not CCed: yonghong.song@linux.dev haoluo@google.com jolsa@kernel.org song@kernel.org qmo@kernel.org john.fastabend@gmail.com eddyz87@gmail.com kpsingh@kernel.org martin.lau@linux.dev sdf@google.com
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch warning WARNING: Co-developed-by and Signed-off-by: name/email do not match WARNING: __always_unused or __maybe_unused is preferred over __attribute__((__unused__)) WARNING: line length of 109 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: unnecessary whitespace before a quoted newline
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2

Commit Message

Andrii Nakryiko July 4, 2024, 12:15 a.m. UTC
Old versions of libbpf don't handle varying sizes of bpf_map_skeleton
struct correctly. As such, BPF skeleton generated by newest bpftool
might not be compatible with older libbpf (though only when libbpf is
used as a shared library), even though it, by design, should.

Going forward libbpf will be fixed, plus we'll release bug fixed
versions of relevant old libbpfs, but meanwhile try to mitigate from
bpftool side by conservatively assuming older and smaller definition of
bpf_map_skeleton, if possible. Meaning, if there are no struct_ops maps.

If there are struct_ops, then presumably user would like to have
auto-attaching logic and struct_ops map link placeholders, so use the
full bpf_map_skeleton definition in that case.

Co-developed-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 tools/bpf/bpftool/gen.c | 46 ++++++++++++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 14 deletions(-)

Comments

Quentin Monnet July 4, 2024, 7:21 p.m. UTC | #1
On 04/07/2024 01:15, Andrii Nakryiko wrote:
> Old versions of libbpf don't handle varying sizes of bpf_map_skeleton
> struct correctly. As such, BPF skeleton generated by newest bpftool
> might not be compatible with older libbpf (though only when libbpf is
> used as a shared library), even though it, by design, should.
> 
> Going forward libbpf will be fixed, plus we'll release bug fixed
> versions of relevant old libbpfs, but meanwhile try to mitigate from
> bpftool side by conservatively assuming older and smaller definition of
> bpf_map_skeleton, if possible. Meaning, if there are no struct_ops maps.
> 
> If there are struct_ops, then presumably user would like to have
> auto-attaching logic and struct_ops map link placeholders, so use the
> full bpf_map_skeleton definition in that case.
> 
> Co-developed-by: Mykyta Yatsenko <yatsenko@meta.com>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>

Note: I don't know to what extent we enforce this, but kernel docs state
that "Since Co-developed-by: denotes authorship, every Co-developed-by:
must be immediately followed by a Signed-off-by: of the associated
co-author". Mykyta's sign-off is missing from both patches.

Other than that, the patch looks good, thanks for fixing bpftool!

Acked-by: Quentin Monnet <qmo@kernel.org>
Eduard Zingerman July 4, 2024, 8:31 p.m. UTC | #2
On Wed, 2024-07-03 at 17:15 -0700, Andrii Nakryiko wrote:
> Old versions of libbpf don't handle varying sizes of bpf_map_skeleton
> struct correctly. As such, BPF skeleton generated by newest bpftool
> might not be compatible with older libbpf (though only when libbpf is
> used as a shared library), even though it, by design, should.
> 
> Going forward libbpf will be fixed, plus we'll release bug fixed
> versions of relevant old libbpfs, but meanwhile try to mitigate from
> bpftool side by conservatively assuming older and smaller definition of
> bpf_map_skeleton, if possible. Meaning, if there are no struct_ops maps.
> 
> If there are struct_ops, then presumably user would like to have
> auto-attaching logic and struct_ops map link placeholders, so use the
> full bpf_map_skeleton definition in that case.
> 
> Co-developed-by: Mykyta Yatsenko <yatsenko@meta.com>
> Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> ---

Silly question, here is a fragment of the profiler.skel.h generated
with bpftool build (tools/bpf/bpftool/profiler.skel.h):

static inline int
profiler_bpf__create_skeleton(struct profiler_bpf *obj)
{
	/* ... */

	map = (struct bpf_map_skeleton *)((char *)s->maps + 0 * s->map_skel_sz);
	map->name = "events";
	map->map = &obj->maps.events;

	/* ... 4 more like this ... */

	/* ... */

	s->progs[0].name = "fentry_XXX";
	s->progs[0].prog = &obj->progs.fentry_XXX;
	s->progs[0].link = &obj->links.fentry_XXX;

	s->progs[1].name = "fexit_XXX";
	s->progs[1].prog = &obj->progs.fexit_XXX;
	s->progs[1].link = &obj->links.fexit_XXX;

	/* ... */
}

Do we need to handle 'progs' array access in the same way as maps?

[...]

> @@ -878,23 +895,22 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool
>  
>  		codegen("\
>  			\n\
> -									\n\
> -				s->maps[%zu].name = \"%s\";	    \n\
> -				s->maps[%zu].map = &obj->maps.%s;   \n\
> +								    \n\
> +				map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
> +				map->name = \"%s\";		    \n\
> +				map->map = &obj->maps.%s;	    \n\
>  			",
> -			i, bpf_map__name(map), i, ident);
> +			i, bpf_map__name(map), ident);
>  		/* memory-mapped internal maps */
>  		if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
> -			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
> -				i, ident);
> +			printf("\tmap->mmaped = (void **)&obj->%s;  \n", ident);
                                                                  ^^
                                              nit: this generates extra white space
>  		}
>  
>  		if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
>  			codegen("\
>  				\n\
> -					s->maps[%zu].link = &obj->links.%s;\n\
> -				",
> -				i, ident);
> +					map->link = &obj->links.%s; \n\
> +				", ident);
>  		}
>  		i++;
>  	}

[...]
Andrii Nakryiko July 8, 2024, 5:14 p.m. UTC | #3
On Thu, Jul 4, 2024 at 12:21 PM Quentin Monnet <qmo@kernel.org> wrote:
>
> On 04/07/2024 01:15, Andrii Nakryiko wrote:
> > Old versions of libbpf don't handle varying sizes of bpf_map_skeleton
> > struct correctly. As such, BPF skeleton generated by newest bpftool
> > might not be compatible with older libbpf (though only when libbpf is
> > used as a shared library), even though it, by design, should.
> >
> > Going forward libbpf will be fixed, plus we'll release bug fixed
> > versions of relevant old libbpfs, but meanwhile try to mitigate from
> > bpftool side by conservatively assuming older and smaller definition of
> > bpf_map_skeleton, if possible. Meaning, if there are no struct_ops maps.
> >
> > If there are struct_ops, then presumably user would like to have
> > auto-attaching logic and struct_ops map link placeholders, so use the
> > full bpf_map_skeleton definition in that case.
> >
> > Co-developed-by: Mykyta Yatsenko <yatsenko@meta.com>
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
>
> Note: I don't know to what extent we enforce this, but kernel docs state
> that "Since Co-developed-by: denotes authorship, every Co-developed-by:
> must be immediately followed by a Signed-off-by: of the associated
> co-author". Mykyta's sign-off is missing from both patches.
>

Oh, sorry about that. I don't do co-developed-by often, so didn't
realize. I'll add Mykyta's Signed-off-by in v2, thanks

> Other than that, the patch looks good, thanks for fixing bpftool!
>
> Acked-by: Quentin Monnet <qmo@kernel.org>
Andrii Nakryiko July 8, 2024, 5:15 p.m. UTC | #4
On Thu, Jul 4, 2024 at 1:31 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Wed, 2024-07-03 at 17:15 -0700, Andrii Nakryiko wrote:
> > Old versions of libbpf don't handle varying sizes of bpf_map_skeleton
> > struct correctly. As such, BPF skeleton generated by newest bpftool
> > might not be compatible with older libbpf (though only when libbpf is
> > used as a shared library), even though it, by design, should.
> >
> > Going forward libbpf will be fixed, plus we'll release bug fixed
> > versions of relevant old libbpfs, but meanwhile try to mitigate from
> > bpftool side by conservatively assuming older and smaller definition of
> > bpf_map_skeleton, if possible. Meaning, if there are no struct_ops maps.
> >
> > If there are struct_ops, then presumably user would like to have
> > auto-attaching logic and struct_ops map link placeholders, so use the
> > full bpf_map_skeleton definition in that case.
> >
> > Co-developed-by: Mykyta Yatsenko <yatsenko@meta.com>
> > Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
> > ---
>
> Silly question, here is a fragment of the profiler.skel.h generated
> with bpftool build (tools/bpf/bpftool/profiler.skel.h):
>
> static inline int
> profiler_bpf__create_skeleton(struct profiler_bpf *obj)
> {
>         /* ... */
>
>         map = (struct bpf_map_skeleton *)((char *)s->maps + 0 * s->map_skel_sz);
>         map->name = "events";
>         map->map = &obj->maps.events;
>
>         /* ... 4 more like this ... */
>
>         /* ... */
>
>         s->progs[0].name = "fentry_XXX";
>         s->progs[0].prog = &obj->progs.fentry_XXX;
>         s->progs[0].link = &obj->links.fentry_XXX;
>
>         s->progs[1].name = "fexit_XXX";
>         s->progs[1].prog = &obj->progs.fexit_XXX;
>         s->progs[1].link = &obj->links.fexit_XXX;
>
>         /* ... */
> }
>
> Do we need to handle 'progs' array access in the same way as maps?

Given bpf_prog_skeleton has never been extended yet (and maybe never
will be), I chose not to uglify this unnecessarily. My thinking/hope
is that by the time we get to extending prog_skeleton struct, all
actively used libbpf versions will be patched up and will handle this
correctly without the hacks we have to do for map_skeleton.


>
> [...]
>
> > @@ -878,23 +895,22 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool
> >
> >               codegen("\
> >                       \n\
> > -                                                                     \n\
> > -                             s->maps[%zu].name = \"%s\";         \n\
> > -                             s->maps[%zu].map = &obj->maps.%s;   \n\
> > +                                                                 \n\
> > +                             map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
> > +                             map->name = \"%s\";                 \n\
> > +                             map->map = &obj->maps.%s;           \n\
> >                       ",
> > -                     i, bpf_map__name(map), i, ident);
> > +                     i, bpf_map__name(map), ident);
> >               /* memory-mapped internal maps */
> >               if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
> > -                     printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
> > -                             i, ident);
> > +                     printf("\tmap->mmaped = (void **)&obj->%s;  \n", ident);
>                                                                   ^^
>                                               nit: this generates extra white space
> >               }
> >
> >               if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
> >                       codegen("\
> >                               \n\
> > -                                     s->maps[%zu].link = &obj->links.%s;\n\
> > -                             ",
> > -                             i, ident);
> > +                                     map->link = &obj->links.%s; \n\
> > +                             ", ident);
> >               }
> >               i++;
> >       }
>
> [...]
Eduard Zingerman July 8, 2024, 5:53 p.m. UTC | #5
On Mon, 2024-07-08 at 10:15 -0700, Andrii Nakryiko wrote:

[...]

> > static inline int
> > profiler_bpf__create_skeleton(struct profiler_bpf *obj)
> > {
> >         /* ... */
> > 
> >         map = (struct bpf_map_skeleton *)((char *)s->maps + 0 * s->map_skel_sz);
> >         map->name = "events";
> >         map->map = &obj->maps.events;
> > 
> >         /* ... 4 more like this ... */
> > 
> >         /* ... */
> > 
> >         s->progs[0].name = "fentry_XXX";
> >         s->progs[0].prog = &obj->progs.fentry_XXX;
> >         s->progs[0].link = &obj->links.fentry_XXX;
> > 
> >         s->progs[1].name = "fexit_XXX";
> >         s->progs[1].prog = &obj->progs.fexit_XXX;
> >         s->progs[1].link = &obj->links.fexit_XXX;
> > 
> >         /* ... */
> > }
> > 
> > Do we need to handle 'progs' array access in the same way as maps?
> 
> Given bpf_prog_skeleton has never been extended yet (and maybe never
> will be), I chose not to uglify this unnecessarily. My thinking/hope
> is that by the time we get to extending prog_skeleton struct, all
> actively used libbpf versions will be patched up and will handle this
> correctly without the hacks we have to do for map_skeleton.

Understood, fair enough.

[...]
diff mbox series

Patch

diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index 51eaed76db97..70aaa32ddcc9 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -852,24 +852,41 @@  codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool
 {
 	struct bpf_map *map;
 	char ident[256];
-	size_t i;
+	size_t i, map_sz;
 
 	if (!map_cnt)
 		return;
 
+	/* for backward compatibility with old libbpf versions that don't
+	 * handle new BPF skeleton with new struct bpf_map_skeleton definition
+	 * that includes link field, avoid specifying new increased size,
+	 * unless we absolutely have to (i.e., if there are struct_ops maps
+	 * present)
+	 */
+	map_sz = offsetof(struct bpf_map_skeleton, link);
+	if (populate_links) {
+		bpf_object__for_each_map(map, obj) {
+			if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
+				map_sz = sizeof(struct bpf_map_skeleton);
+				break;
+			}
+		}
+	}
+
 	codegen("\
 		\n\
-									\n\
+								    \n\
 			/* maps */				    \n\
 			s->map_cnt = %zu;			    \n\
-			s->map_skel_sz = sizeof(*s->maps);	    \n\
-			s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
+			s->map_skel_sz = %zu;			    \n\
+			s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\
+					sizeof(*s->maps) > %zu ? sizeof(*s->maps) : %zu);\n\
 			if (!s->maps) {				    \n\
 				err = -ENOMEM;			    \n\
 				goto err;			    \n\
 			}					    \n\
 		",
-		map_cnt
+		map_cnt, map_sz, map_sz, map_sz
 	);
 	i = 0;
 	bpf_object__for_each_map(map, obj) {
@@ -878,23 +895,22 @@  codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool
 
 		codegen("\
 			\n\
-									\n\
-				s->maps[%zu].name = \"%s\";	    \n\
-				s->maps[%zu].map = &obj->maps.%s;   \n\
+								    \n\
+				map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
+				map->name = \"%s\";		    \n\
+				map->map = &obj->maps.%s;	    \n\
 			",
-			i, bpf_map__name(map), i, ident);
+			i, bpf_map__name(map), ident);
 		/* memory-mapped internal maps */
 		if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
-			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
-				i, ident);
+			printf("\tmap->mmaped = (void **)&obj->%s;  \n", ident);
 		}
 
 		if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
 			codegen("\
 				\n\
-					s->maps[%zu].link = &obj->links.%s;\n\
-				",
-				i, ident);
+					map->link = &obj->links.%s; \n\
+				", ident);
 		}
 		i++;
 	}
@@ -1463,6 +1479,7 @@  static int do_skeleton(int argc, char **argv)
 		%1$s__create_skeleton(struct %1$s *obj)			    \n\
 		{							    \n\
 			struct bpf_object_skeleton *s;			    \n\
+			struct bpf_map_skeleton *map __attribute__((unused));\n\
 			int err;					    \n\
 									    \n\
 			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
@@ -1753,6 +1770,7 @@  static int do_subskeleton(int argc, char **argv)
 		{							    \n\
 			struct %1$s *obj;				    \n\
 			struct bpf_object_subskeleton *s;		    \n\
+			struct bpf_map_skeleton *map __attribute__((unused));\n\
 			int err;					    \n\
 									    \n\
 			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\