[bpf-next,2/3] libbpf: move global data mmap()'ing into bpf_object__load()

Message ID 20241023043908.3834423-3-andrii@kernel.org (mailing list archive)
State Accepted
Commit 137978f422516a128326df55c0ba23605f925e21
Delegated to: BPF
Series Fix libbpf's bpf_object and BPF subskel interoperability

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 5 this patch: 5
netdev/build_tools success Errors and warnings before: 4 (+1) this patch: 4 (+1)
netdev/cc_maintainers fail 1 blamed authors not CCed: martin.lau@linux.dev; 9 maintainers not CCed: song@kernel.org haoluo@google.com john.fastabend@gmail.com sdf@fomichev.me martin.lau@linux.dev kpsingh@kernel.org yonghong.song@linux.dev eddyz87@gmail.com jolsa@kernel.org
netdev/build_clang success Errors and warnings before: 5 this patch: 5
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 3 this patch: 3
netdev/checkpatch warning WARNING: line length of 94 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-18 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18

Commit Message

Andrii Nakryiko Oct. 23, 2024, 4:39 a.m. UTC
Since the inception of BPF skeletons, libbpf has been mmap()'ing global
data ARRAY maps in the bpf_object__load_skeleton() API, which is used by
code-generated .skel.h files (i.e., by BPF skeletons only).

This is wrong: if a BPF object is loaded through the generic
bpf_object__load() API, global data maps won't be re-mmap()'ed after the
load step, so the memory pointers returned from bpf_map__initial_value()
will be stale and won't reflect the actual memory shared between the BPF
program and user space.

The return value of bpf_map__initial_value() is rarely used after load,
so this went unnoticed for a really long time, until the bpftrace
project attempted to load a BPF object through the generic
bpf_object__load() API and then used a BPF subskeleton instantiated from
that bpf_object. It turned out that .data/.rodata/.bss updates made
through such a subskeleton were "blackholed", all because libbpf wouldn't
re-mmap() those maps during the bpf_object__load() phase.
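
To make the failure mode concrete, here is a minimal sketch of the
triggering pattern (error handling elided; the "prog" subskeleton and
the cfg_flag variable are hypothetical names, not taken from bpftrace):

    #include <bpf/libbpf.h>
    #include "prog.subskel.h" /* hypothetical subskeleton header */

    struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
    struct prog *sub = prog__open(obj); /* captures map->mmaped pointers */
    int err = bpf_object__load(obj);    /* generic load, no skeleton API */

    /* Before this patch, sub->bss still pointed at the anonymous
     * "initialization image", not at the BPF map-backed memory, so
     * this update never reached the BPF program: */
    sub->bss->cfg_flag = 1;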

Long story short, this step should be done by libbpf regardless of BPF
skeleton usage, right after a BPF map is created in the kernel. This
patch moves the mmap()'ing logic into bpf_object__populate_internal_map()
to achieve that. bpf_object__load_skeleton() is now simple and almost
trivial, merely propagating the mmap()'ed pointers into the
user-supplied skeleton structs.

We also make trivial adjustments to the error reporting inside
bpf_object__populate_internal_map() for consistency with the rest of
libbpf's map-handling code.

Reported-by: Alastair Robertson <ajor@meta.com>
Reported-by: Jonathan Wiepert <jwiepert@meta.com>
Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support")
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 tools/lib/bpf/libbpf.c | 83 ++++++++++++++++++++----------------------
 1 file changed, 40 insertions(+), 43 deletions(-)

Comments

Jiri Olsa Oct. 23, 2024, 12:54 p.m. UTC | #1
On Tue, Oct 22, 2024 at 09:39:07PM -0700, Andrii Nakryiko wrote:

SNIP

> @@ -5146,11 +5147,43 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
>  		if (err) {
>  			err = -errno;
>  			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
> -			pr_warn("Error freezing map(%s) as read-only: %s\n",
> -				map->name, cp);
> +			pr_warn("map '%s': failed to freeze as read-only: %s\n",
> +				bpf_map__name(map), cp);
>  			return err;
>  		}
>  	}
> +
> +	/* Remap anonymous mmap()-ed "map initialization image" as
> +	 * a BPF map-backed mmap()-ed memory, but preserving the same
> +	 * memory address. This will cause kernel to change process'
> +	 * page table to point to a different piece of kernel memory,
> +	 * but from userspace point of view memory address (and its
> +	 * contents, being identical at this point) will stay the
> +	 * same. This mapping will be released by bpf_object__close()
> +	 * as per normal clean up procedure.
> +	 */
> +	mmap_sz = bpf_map_mmap_sz(map);
> +	if (map->def.map_flags & BPF_F_MMAPABLE) {
> +		void *mmaped;
> +		int prot;
> +
> +		if (map->def.map_flags & BPF_F_RDONLY_PROG)
> +			prot = PROT_READ;
> +		else
> +			prot = PROT_READ | PROT_WRITE;
> +		mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
> +		if (mmaped == MAP_FAILED) {
> +			err = -errno;
> +			pr_warn("map '%s': failed to re-mmap() contents: %d\n",
> +				bpf_map__name(map), err);
> +			return err;
> +		}
> +		map->mmaped = mmaped;
> +	} else if (map->mmaped) {
> +		munmap(map->mmaped, mmap_sz);
> +		map->mmaped = NULL;
> +	}

this caught my eye because we did not do that in bpf_object__load_skeleton,
makes sense, but why do we mmap !BPF_F_MMAPABLE maps in the first place?

jirka

> +
>  	return 0;
>  }
>  
> @@ -5467,8 +5500,7 @@ bpf_object__create_maps(struct bpf_object *obj)
>  				err = bpf_object__populate_internal_map(obj, map);
>  				if (err < 0)
>  					goto err_out;
> -			}
> -			if (map->def.type == BPF_MAP_TYPE_ARENA) {
> +			} else if (map->def.type == BPF_MAP_TYPE_ARENA) {
>  				map->mmaped = mmap((void *)(long)map->map_extra,
>  						   bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
>  						   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
> @@ -13916,46 +13948,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
>  	for (i = 0; i < s->map_cnt; i++) {
>  		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
>  		struct bpf_map *map = *map_skel->map;
> -		size_t mmap_sz = bpf_map_mmap_sz(map);
> -		int prot, map_fd = map->fd;
> -		void **mmaped = map_skel->mmaped;
> -
> -		if (!mmaped)
> -			continue;
> -
> -		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
> -			*mmaped = NULL;
> -			continue;
> -		}
>  
> -		if (map->def.type == BPF_MAP_TYPE_ARENA) {
> -			*mmaped = map->mmaped;
> +		if (!map_skel->mmaped)
>  			continue;
> -		}
> -
> -		if (map->def.map_flags & BPF_F_RDONLY_PROG)
> -			prot = PROT_READ;
> -		else
> -			prot = PROT_READ | PROT_WRITE;
>  
> -		/* Remap anonymous mmap()-ed "map initialization image" as
> -		 * a BPF map-backed mmap()-ed memory, but preserving the same
> -		 * memory address. This will cause kernel to change process'
> -		 * page table to point to a different piece of kernel memory,
> -		 * but from userspace point of view memory address (and its
> -		 * contents, being identical at this point) will stay the
> -		 * same. This mapping will be released by bpf_object__close()
> -		 * as per normal clean up procedure, so we don't need to worry
> -		 * about it from skeleton's clean up perspective.
> -		 */
> -		*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
> -		if (*mmaped == MAP_FAILED) {
> -			err = -errno;
> -			*mmaped = NULL;
> -			pr_warn("failed to re-mmap() map '%s': %d\n",
> -				 bpf_map__name(map), err);
> -			return libbpf_err(err);
> -		}
> +		*map_skel->mmaped = map->mmaped;
>  	}
>  
>  	return 0;
> -- 
> 2.43.5
> 
>
Andrii Nakryiko Oct. 23, 2024, 3:59 p.m. UTC | #2
On Wed, Oct 23, 2024 at 5:54 AM Jiri Olsa <olsajiri@gmail.com> wrote:
>
> On Tue, Oct 22, 2024 at 09:39:07PM -0700, Andrii Nakryiko wrote:
>
> SNIP
>
> > @@ -5146,11 +5147,43 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
> >               if (err) {
> >                       err = -errno;
> >                       cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
> > -                     pr_warn("Error freezing map(%s) as read-only: %s\n",
> > -                             map->name, cp);
> > +                     pr_warn("map '%s': failed to freeze as read-only: %s\n",
> > +                             bpf_map__name(map), cp);
> >                       return err;
> >               }
> >       }
> > +
> > +     /* Remap anonymous mmap()-ed "map initialization image" as
> > +      * a BPF map-backed mmap()-ed memory, but preserving the same
> > +      * memory address. This will cause kernel to change process'
> > +      * page table to point to a different piece of kernel memory,
> > +      * but from userspace point of view memory address (and its
> > +      * contents, being identical at this point) will stay the
> > +      * same. This mapping will be released by bpf_object__close()
> > +      * as per normal clean up procedure.
> > +      */
> > +     mmap_sz = bpf_map_mmap_sz(map);
> > +     if (map->def.map_flags & BPF_F_MMAPABLE) {
> > +             void *mmaped;
> > +             int prot;
> > +
> > +             if (map->def.map_flags & BPF_F_RDONLY_PROG)
> > +                     prot = PROT_READ;
> > +             else
> > +                     prot = PROT_READ | PROT_WRITE;
> > +             mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
> > +             if (mmaped == MAP_FAILED) {
> > +                     err = -errno;
> > +                     pr_warn("map '%s': failed to re-mmap() contents: %d\n",
> > +                             bpf_map__name(map), err);
> > +                     return err;
> > +             }
> > +             map->mmaped = mmaped;
> > +     } else if (map->mmaped) {
> > +             munmap(map->mmaped, mmap_sz);
> > +             map->mmaped = NULL;
> > +     }
>
> this caught my eye because we did not do that in bpf_object__load_skeleton,
> makes sense, but why do we mmap !BPF_F_MMAPABLE maps in the first place?

The initial mmap(ANONYMOUS) is basically a malloc(), but it works
uniformly for both BPF_F_MMAPABLE global data arrays and non-mmapable
ones. It's just streamlining, and thus a simplification.
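
For reference, that initial allocation looks roughly like this (a
sketch of the idea, not libbpf's exact code):

    /* stage the "initialization image" in an anonymous mapping,
     * uniformly for mmapable and non-mmapable global data maps */
    map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);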

>
> jirka
>
> > +
> >       return 0;
> >  }
> >
> > @@ -5467,8 +5500,7 @@ bpf_object__create_maps(struct bpf_object *obj)
> >                               err = bpf_object__populate_internal_map(obj, map);
> >                               if (err < 0)
> >                                       goto err_out;
> > -                     }
> > -                     if (map->def.type == BPF_MAP_TYPE_ARENA) {
> > +                     } else if (map->def.type == BPF_MAP_TYPE_ARENA) {
> >                               map->mmaped = mmap((void *)(long)map->map_extra,
> >                                                  bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
> >                                                  map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
> > @@ -13916,46 +13948,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
> >       for (i = 0; i < s->map_cnt; i++) {
> >               struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
> >               struct bpf_map *map = *map_skel->map;
> > -             size_t mmap_sz = bpf_map_mmap_sz(map);
> > -             int prot, map_fd = map->fd;
> > -             void **mmaped = map_skel->mmaped;
> > -
> > -             if (!mmaped)
> > -                     continue;
> > -
> > -             if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
> > -                     *mmaped = NULL;
> > -                     continue;
> > -             }
> >
> > -             if (map->def.type == BPF_MAP_TYPE_ARENA) {
> > -                     *mmaped = map->mmaped;
> > +             if (!map_skel->mmaped)
> >                       continue;
> > -             }
> > -
> > -             if (map->def.map_flags & BPF_F_RDONLY_PROG)
> > -                     prot = PROT_READ;
> > -             else
> > -                     prot = PROT_READ | PROT_WRITE;
> >
> > -             /* Remap anonymous mmap()-ed "map initialization image" as
> > -              * a BPF map-backed mmap()-ed memory, but preserving the same
> > -              * memory address. This will cause kernel to change process'
> > -              * page table to point to a different piece of kernel memory,
> > -              * but from userspace point of view memory address (and its
> > -              * contents, being identical at this point) will stay the
> > -              * same. This mapping will be released by bpf_object__close()
> > -              * as per normal clean up procedure, so we don't need to worry
> > -              * about it from skeleton's clean up perspective.
> > -              */
> > -             *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
> > -             if (*mmaped == MAP_FAILED) {
> > -                     err = -errno;
> > -                     *mmaped = NULL;
> > -                     pr_warn("failed to re-mmap() map '%s': %d\n",
> > -                              bpf_map__name(map), err);
> > -                     return libbpf_err(err);
> > -             }
> > +             *map_skel->mmaped = map->mmaped;
> >       }
> >
> >       return 0;
> > --
> > 2.43.5
> >
> >

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7c40286c3948..711173acbcef 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -5122,6 +5122,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 	enum libbpf_map_type map_type = map->libbpf_type;
 	char *cp, errmsg[STRERR_BUFSIZE];
 	int err, zero = 0;
+	size_t mmap_sz;
 
 	if (obj->gen_loader) {
 		bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
@@ -5135,8 +5136,8 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 	if (err) {
 		err = -errno;
 		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
-		pr_warn("Error setting initial map(%s) contents: %s\n",
-			map->name, cp);
+		pr_warn("map '%s': failed to set initial contents: %s\n",
+			bpf_map__name(map), cp);
 		return err;
 	}
 
@@ -5146,11 +5147,43 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
 		if (err) {
 			err = -errno;
 			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
-			pr_warn("Error freezing map(%s) as read-only: %s\n",
-				map->name, cp);
+			pr_warn("map '%s': failed to freeze as read-only: %s\n",
+				bpf_map__name(map), cp);
 			return err;
 		}
 	}
+
+	/* Remap anonymous mmap()-ed "map initialization image" as
+	 * a BPF map-backed mmap()-ed memory, but preserving the same
+	 * memory address. This will cause kernel to change process'
+	 * page table to point to a different piece of kernel memory,
+	 * but from userspace point of view memory address (and its
+	 * contents, being identical at this point) will stay the
+	 * same. This mapping will be released by bpf_object__close()
+	 * as per normal clean up procedure.
+	 */
+	mmap_sz = bpf_map_mmap_sz(map);
+	if (map->def.map_flags & BPF_F_MMAPABLE) {
+		void *mmaped;
+		int prot;
+
+		if (map->def.map_flags & BPF_F_RDONLY_PROG)
+			prot = PROT_READ;
+		else
+			prot = PROT_READ | PROT_WRITE;
+		mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map->fd, 0);
+		if (mmaped == MAP_FAILED) {
+			err = -errno;
+			pr_warn("map '%s': failed to re-mmap() contents: %d\n",
+				bpf_map__name(map), err);
+			return err;
+		}
+		map->mmaped = mmaped;
+	} else if (map->mmaped) {
+		munmap(map->mmaped, mmap_sz);
+		map->mmaped = NULL;
+	}
+
 	return 0;
 }
 
@@ -5467,8 +5500,7 @@ bpf_object__create_maps(struct bpf_object *obj)
 				err = bpf_object__populate_internal_map(obj, map);
 				if (err < 0)
 					goto err_out;
-			}
-			if (map->def.type == BPF_MAP_TYPE_ARENA) {
+			} else if (map->def.type == BPF_MAP_TYPE_ARENA) {
 				map->mmaped = mmap((void *)(long)map->map_extra,
 						   bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
 						   map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
@@ -13916,46 +13948,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
 	for (i = 0; i < s->map_cnt; i++) {
 		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
 		struct bpf_map *map = *map_skel->map;
-		size_t mmap_sz = bpf_map_mmap_sz(map);
-		int prot, map_fd = map->fd;
-		void **mmaped = map_skel->mmaped;
-
-		if (!mmaped)
-			continue;
-
-		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
-			*mmaped = NULL;
-			continue;
-		}
 
-		if (map->def.type == BPF_MAP_TYPE_ARENA) {
-			*mmaped = map->mmaped;
+		if (!map_skel->mmaped)
 			continue;
-		}
-
-		if (map->def.map_flags & BPF_F_RDONLY_PROG)
-			prot = PROT_READ;
-		else
-			prot = PROT_READ | PROT_WRITE;
 
-		/* Remap anonymous mmap()-ed "map initialization image" as
-		 * a BPF map-backed mmap()-ed memory, but preserving the same
-		 * memory address. This will cause kernel to change process'
-		 * page table to point to a different piece of kernel memory,
-		 * but from userspace point of view memory address (and its
-		 * contents, being identical at this point) will stay the
-		 * same. This mapping will be released by bpf_object__close()
-		 * as per normal clean up procedure, so we don't need to worry
-		 * about it from skeleton's clean up perspective.
-		 */
-		*mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
-		if (*mmaped == MAP_FAILED) {
-			err = -errno;
-			*mmaped = NULL;
-			pr_warn("failed to re-mmap() map '%s': %d\n",
-				 bpf_map__name(map), err);
-			return libbpf_err(err);
-		}
+		*map_skel->mmaped = map->mmaped;
 	}
 
 	return 0;
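
For readers unfamiliar with the MAP_FIXED trick the patch relies on,
here is a self-contained sketch of the same re-mmap() sequence outside
of libbpf (assumes privileges to create BPF maps; the map name "demo"
is arbitrary):

    #include <bpf/bpf.h>
    #include <linux/bpf.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t sz = sysconf(_SC_PAGESIZE);
            LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
            int zero = 0, fd;
            void *mem;

            /* 1) anonymous "initialization image" at some address */
            mem = mmap(NULL, sz, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
            if (mem == MAP_FAILED)
                    return 1;
            memset(mem, 0xab, sz); /* pretend this is .data contents */

            /* 2) create an mmap-able ARRAY map with one page-sized value
             * and copy the image into it, as libbpf does with
             * bpf_map_update_elem() */
            fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo", 4, sz, 1, &opts);
            if (fd < 0 || bpf_map_update_elem(fd, &zero, mem, 0))
                    return 1;

            /* 3) remap the map's memory over the very same address; the
             * pointer (and, thanks to step 2, its contents) stays the
             * same, but the pages are now backed by the kernel-side map */
            if (mmap(mem, sz, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
                    return 1;

            printf("map-backed memory remapped at %p\n", mem);
            return 0;
    }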