diff mbox series

[bpf-next,2/3] libbpf: split bpf object load into prepare/load

Message ID 20250228175255.254009-3-mykyta.yatsenko5@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series Introduce bpf_object__prepare | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-gcc / veristat-meta / x86_64-gcc veristat_meta
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / GCC BPF
bpf/vmtest-bpf-next-VM_Test-19 success Logs for s390x-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-20 success Logs for s390x-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-12 success Logs for aarch64-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-gcc / veristat-kernel / x86_64-gcc veristat_kernel
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-11 success Logs for aarch64-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / GCC BPF
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-17 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-17 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-43 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-44 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-45 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-50 success Logs for x86_64-llvm-18 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-49 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-51 success Logs for x86_64-llvm-18 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-46 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-47 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-48 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success Errors and warnings before: 26 (+1) this patch: 26 (+1)
netdev/cc_maintainers warning 8 maintainers not CCed: song@kernel.org kpsingh@kernel.org john.fastabend@gmail.com jolsa@kernel.org yonghong.song@linux.dev haoluo@google.com sdf@fomichev.me martin.lau@linux.dev
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 94 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc fail Errors and warnings before: 264 this patch: 268
netdev/source_inline success Was 0 now: 0

Commit Message

Mykyta Yatsenko Feb. 28, 2025, 5:52 p.m. UTC
From: Mykyta Yatsenko <yatsenko@meta.com>

Introduce bpf_object__prepare API: additional intermediate step,
executing all steps that bpf_object__load is running before the actual
loading of BPF programs.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
---
 tools/lib/bpf/libbpf.c   | 161 +++++++++++++++++++++++++++------------
 tools/lib/bpf/libbpf.h   |   9 +++
 tools/lib/bpf/libbpf.map |   1 +
 3 files changed, 121 insertions(+), 50 deletions(-)

Comments

Andrii Nakryiko Feb. 28, 2025, 10:31 p.m. UTC | #1
On Fri, Feb 28, 2025 at 9:53 AM Mykyta Yatsenko
<mykyta.yatsenko5@gmail.com> wrote:
>
> From: Mykyta Yatsenko <yatsenko@meta.com>
>
> Introduce bpf_object__prepare API: additional intermediate step,
> executing all steps that bpf_object__load is running before the actual
> loading of BPF programs.
>
> Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
> ---
>  tools/lib/bpf/libbpf.c   | 161 +++++++++++++++++++++++++++------------
>  tools/lib/bpf/libbpf.h   |   9 +++
>  tools/lib/bpf/libbpf.map |   1 +
>  3 files changed, 121 insertions(+), 50 deletions(-)
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 9ced1ce2334c..dd2f64903c3b 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -4858,7 +4858,7 @@ bool bpf_map__autocreate(const struct bpf_map *map)
>
>  int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
>  {
> -       if (map->obj->state >= OBJ_LOADED)
> +       if (map->obj->state >= OBJ_PREPARED)

ah, ok, so you've decided to change that in this patch... that works
as well, I guess

>                 return libbpf_err(-EBUSY);
>
>         map->autocreate = autocreate;
> @@ -4952,7 +4952,7 @@ struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
>
>  int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
>  {
> -       if (map->obj->state >= OBJ_LOADED)
> +       if (map->obj->state >= OBJ_PREPARED)
>                 return libbpf_err(-EBUSY);
>
>         map->def.max_entries = max_entries;
> @@ -5199,7 +5199,7 @@ static void bpf_map__destroy(struct bpf_map *map);
>
>  static bool map_is_created(const struct bpf_map *map)

we should use this helper in bpf_map__set_max_entries() and other
setters like that. We better error out when someone is trying to set
different value size for the map that was explicitly set from FD with
bpf_map__reuse_fd()... Can you please add a patch that would fix this
up (before all this OBJ_PREPARED refactoring)?

>  {
> -       return map->obj->state >= OBJ_LOADED || map->reused;
> +       return map->obj->state >= OBJ_PREPARED || map->reused;
>  }
>
>  static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
> @@ -7901,13 +7901,6 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
>         size_t i;
>         int err;
>
> -       for (i = 0; i < obj->nr_programs; i++) {
> -               prog = &obj->programs[i];
> -               err = bpf_object__sanitize_prog(obj, prog);
> -               if (err)
> -                       return err;
> -       }
> -
>         for (i = 0; i < obj->nr_programs; i++) {
>                 prog = &obj->programs[i];
>                 if (prog_is_subprog(obj, prog))
> @@ -7933,6 +7926,21 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
>         return 0;
>  }
>
> +static int bpf_object_prepare_progs(struct bpf_object *obj)
> +{
> +       struct bpf_program *prog;
> +       size_t i;
> +       int err;
> +
> +       for (i = 0; i < obj->nr_programs; i++) {
> +               prog = &obj->programs[i];
> +               err = bpf_object__sanitize_prog(obj, prog);
> +               if (err)
> +                       return err;
> +       }
> +       return 0;
> +}
> +
>  static const struct bpf_sec_def *find_sec_def(const char *sec_name);
>
>  static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
> @@ -8549,9 +8557,72 @@ static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
>         return 0;
>  }
>
> +static void bpf_object_unpin(struct bpf_object *obj)
> +{
> +       int i;
> +
> +       /* unpin any maps that were auto-pinned during load */
> +       for (i = 0; i < obj->nr_maps; i++)
> +               if (obj->maps[i].pinned && !obj->maps[i].reused)
> +                       bpf_map__unpin(&obj->maps[i], NULL);
> +}
> +
> +static void bpf_object_post_load_cleanup(struct bpf_object *obj)
> +{
> +       int i;
> +
> +       /* clean up fd_array */
> +       zfree(&obj->fd_array);
> +
> +       /* clean up module BTFs */
> +       for (i = 0; i < obj->btf_module_cnt; i++) {
> +               close(obj->btf_modules[i].fd);
> +               btf__free(obj->btf_modules[i].btf);
> +               free(obj->btf_modules[i].name);
> +       }
> +       free(obj->btf_modules);
> +
> +       /* clean up vmlinux BTF */
> +       btf__free(obj->btf_vmlinux);
> +       obj->btf_vmlinux = NULL;
> +}
> +
> +static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
> +{
> +       int err;
> +
> +       if (!obj)
> +               return -EINVAL;

meh, drop this please, can't be NULL

> +
> +       if (obj->state >= OBJ_PREPARED) {
> +               pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
> +               return -EINVAL;
> +       }
> +
> +       err = bpf_object_prepare_token(obj);
> +       err = err ? : bpf_object__probe_loading(obj);
> +       err = err ? : bpf_object__load_vmlinux_btf(obj, false);
> +       err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
> +       err = err ? : bpf_object__sanitize_maps(obj);
> +       err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
> +       err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
> +       err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
> +       err = err ? : bpf_object__sanitize_and_load_btf(obj);
> +       err = err ? : bpf_object__create_maps(obj);
> +       err = err ? : bpf_object_prepare_progs(obj);
> +       obj->state = OBJ_PREPARED;
> +
> +       if (err) {
> +               bpf_object_unpin(obj);
> +               bpf_object_unload(obj);
> +               return err;
> +       }
> +       return 0;
> +}
> +
>  static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
>  {
> -       int err, i;
> +       int err;
>
>         if (!obj)
>                 return libbpf_err(-EINVAL);
> @@ -8571,17 +8642,12 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
>                 return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
>         }
>
> -       err = bpf_object_prepare_token(obj);
> -       err = err ? : bpf_object__probe_loading(obj);
> -       err = err ? : bpf_object__load_vmlinux_btf(obj, false);
> -       err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
> -       err = err ? : bpf_object__sanitize_maps(obj);
> -       err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
> -       err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
> -       err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
> -       err = err ? : bpf_object__sanitize_and_load_btf(obj);
> -       err = err ? : bpf_object__create_maps(obj);
> -       err = err ? : bpf_object__load_progs(obj, extra_log_level);
> +       if (obj->state < OBJ_PREPARED) {
> +               err = bpf_object_prepare(obj, target_btf_path);
> +               if (err)
> +                       return libbpf_err(err);
> +       }
> +       err = bpf_object__load_progs(obj, extra_log_level);
>         err = err ? : bpf_object_init_prog_arrays(obj);
>         err = err ? : bpf_object_prepare_struct_ops(obj);
>
> @@ -8593,35 +8659,22 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
>                         err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
>         }
>
> -       /* clean up fd_array */
> -       zfree(&obj->fd_array);
> +       bpf_object_post_load_cleanup(obj);
> +       obj->state = OBJ_LOADED;/* doesn't matter if successfully or not */
>
> -       /* clean up module BTFs */
> -       for (i = 0; i < obj->btf_module_cnt; i++) {
> -               close(obj->btf_modules[i].fd);
> -               btf__free(obj->btf_modules[i].btf);
> -               free(obj->btf_modules[i].name);
> +       if (err) {
> +               bpf_object_unpin(obj);
> +               bpf_object_unload(obj);
> +               pr_warn("failed to load object '%s'\n", obj->path);
> +               return libbpf_err(err);
>         }
> -       free(obj->btf_modules);
> -
> -       /* clean up vmlinux BTF */
> -       btf__free(obj->btf_vmlinux);
> -       obj->btf_vmlinux = NULL;
> -
> -       obj->state = OBJ_LOADED;/* doesn't matter if successfully or not */
> -       if (err)
> -               goto out;
>
>         return 0;
> -out:
> -       /* unpin any maps that were auto-pinned during load */
> -       for (i = 0; i < obj->nr_maps; i++)
> -               if (obj->maps[i].pinned && !obj->maps[i].reused)
> -                       bpf_map__unpin(&obj->maps[i], NULL);
> +}
>
> -       bpf_object_unload(obj);
> -       pr_warn("failed to load object '%s'\n", obj->path);
> -       return libbpf_err(err);
> +int bpf_object__prepare(struct bpf_object *obj)
> +{
> +       return libbpf_err(bpf_object_prepare(obj, NULL));
>  }
>
>  int bpf_object__load(struct bpf_object *obj)
> @@ -8871,8 +8924,8 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
>         if (!obj)
>                 return libbpf_err(-ENOENT);
>
> -       if (obj->state < OBJ_LOADED) {
> -               pr_warn("object not yet loaded; load it first\n");
> +       if (obj->state < OBJ_PREPARED) {
> +               pr_warn("object not yet loaded/prepared; load/prepare it first\n");

let's keep the original simpler message, this is way too much of
either/or. Most users will never care about bpf_object__prepare()
anyways.

>                 return libbpf_err(-ENOENT);
>         }
>
> @@ -9069,6 +9122,14 @@ void bpf_object__close(struct bpf_object *obj)
>         if (IS_ERR_OR_NULL(obj))
>                 return;
>
> +       /*
> +        * if user called bpf_object__prepare() without ever getting to
> +        * bpf_object__load(), we need to clean up stuff that is normally
> +        * cleaned up at the end of loading step
> +        */
> +       if (obj->state == OBJ_PREPARED)
> +               bpf_object_post_load_cleanup(obj);
> +

let's make bpf_object_post_load_cleanup() idempotent, so calling it
multiple times won't hurt, it should be pretty simple (just set
obj->btf_module_cnt to zero)

>         usdt_manager_free(obj->usdt_man);
>         obj->usdt_man = NULL;
>
> @@ -10304,7 +10365,7 @@ static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
>
>  int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
>  {
> -       if (map->obj->state >= OBJ_LOADED || map->reused)
> +       if (map->obj->state >= OBJ_PREPARED || map->reused)
>                 return libbpf_err(-EBUSY);
>
>         if (map->mmaped) {
> @@ -10350,7 +10411,7 @@ int bpf_map__set_initial_value(struct bpf_map *map,
>  {
>         size_t actual_sz;
>
> -       if (map->obj->state >= OBJ_LOADED || map->reused)
> +       if (map->obj->state >= OBJ_PREPARED || map->reused)
>                 return libbpf_err(-EBUSY);
>
>         if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> index 3020ee45303a..09e87998c64e 100644
> --- a/tools/lib/bpf/libbpf.h
> +++ b/tools/lib/bpf/libbpf.h
> @@ -241,6 +241,15 @@ LIBBPF_API struct bpf_object *
>  bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
>                      const struct bpf_object_open_opts *opts);
>
> +/**
> + * @brief **bpf_object__prepare()** prepares BPF object for loading.

a bit too brief. Please add that preparation step performs ELF
processing, relocations, prepares final state of BPF program
instructions (accessible with bpf_program__insns()), creates and
(potentially) pins maps, and stops short of loading BPF programs.

This is an important aspect that veristat and others will be relying
on, so it should be documented properly

> + * @param obj Pointer to a valid BPF object instance returned by
> + * **bpf_object__open*()** API
> + * @return 0, on success; negative error code, otherwise, error code is
> + * stored in errno
> + */
> +int bpf_object__prepare(struct bpf_object *obj);
> +
>  /**
>   * @brief **bpf_object__load()** loads BPF object into kernel.
>   * @param obj Pointer to a valid BPF object instance returned by
> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> index b5a838de6f47..22edde0bf85e 100644
> --- a/tools/lib/bpf/libbpf.map
> +++ b/tools/lib/bpf/libbpf.map
> @@ -438,4 +438,5 @@ LIBBPF_1.6.0 {
>                 bpf_linker__new_fd;
>                 btf__add_decl_attr;
>                 btf__add_type_attr;
> +               bpf_object__prepare;

this is sorted list

pw-bot: cr

>  } LIBBPF_1.5.0;
> --
> 2.48.1
>
Eduard Zingerman March 1, 2025, 8:12 a.m. UTC | #2
On Fri, 2025-02-28 at 17:52 +0000, Mykyta Yatsenko wrote:

[...]

> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 9ced1ce2334c..dd2f64903c3b 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -4858,7 +4858,7 @@ bool bpf_map__autocreate(const struct bpf_map *map)
>  
>  int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
>  {
> -	if (map->obj->state >= OBJ_LOADED)
> +	if (map->obj->state >= OBJ_PREPARED)
>  		return libbpf_err(-EBUSY);

I looked through logic in patches #1 and #2 and changes look correct.
Running tests under valgrind does not show issues with this feature.
The only ask from my side is to consider doing ==/!= comparisons in
cases like above. E.g. it seems that `map->obj->state != OBJ_OPENED`
is a bit simpler to understand when reading condition above.
Or maybe that's just me.

>  	map->autocreate = autocreate;

[...]
Mykyta Yatsenko March 1, 2025, 9:45 p.m. UTC | #3
On 01/03/2025 08:12, Eduard Zingerman wrote:
> On Fri, 2025-02-28 at 17:52 +0000, Mykyta Yatsenko wrote:
>
> [...]
>
>> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
>> index 9ced1ce2334c..dd2f64903c3b 100644
>> --- a/tools/lib/bpf/libbpf.c
>> +++ b/tools/lib/bpf/libbpf.c
>> @@ -4858,7 +4858,7 @@ bool bpf_map__autocreate(const struct bpf_map *map)
>>   
>>   int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
>>   {
>> -	if (map->obj->state >= OBJ_LOADED)
>> +	if (map->obj->state >= OBJ_PREPARED)
>>   		return libbpf_err(-EBUSY);
> I looked through logic in patches #1 and #2 and changes look correct.
> Running tests under valgrind does not show issues with this feature.
> The only ask from my side is to consider doing ==/!= comparisons in
> cases like above. E.g. it seems that `map->obj->state != OBJ_OPENED`
> is a bit simpler to understand when reading condition above.
> Or maybe that's just me.
I'm not sure about this one. >= or < checks the state relative to the
operand more flexibly; for example, `map->obj->state >= OBJ_PREPARED` is
read as "is the object in at least the PREPARED state". Perhaps, if we add
more states, these >,< checks will not require any changes, while ==, != may.
I guess this also depends on what we actually want to check here: is it that
the state is at least PREPARED, or that the state is not the initial OPENED.
Not a strong opinion, though, happy to flip the code to ==, !=.
>   
>>   	map->autocreate = autocreate;
> [...]
>
Andrii Nakryiko March 3, 2025, 9:38 p.m. UTC | #4
On Sat, Mar 1, 2025 at 1:45 PM Mykyta Yatsenko
<mykyta.yatsenko5@gmail.com> wrote:
>
> On 01/03/2025 08:12, Eduard Zingerman wrote:
> > On Fri, 2025-02-28 at 17:52 +0000, Mykyta Yatsenko wrote:
> >
> > [...]
> >
> >> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> >> index 9ced1ce2334c..dd2f64903c3b 100644
> >> --- a/tools/lib/bpf/libbpf.c
> >> +++ b/tools/lib/bpf/libbpf.c
> >> @@ -4858,7 +4858,7 @@ bool bpf_map__autocreate(const struct bpf_map *map)
> >>
> >>   int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
> >>   {
> >> -    if (map->obj->state >= OBJ_LOADED)
> >> +    if (map->obj->state >= OBJ_PREPARED)
> >>              return libbpf_err(-EBUSY);
> > I looked through logic in patches #1 and #2 and changes look correct.
> > Running tests under valgrind does not show issues with this feature.
> > The only ask from my side is to consider doing ==/!= comparisons in
> > cases like above. E.g. it seems that `map->obj->state != OBJ_OPENED`
> > is a bit simpler to understand when reading condition above.
> > Or maybe that's just me.
> I'm not sure about this one.  >= or < checks for state relative to
> operand more
> flexibly,for example `map->obj->state >= OBJ_PREPARED` is read as
> "is the object in at least PREPARED state". Perhaps, if we add more states,
> these >,< checks will not require any changes, while ==, != may.
> I guess this also depends on what we actually want to check here, is it that
> state at least PREPARED or the state is not initial OPENED.
> Not a strong opinion, though, happy to flip code to ==, !=.

Those steps are logically ordered, so >= and <= make more sense. If
we ever add an extra step somewhere in between the existing steps, most
checks will stay correct, while with equality a lot of them might need
to be adjusted to multiple equality comparisons.

> >
> >>      map->autocreate = autocreate;
> > [...]
> >
>
Eduard Zingerman March 3, 2025, 10:04 p.m. UTC | #5
On Mon, 2025-03-03 at 13:38 -0800, Andrii Nakryiko wrote:
> On Sat, Mar 1, 2025 at 1:45 PM Mykyta Yatsenko
> <mykyta.yatsenko5@gmail.com> wrote:
> > 
> > On 01/03/2025 08:12, Eduard Zingerman wrote:
> > > On Fri, 2025-02-28 at 17:52 +0000, Mykyta Yatsenko wrote:
> > > 
> > > [...]
> > > 
> > > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > > > index 9ced1ce2334c..dd2f64903c3b 100644
> > > > --- a/tools/lib/bpf/libbpf.c
> > > > +++ b/tools/lib/bpf/libbpf.c
> > > > @@ -4858,7 +4858,7 @@ bool bpf_map__autocreate(const struct bpf_map *map)
> > > > 
> > > >   int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
> > > >   {
> > > > -    if (map->obj->state >= OBJ_LOADED)
> > > > +    if (map->obj->state >= OBJ_PREPARED)
> > > >              return libbpf_err(-EBUSY);
> > > I looked through logic in patches #1 and #2 and changes look correct.
> > > Running tests under valgrind does not show issues with this feature.
> > > The only ask from my side is to consider doing ==/!= comparisons in
> > > cases like above. E.g. it seems that `map->obj->state != OBJ_OPENED`
> > > is a bit simpler to understand when reading condition above.
> > > Or maybe that's just me.
> > I'm not sure about this one.  >= or < checks for state relative to
> > operand more
> > flexibly,for example `map->obj->state >= OBJ_PREPARED` is read as
> > "is the object in at least PREPARED state". Perhaps, if we add more states,
> > these >,< checks will not require any changes, while ==, != may.
> > I guess this also depends on what we actually want to check here, is it that
> > state at least PREPARED or the state is not initial OPENED.
> > Not a strong opinion, though, happy to flip code to ==, !=.
> 
> Those steps are logically ordered, so >= and <= makes more sense. If
> we ever add one extra step somewhere in between existing steps, most
> checks will stay correct, while with equality a lot of them might need
> to be adjusted to multiple equalities.

As I said, for me personally it is easier to read "can set autocreate
only in OPENED state", compared to "can't set autocreate if state is
PREPARED or higher".
But whatever, I'm not a true C programmer anyway :)

[...]
Andrii Nakryiko March 3, 2025, 11:27 p.m. UTC | #6
On Mon, Mar 3, 2025 at 2:04 PM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Mon, 2025-03-03 at 13:38 -0800, Andrii Nakryiko wrote:
> > On Sat, Mar 1, 2025 at 1:45 PM Mykyta Yatsenko
> > <mykyta.yatsenko5@gmail.com> wrote:
> > >
> > > On 01/03/2025 08:12, Eduard Zingerman wrote:
> > > > On Fri, 2025-02-28 at 17:52 +0000, Mykyta Yatsenko wrote:
> > > >
> > > > [...]
> > > >
> > > > > diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> > > > > index 9ced1ce2334c..dd2f64903c3b 100644
> > > > > --- a/tools/lib/bpf/libbpf.c
> > > > > +++ b/tools/lib/bpf/libbpf.c
> > > > > @@ -4858,7 +4858,7 @@ bool bpf_map__autocreate(const struct bpf_map *map)
> > > > >
> > > > >   int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
> > > > >   {
> > > > > -    if (map->obj->state >= OBJ_LOADED)
> > > > > +    if (map->obj->state >= OBJ_PREPARED)
> > > > >              return libbpf_err(-EBUSY);
> > > > I looked through logic in patches #1 and #2 and changes look correct.
> > > > Running tests under valgrind does not show issues with this feature.
> > > > The only ask from my side is to consider doing ==/!= comparisons in
> > > > cases like above. E.g. it seems that `map->obj->state != OBJ_OPENED`
> > > > is a bit simpler to understand when reading condition above.
> > > > Or maybe that's just me.
> > > I'm not sure about this one.  >= or < checks for state relative to
> > > operand more
> > > flexibly,for example `map->obj->state >= OBJ_PREPARED` is read as
> > > "is the object in at least PREPARED state". Perhaps, if we add more states,
> > > these >,< checks will not require any changes, while ==, != may.
> > > I guess this also depends on what we actually want to check here, is it that
> > > state at least PREPARED or the state is not initial OPENED.
> > > Not a strong opinion, though, happy to flip code to ==, !=.
> >
> > Those steps are logically ordered, so >= and <= makes more sense. If
> > we ever add one extra step somewhere in between existing steps, most
> > checks will stay correct, while with equality a lot of them might need
> > to be adjusted to multiple equalities.
>
> As I said, for me personally it is easier to read "can set autocreate
> only in OPENED state", compared to "can't set autocreate if state is
> PREPARED of higher".

The latter, IMO. PREPARED state is when maps are finalized, so if we
got there or beyond, can't modify maps. For progs the similar step is
LOADED.

> But whatever, I'm not a true C programmer anyway :)
>
> [...]
>
diff mbox series

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 9ced1ce2334c..dd2f64903c3b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -4858,7 +4858,7 @@  bool bpf_map__autocreate(const struct bpf_map *map)
 
 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
 {
-	if (map->obj->state >= OBJ_LOADED)
+	if (map->obj->state >= OBJ_PREPARED)
 		return libbpf_err(-EBUSY);
 
 	map->autocreate = autocreate;
@@ -4952,7 +4952,7 @@  struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
 
 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
 {
-	if (map->obj->state >= OBJ_LOADED)
+	if (map->obj->state >= OBJ_PREPARED)
 		return libbpf_err(-EBUSY);
 
 	map->def.max_entries = max_entries;
@@ -5199,7 +5199,7 @@  static void bpf_map__destroy(struct bpf_map *map);
 
 static bool map_is_created(const struct bpf_map *map)
 {
-	return map->obj->state >= OBJ_LOADED || map->reused;
+	return map->obj->state >= OBJ_PREPARED || map->reused;
 }
 
 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
@@ -7901,13 +7901,6 @@  bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	size_t i;
 	int err;
 
-	for (i = 0; i < obj->nr_programs; i++) {
-		prog = &obj->programs[i];
-		err = bpf_object__sanitize_prog(obj, prog);
-		if (err)
-			return err;
-	}
-
 	for (i = 0; i < obj->nr_programs; i++) {
 		prog = &obj->programs[i];
 		if (prog_is_subprog(obj, prog))
@@ -7933,6 +7926,21 @@  bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	return 0;
 }
 
+static int bpf_object_prepare_progs(struct bpf_object *obj)
+{
+	struct bpf_program *prog;
+	size_t i;
+	int err;
+
+	for (i = 0; i < obj->nr_programs; i++) {
+		prog = &obj->programs[i];
+		err = bpf_object__sanitize_prog(obj, prog);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
 
 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
@@ -8549,9 +8557,72 @@  static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
 	return 0;
 }
 
+static void bpf_object_unpin(struct bpf_object *obj)
+{
+	int i;
+
+	/* unpin any maps that were auto-pinned during load */
+	for (i = 0; i < obj->nr_maps; i++)
+		if (obj->maps[i].pinned && !obj->maps[i].reused)
+			bpf_map__unpin(&obj->maps[i], NULL);
+}
+
+static void bpf_object_post_load_cleanup(struct bpf_object *obj)
+{
+	int i;
+
+	/* clean up fd_array */
+	zfree(&obj->fd_array);
+
+	/* clean up module BTFs */
+	for (i = 0; i < obj->btf_module_cnt; i++) {
+		close(obj->btf_modules[i].fd);
+		btf__free(obj->btf_modules[i].btf);
+		free(obj->btf_modules[i].name);
+	}
+	zfree(&obj->btf_modules);
+
+	/* clean up vmlinux BTF */
+	btf__free(obj->btf_vmlinux);
+	obj->btf_vmlinux = NULL;
+}
+
+static int bpf_object_prepare(struct bpf_object *obj, const char *target_btf_path)
+{
+	int err;
+
+	if (!obj)
+		return -EINVAL;
+
+	if (obj->state >= OBJ_PREPARED) {
+		pr_warn("object '%s': prepare loading can't be attempted twice\n", obj->name);
+		return -EINVAL;
+	}
+
+	err = bpf_object_prepare_token(obj);
+	err = err ? : bpf_object__probe_loading(obj);
+	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
+	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
+	err = err ? : bpf_object__sanitize_maps(obj);
+	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
+	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+	err = err ? : bpf_object__sanitize_and_load_btf(obj);
+	err = err ? : bpf_object__create_maps(obj);
+	err = err ? : bpf_object_prepare_progs(obj);
+	if (err) {
+		bpf_object_unpin(obj);
+		bpf_object_unload(obj);
+		obj->state = OBJ_LOADED;
+		return err;
+	}
+	obj->state = OBJ_PREPARED;
+	return 0;
+}
+
 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
 {
-	int err, i;
+	int err;
 
 	if (!obj)
 		return libbpf_err(-EINVAL);
@@ -8571,17 +8642,12 @@  static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 		return libbpf_err(-LIBBPF_ERRNO__ENDIAN);
 	}
 
-	err = bpf_object_prepare_token(obj);
-	err = err ? : bpf_object__probe_loading(obj);
-	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
-	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
-	err = err ? : bpf_object__sanitize_maps(obj);
-	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
-	err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
-	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
-	err = err ? : bpf_object__sanitize_and_load_btf(obj);
-	err = err ? : bpf_object__create_maps(obj);
-	err = err ? : bpf_object__load_progs(obj, extra_log_level);
+	if (obj->state < OBJ_PREPARED) {
+		err = bpf_object_prepare(obj, target_btf_path);
+		if (err)
+			return libbpf_err(err);
+	}
+	err = bpf_object__load_progs(obj, extra_log_level);
 	err = err ? : bpf_object_init_prog_arrays(obj);
 	err = err ? : bpf_object_prepare_struct_ops(obj);
 
@@ -8593,35 +8659,22 @@  static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 			err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
 	}
 
-	/* clean up fd_array */
-	zfree(&obj->fd_array);
+	bpf_object_post_load_cleanup(obj);
+	obj->state = OBJ_LOADED; /* doesn't matter if successfully or not */
 
-	/* clean up module BTFs */
-	for (i = 0; i < obj->btf_module_cnt; i++) {
-		close(obj->btf_modules[i].fd);
-		btf__free(obj->btf_modules[i].btf);
-		free(obj->btf_modules[i].name);
+	if (err) {
+		bpf_object_unpin(obj);
+		bpf_object_unload(obj);
+		pr_warn("failed to load object '%s'\n", obj->path);
+		return libbpf_err(err);
 	}
-	free(obj->btf_modules);
-
-	/* clean up vmlinux BTF */
-	btf__free(obj->btf_vmlinux);
-	obj->btf_vmlinux = NULL;
-
-	obj->state = OBJ_LOADED;/* doesn't matter if successfully or not */
-	if (err)
-		goto out;
 
 	return 0;
-out:
-	/* unpin any maps that were auto-pinned during load */
-	for (i = 0; i < obj->nr_maps; i++)
-		if (obj->maps[i].pinned && !obj->maps[i].reused)
-			bpf_map__unpin(&obj->maps[i], NULL);
+}
 
-	bpf_object_unload(obj);
-	pr_warn("failed to load object '%s'\n", obj->path);
-	return libbpf_err(err);
+int bpf_object__prepare(struct bpf_object *obj)
+{
+	return libbpf_err(bpf_object_prepare(obj, NULL));
 }
 
 int bpf_object__load(struct bpf_object *obj)
@@ -8871,8 +8924,8 @@  int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
 	if (!obj)
 		return libbpf_err(-ENOENT);
 
-	if (obj->state < OBJ_LOADED) {
-		pr_warn("object not yet loaded; load it first\n");
+	if (obj->state < OBJ_PREPARED) {
+		pr_warn("object not yet loaded/prepared; load/prepare it first\n");
 		return libbpf_err(-ENOENT);
 	}
 
@@ -9069,6 +9122,14 @@  void bpf_object__close(struct bpf_object *obj)
 	if (IS_ERR_OR_NULL(obj))
 		return;
 
+	/*
+	 * if user called bpf_object__prepare() without ever getting to
+	 * bpf_object__load(), we need to clean up stuff that is normally
+	 * cleaned up at the end of loading step
+	 */
+	if (obj->state == OBJ_PREPARED)
+		bpf_object_post_load_cleanup(obj);
+
 	usdt_manager_free(obj->usdt_man);
 	obj->usdt_man = NULL;
 
@@ -10304,7 +10365,7 @@  static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
 
 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
 {
-	if (map->obj->state >= OBJ_LOADED || map->reused)
+	if (map->obj->state >= OBJ_PREPARED || map->reused)
 		return libbpf_err(-EBUSY);
 
 	if (map->mmaped) {
@@ -10350,7 +10411,7 @@  int bpf_map__set_initial_value(struct bpf_map *map,
 {
 	size_t actual_sz;
 
-	if (map->obj->state >= OBJ_LOADED || map->reused)
+	if (map->obj->state >= OBJ_PREPARED || map->reused)
 		return libbpf_err(-EBUSY);
 
 	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 3020ee45303a..09e87998c64e 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -241,6 +241,15 @@  LIBBPF_API struct bpf_object *
 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
 		     const struct bpf_object_open_opts *opts);
 
+/**
+ * @brief **bpf_object__prepare()** prepares BPF object for loading: performs
+ * ELF processing, relocations and map creation, without loading programs.
+ * @param obj Pointer to a valid BPF object instance returned by
+ * **bpf_object__open*()** API
+ * @return 0, on success; negative error code, otherwise, error code is
+ * stored in errno
+ */
+LIBBPF_API int bpf_object__prepare(struct bpf_object *obj);
+
 /**
  * @brief **bpf_object__load()** loads BPF object into kernel.
  * @param obj Pointer to a valid BPF object instance returned by
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b5a838de6f47..22edde0bf85e 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -438,4 +438,5 @@  LIBBPF_1.6.0 {
 		bpf_linker__new_fd;
+		bpf_object__prepare;
 		btf__add_decl_attr;
 		btf__add_type_attr;
 } LIBBPF_1.5.0;