diff mbox series

[bpf-next,6/7] libbpf: Update a bpf_link with another struct_ops.

Message ID 20230214221718.503964-7-kuifeng@meta.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series Transit between BPF TCP congestion controls. | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 7 maintainers not CCed: john.fastabend@gmail.com daniel@iogearbox.net sdf@google.com jolsa@kernel.org haoluo@google.com yhs@fb.com kpsingh@kernel.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 93 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 110 this patch: 110
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 success Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-17 success Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 success Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-19 success Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-22 success Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc

Commit Message

Kui-Feng Lee Feb. 14, 2023, 10:17 p.m. UTC
Introduce bpf_link__update_struct_ops(), which will allow you to
effortlessly transition the struct_ops map of any given bpf_link into
an alternative.

Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
---
 tools/lib/bpf/libbpf.c   | 35 +++++++++++++++++++++++++++++++++++
 tools/lib/bpf/libbpf.h   |  1 +
 tools/lib/bpf/libbpf.map |  1 +
 3 files changed, 37 insertions(+)

Comments

Andrii Nakryiko Feb. 16, 2023, 10:48 p.m. UTC | #1
On Tue, Feb 14, 2023 at 2:17 PM Kui-Feng Lee <kuifeng@meta.com> wrote:
>
> Introduce bpf_link__update_struct_ops(), which will allow you to
> effortlessly transition the struct_ops map of any given bpf_link into
> an alternative.
>
> Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
> ---
>  tools/lib/bpf/libbpf.c   | 35 +++++++++++++++++++++++++++++++++++
>  tools/lib/bpf/libbpf.h   |  1 +
>  tools/lib/bpf/libbpf.map |  1 +
>  3 files changed, 37 insertions(+)
>
> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> index 1eff6a03ddd9..6f7c72e312d4 100644
> --- a/tools/lib/bpf/libbpf.c
> +++ b/tools/lib/bpf/libbpf.c
> @@ -11524,6 +11524,41 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
>         return &link->link;
>  }
>
> +/*
> + * Swap the back struct_ops of a link with a new struct_ops map.
> + */
> +int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map)

we have bpf_link__update_program(), and so the generic counterpart for
map-based links would be bpf_link__update_map(). Let's call it that.
And it shouldn't probably assume so much struct_ops specific things.

> +{
> +       struct bpf_link_struct_ops_map *st_ops_link;
> +       int err, fd;
> +
> +       if (!bpf_map__is_struct_ops(map) || map->fd == -1)
> +               return -EINVAL;
> +
> +       /* Ensure the type of a link is correct */
> +       if (link->detach != bpf_link__detach_struct_ops)
> +               return -EINVAL;
> +
> +       err = bpf_map__update_vdata(map);

it's a bit weird we do this at attach time, not when bpf_map is
actually instantiated. Should we move this map contents initialization
to bpf_object__load() phase? Same for bpf_map__attach_struct_ops().
What do we lose by doing it after all the BPF programs are loaded in
load phase?

> +       if (err) {
> +               err = -errno;
> +               free(link);
> +               return err;
> +       }
> +
> +       fd = bpf_link_update(link->fd, map->fd, NULL);
> +       if (fd < 0) {
> +               err = -errno;
> +               free(link);
> +               return err;
> +       }
> +
> +       st_ops_link = container_of(link, struct bpf_link_struct_ops_map, link);
> +       st_ops_link->map_fd = map->fd;
> +
> +       return 0;
> +}
> +
>  typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
>                                                           void *private_data);
>
> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> index 2efd80f6f7b9..dd25cd6759d4 100644
> --- a/tools/lib/bpf/libbpf.h
> +++ b/tools/lib/bpf/libbpf.h
> @@ -695,6 +695,7 @@ bpf_program__attach_freplace(const struct bpf_program *prog,
>  struct bpf_map;
>
>  LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
> +LIBBPF_API int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map);

let's rename to bpf_link__update_map() and put it next to
bpf_link__update_program() in libbpf.h

>
>  struct bpf_iter_attach_opts {
>         size_t sz; /* size of this struct for forward/backward compatibility */
> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> index 11c36a3c1a9f..ca6993c744b6 100644
> --- a/tools/lib/bpf/libbpf.map
> +++ b/tools/lib/bpf/libbpf.map
> @@ -373,6 +373,7 @@ LIBBPF_1.1.0 {
>         global:
>                 bpf_btf_get_fd_by_id_opts;
>                 bpf_link_get_fd_by_id_opts;
> +               bpf_link__update_struct_ops;
>                 bpf_map_get_fd_by_id_opts;
>                 bpf_prog_get_fd_by_id_opts;
>                 user_ring_buffer__discard;

we are in LIBBPF_1.2.0 already, please move


> --
> 2.30.2
>
Kui-Feng Lee Feb. 18, 2023, 12:22 a.m. UTC | #2
On 2/16/23 14:48, Andrii Nakryiko wrote:
> On Tue, Feb 14, 2023 at 2:17 PM Kui-Feng Lee <kuifeng@meta.com> wrote:
>>
>> Introduce bpf_link__update_struct_ops(), which will allow you to
>> effortlessly transition the struct_ops map of any given bpf_link into
>> an alternative.
>>
>> Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
>> ---
>>   tools/lib/bpf/libbpf.c   | 35 +++++++++++++++++++++++++++++++++++
>>   tools/lib/bpf/libbpf.h   |  1 +
>>   tools/lib/bpf/libbpf.map |  1 +
>>   3 files changed, 37 insertions(+)
>>
>> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
>> index 1eff6a03ddd9..6f7c72e312d4 100644
>> --- a/tools/lib/bpf/libbpf.c
>> +++ b/tools/lib/bpf/libbpf.c
>> @@ -11524,6 +11524,41 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
>>          return &link->link;
>>   }
>>
>> +/*
>> + * Swap the back struct_ops of a link with a new struct_ops map.
>> + */
>> +int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map)
> 
> we have bpf_link__update_program(), and so the generic counterpart for
> map-based links would be bpf_link__update_map(). Let's call it that.
> And it shouldn't probably assume so much struct_ops specific things.

Sure

> 
>> +{
>> +       struct bpf_link_struct_ops_map *st_ops_link;
>> +       int err, fd;
>> +
>> +       if (!bpf_map__is_struct_ops(map) || map->fd == -1)
>> +               return -EINVAL;
>> +
>> +       /* Ensure the type of a link is correct */
>> +       if (link->detach != bpf_link__detach_struct_ops)
>> +               return -EINVAL;
>> +
>> +       err = bpf_map__update_vdata(map);
> 
> it's a bit weird we do this at attach time, not when bpf_map is
> actually instantiated. Should we move this map contents initialization
> to bpf_object__load() phase? Same for bpf_map__attach_struct_ops().
> What do we lose by doing it after all the BPF programs are loaded in
> load phase?

With the current behavior (w/o links), a struct_ops will be registered 
when updating its value.  If we move bpf_map__update_vdata() to 
bpf_object__load(), a congestion control algorithm will be activated at 
the moment loading it before attaching it.  However, we should activate 
an algorithm at attach time.


> 
>> +       if (err) {
>> +               err = -errno;
>> +               free(link);
>> +               return err;
>> +       }
>> +
>> +       fd = bpf_link_update(link->fd, map->fd, NULL);
>> +       if (fd < 0) {
>> +               err = -errno;
>> +               free(link);
>> +               return err;
>> +       }
>> +
>> +       st_ops_link = container_of(link, struct bpf_link_struct_ops_map, link);
>> +       st_ops_link->map_fd = map->fd;
>> +
>> +       return 0;
>> +}
>> +
>>   typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
>>                                                            void *private_data);
>>
>> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
>> index 2efd80f6f7b9..dd25cd6759d4 100644
>> --- a/tools/lib/bpf/libbpf.h
>> +++ b/tools/lib/bpf/libbpf.h
>> @@ -695,6 +695,7 @@ bpf_program__attach_freplace(const struct bpf_program *prog,
>>   struct bpf_map;
>>
>>   LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
>> +LIBBPF_API int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map);
> 
> let's rename to bpf_link__update_map() and put it next to
> bpf_link__update_program() in libbpf.h
> 
>>
>>   struct bpf_iter_attach_opts {
>>          size_t sz; /* size of this struct for forward/backward compatibility */
>> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
>> index 11c36a3c1a9f..ca6993c744b6 100644
>> --- a/tools/lib/bpf/libbpf.map
>> +++ b/tools/lib/bpf/libbpf.map
>> @@ -373,6 +373,7 @@ LIBBPF_1.1.0 {
>>          global:
>>                  bpf_btf_get_fd_by_id_opts;
>>                  bpf_link_get_fd_by_id_opts;
>> +               bpf_link__update_struct_ops;
>>                  bpf_map_get_fd_by_id_opts;
>>                  bpf_prog_get_fd_by_id_opts;
>>                  user_ring_buffer__discard;
> 
> we are in LIBBPF_1.2.0 already, please move
> 
> 
>> --
>> 2.30.2
>>
Andrii Nakryiko Feb. 18, 2023, 1:10 a.m. UTC | #3
On Fri, Feb 17, 2023 at 4:22 PM Kui-Feng Lee <sinquersw@gmail.com> wrote:
>
>
>
> On 2/16/23 14:48, Andrii Nakryiko wrote:
> > On Tue, Feb 14, 2023 at 2:17 PM Kui-Feng Lee <kuifeng@meta.com> wrote:
> >>
> >> Introduce bpf_link__update_struct_ops(), which will allow you to
> >> effortlessly transition the struct_ops map of any given bpf_link into
> >> an alternative.
> >>
> >> Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
> >> ---
> >>   tools/lib/bpf/libbpf.c   | 35 +++++++++++++++++++++++++++++++++++
> >>   tools/lib/bpf/libbpf.h   |  1 +
> >>   tools/lib/bpf/libbpf.map |  1 +
> >>   3 files changed, 37 insertions(+)
> >>
> >> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
> >> index 1eff6a03ddd9..6f7c72e312d4 100644
> >> --- a/tools/lib/bpf/libbpf.c
> >> +++ b/tools/lib/bpf/libbpf.c
> >> @@ -11524,6 +11524,41 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
> >>          return &link->link;
> >>   }
> >>
> >> +/*
> >> + * Swap the back struct_ops of a link with a new struct_ops map.
> >> + */
> >> +int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map)
> >
> > we have bpf_link__update_program(), and so the generic counterpart for
> > map-based links would be bpf_link__update_map(). Let's call it that.
> > And it shouldn't probably assume so much struct_ops specific things.
>
> Sure
>
> >
> >> +{
> >> +       struct bpf_link_struct_ops_map *st_ops_link;
> >> +       int err, fd;
> >> +
> >> +       if (!bpf_map__is_struct_ops(map) || map->fd == -1)
> >> +               return -EINVAL;
> >> +
> >> +       /* Ensure the type of a link is correct */
> >> +       if (link->detach != bpf_link__detach_struct_ops)
> >> +               return -EINVAL;
> >> +
> >> +       err = bpf_map__update_vdata(map);
> >
> > it's a bit weird we do this at attach time, not when bpf_map is
> > actually instantiated. Should we move this map contents initialization
> > to bpf_object__load() phase? Same for bpf_map__attach_struct_ops().
> > What do we lose by doing it after all the BPF programs are loaded in
> > load phase?
>
> With the current behavior (w/o links), a struct_ops will be registered
> when updating its value.  If we move bpf_map__update_vdata() to
> bpf_object__load(), a congestion control algorithm will be activated at
> the moment loading it before attaching it.  However, we should activate
> an algorithm at attach time.
>

Of course. But I was thinking to move `bpf_map_update_elem(map->fd,
&zero, st_ops->kern_vdata, 0);` part out of bpf_map__update_vdata()
and make update_vdata() just prepare st_ops->kern_vdata only.

>
> >
> >> +       if (err) {
> >> +               err = -errno;
> >> +               free(link);
> >> +               return err;
> >> +       }
> >> +
> >> +       fd = bpf_link_update(link->fd, map->fd, NULL);
> >> +       if (fd < 0) {
> >> +               err = -errno;
> >> +               free(link);
> >> +               return err;
> >> +       }
> >> +
> >> +       st_ops_link = container_of(link, struct bpf_link_struct_ops_map, link);
> >> +       st_ops_link->map_fd = map->fd;
> >> +
> >> +       return 0;
> >> +}
> >> +
> >>   typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
> >>                                                            void *private_data);
> >>
> >> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> >> index 2efd80f6f7b9..dd25cd6759d4 100644
> >> --- a/tools/lib/bpf/libbpf.h
> >> +++ b/tools/lib/bpf/libbpf.h
> >> @@ -695,6 +695,7 @@ bpf_program__attach_freplace(const struct bpf_program *prog,
> >>   struct bpf_map;
> >>
> >>   LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
> >> +LIBBPF_API int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map);
> >
> > let's rename to bpf_link__update_map() and put it next to
> > bpf_link__update_program() in libbpf.h
> >
> >>
> >>   struct bpf_iter_attach_opts {
> >>          size_t sz; /* size of this struct for forward/backward compatibility */
> >> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> >> index 11c36a3c1a9f..ca6993c744b6 100644
> >> --- a/tools/lib/bpf/libbpf.map
> >> +++ b/tools/lib/bpf/libbpf.map
> >> @@ -373,6 +373,7 @@ LIBBPF_1.1.0 {
> >>          global:
> >>                  bpf_btf_get_fd_by_id_opts;
> >>                  bpf_link_get_fd_by_id_opts;
> >> +               bpf_link__update_struct_ops;
> >>                  bpf_map_get_fd_by_id_opts;
> >>                  bpf_prog_get_fd_by_id_opts;
> >>                  user_ring_buffer__discard;
> >
> > we are in LIBBPF_1.2.0 already, please move
> >
> >
> >> --
> >> 2.30.2
> >>
Kui-Feng Lee Feb. 21, 2023, 10:20 p.m. UTC | #4
On 2/17/23 17:10, Andrii Nakryiko wrote:
> On Fri, Feb 17, 2023 at 4:22 PM Kui-Feng Lee <sinquersw@gmail.com> wrote:
>>
>>
>>
>> On 2/16/23 14:48, Andrii Nakryiko wrote:
>>> On Tue, Feb 14, 2023 at 2:17 PM Kui-Feng Lee <kuifeng@meta.com> wrote:
>>>>
>>>> Introduce bpf_link__update_struct_ops(), which will allow you to
>>>> effortlessly transition the struct_ops map of any given bpf_link into
>>>> an alternative.
>>>>
>>>> Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
>>>> ---
>>>>    tools/lib/bpf/libbpf.c   | 35 +++++++++++++++++++++++++++++++++++
>>>>    tools/lib/bpf/libbpf.h   |  1 +
>>>>    tools/lib/bpf/libbpf.map |  1 +
>>>>    3 files changed, 37 insertions(+)
>>>>
>>>> diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
>>>> index 1eff6a03ddd9..6f7c72e312d4 100644
>>>> --- a/tools/lib/bpf/libbpf.c
>>>> +++ b/tools/lib/bpf/libbpf.c
>>>> @@ -11524,6 +11524,41 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
>>>>           return &link->link;
>>>>    }
>>>>
>>>> +/*
>>>> + * Swap the back struct_ops of a link with a new struct_ops map.
>>>> + */
>>>> +int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map)
>>>
>>> we have bpf_link__update_program(), and so the generic counterpart for
>>> map-based links would be bpf_link__update_map(). Let's call it that.
>>> And it shouldn't probably assume so much struct_ops specific things.
>>
>> Sure
>>
>>>
>>>> +{
>>>> +       struct bpf_link_struct_ops_map *st_ops_link;
>>>> +       int err, fd;
>>>> +
>>>> +       if (!bpf_map__is_struct_ops(map) || map->fd == -1)
>>>> +               return -EINVAL;
>>>> +
>>>> +       /* Ensure the type of a link is correct */
>>>> +       if (link->detach != bpf_link__detach_struct_ops)
>>>> +               return -EINVAL;
>>>> +
>>>> +       err = bpf_map__update_vdata(map);
>>>
>>> it's a bit weird we do this at attach time, not when bpf_map is
>>> actually instantiated. Should we move this map contents initialization
>>> to bpf_object__load() phase? Same for bpf_map__attach_struct_ops().
>>> What do we lose by doing it after all the BPF programs are loaded in
>>> load phase?
>>
>> With the current behavior (w/o links), a struct_ops will be registered
>> when updating its value.  If we move bpf_map__update_vdata() to
>> bpf_object__load(), a congestion control algorithm will be activated at
>> the moment loading it before attaching it.  However, we should activate
>> an algorithm at attach time.
>>
> 
> Of course. But I was thinking to move `bpf_map_update_elem(map->fd,
> &zero, st_ops->kern_vdata, 0);` part out of bpf_map__update_vdata()
> and make update_vdata() just prepare st_ops->kern_vdata only.

Ok! I will rename it as bpf_map_prepare_vdata(), and call 
bpf_map_update_elem() separately.
diff mbox series

Patch

diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 1eff6a03ddd9..6f7c72e312d4 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -11524,6 +11524,41 @@  struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
 	return &link->link;
 }
 
+/*
+ * Replace the struct_ops map backing a link with a new struct_ops map.
+ */
+int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map)
+{
+	struct bpf_link_struct_ops_map *st_ops_link;
+	int err, fd;
+
+	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
+		return -EINVAL;
+
+	/* Ensure the type of a link is correct */
+	if (link->detach != bpf_link__detach_struct_ops)
+		return -EINVAL;
+
+	err = bpf_map__update_vdata(map);
+	if (err) {
+		err = -errno;
+		free(link);
+		return err;
+	}
+
+	fd = bpf_link_update(link->fd, map->fd, NULL);
+	if (fd < 0) {
+		err = -errno;
+		free(link);
+		return err;
+	}
+
+	st_ops_link = container_of(link, struct bpf_link_struct_ops_map, link);
+	st_ops_link->map_fd = map->fd;
+
+	return 0;
+}
+
 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
 							  void *private_data);
 
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 2efd80f6f7b9..dd25cd6759d4 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -695,6 +695,7 @@  bpf_program__attach_freplace(const struct bpf_program *prog,
 struct bpf_map;
 
 LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
+LIBBPF_API int bpf_link__update_struct_ops(struct bpf_link *link, const struct bpf_map *map);
 
 struct bpf_iter_attach_opts {
 	size_t sz; /* size of this struct for forward/backward compatibility */
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 11c36a3c1a9f..ca6993c744b6 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -373,6 +373,7 @@  LIBBPF_1.1.0 {
 	global:
 		bpf_btf_get_fd_by_id_opts;
 		bpf_link_get_fd_by_id_opts;
+		bpf_link__update_struct_ops;
 		bpf_map_get_fd_by_id_opts;
 		bpf_prog_get_fd_by_id_opts;
 		user_ring_buffer__discard;