Message ID:    20211006222103.3631981-3-joannekoong@fb.com (mailing list archive)
State:         Changes Requested
Delegated to:  BPF
Series:        Implement bitset maps, with bloom filter
On Wed, Oct 6, 2021 at 3:27 PM Joanne Koong <joannekoong@fb.com> wrote:
>
> This patch adds the libbpf infrastructure for supporting a
> per-map-type "map_extra" field, whose definition will be
> idiosyncratic depending on map type.
>
> For example, for the bitset map, the lower 4 bits of map_extra
> is used to denote the number of hash functions.
>
> Signed-off-by: Joanne Koong <joannekoong@fb.com>
> ---
> [...]
> diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
> index 6fffb3cdf39b..c4049f2d63cc 100644
> --- a/tools/lib/bpf/bpf.h
> +++ b/tools/lib/bpf/bpf.h
> @@ -50,6 +50,7 @@ struct bpf_create_map_attr {
>                 __u32 inner_map_fd;
>                 __u32 btf_vmlinux_value_type_id;
>         };
> +       __u32 map_extra;

this struct is frozen, we can't change it. It's fine to not allow
passing map_extra in libbpf APIs. We have libbpf 1.0 task to revamp
low-level APIs like map creation in a way that will allow good
extensibility. You don't have to worry about that in this patch set.

> };
>
> LIBBPF_API int
> diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
> index 963b1060d944..bce5a0090f3f 100644
> --- a/tools/lib/bpf/bpf_helpers.h
> +++ b/tools/lib/bpf/bpf_helpers.h
> @@ -133,6 +133,7 @@ struct bpf_map_def {
>         unsigned int value_size;
>         unsigned int max_entries;
>         unsigned int map_flags;
> +       unsigned int map_extra;
> };

This one is also frozen, please don't change it.

> [...]
> @@ -2322,6 +2327,8 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
>         pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
>         if (def->parts & MAP_DEF_MAP_FLAGS)
>                 pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
> +       if (def->parts & MAP_DEF_MAP_EXTRA)
> +               pr_debug("map '%s': found map_extra = %u.\n", map->name, def->map_extra);

reading this now, I think map_flags should be emitted as %x, can you
please update the map_flags format specifier and use %x for map_extra
as well?

> [...]
> diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
> index 89ca9c83ed4e..55e8dfe6f3e1 100644
> --- a/tools/lib/bpf/libbpf.h
> +++ b/tools/lib/bpf/libbpf.h
> @@ -486,6 +486,7 @@ struct bpf_map_def {
>         unsigned int value_size;
>         unsigned int max_entries;
>         unsigned int map_flags;
> +       unsigned int map_extra;
> };

this struct is also frozen, please keep it as is

> [...]
> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> index f270d25e4af3..308378b3f20b 100644
> --- a/tools/lib/bpf/libbpf.map
> +++ b/tools/lib/bpf/libbpf.map
> @@ -395,4 +395,6 @@ LIBBPF_0.6.0 {
>         bpf_object__prev_program;
>         btf__add_btf;
>         btf__add_tag;
> +       bpf_map__map_extra;
> +       bpf_map__set_map_extra;

this list is alphabetically sorted, please keep it so

> } LIBBPF_0.5.0;
> [...]
> @@ -208,6 +209,7 @@ struct btf_map_def {
>         __u32 map_flags;
>         __u32 numa_node;
>         __u32 pinning;
> +       __u32 map_extra;
> };

this is currently the only (because internal) struct that can get
map_extra added :)

> int parse_btf_map_def(const char *map_name, struct btf *btf,
> --
> 2.30.2
>
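For reference, the format-specifier change requested above would make
the two pr_debug() calls in fill_map_from_def() read as follows (a
sketch of the suggested follow-up, not code from this version of the
patch):

    if (def->parts & MAP_DEF_MAP_FLAGS)
        pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
    if (def->parts & MAP_DEF_MAP_EXTRA)
        pr_debug("map '%s': found map_extra = 0x%x.\n", map->name, def->map_extra);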
On Wed, Oct 6, 2021 at 3:27 PM Joanne Koong <joannekoong@fb.com> wrote:
>
> This patch adds the libbpf infrastructure for supporting a
> per-map-type "map_extra" field, whose definition will be
> idiosyncratic depending on map type.
>
> For example, for the bitset map, the lower 4 bits of map_extra
> is used to denote the number of hash functions.
>
> Signed-off-by: Joanne Koong <joannekoong@fb.com>
> ---
> [...]
> diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
> index 6fffb3cdf39b..c4049f2d63cc 100644
> --- a/tools/lib/bpf/bpf.h
> +++ b/tools/lib/bpf/bpf.h
> @@ -50,6 +50,7 @@ struct bpf_create_map_attr {
>                 __u32 inner_map_fd;
>                 __u32 btf_vmlinux_value_type_id;
>         };
> +       __u32 map_extra;

btw, I think it might be better to use __u64 for map_extra in kernel
UAPI for extensibility (e.g., some maps might specify that this is a
pointer to some extra data structure, just like we do for some types
of bpf_iter program types to specify extra parameters). In libbpf you
can't express entire 64 bits with __uint() macro, but eventually that
limitation might be raised separately.

> [...]
> --
> 2.30.2
>
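To make the encoding concrete: with a widened 64-bit field, packing
and unpacking could look like the self-contained sketch below. The
4-bit hash-function field comes from this series' commit message; the
mask name and helper functions are hypothetical:

    #include <stdint.h>

    /* Lower 4 bits of map_extra: number of hash functions (per this
     * series). The remaining bits are unused today; a __u64 field
     * would leave room for, e.g., a user pointer to an extra
     * parameters struct later on.
     */
    #define MAP_EXTRA_NR_HASH_MASK 0xfULL

    static inline uint64_t map_extra_set_nr_hash(uint64_t extra, uint64_t nr)
    {
        return (extra & ~MAP_EXTRA_NR_HASH_MASK) | (nr & MAP_EXTRA_NR_HASH_MASK);
    }

    static inline unsigned int map_extra_nr_hash(uint64_t extra)
    {
        return extra & MAP_EXTRA_NR_HASH_MASK;
    }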
On 10/8/21 4:19 PM, Andrii Nakryiko wrote:
> On Wed, Oct 6, 2021 at 3:27 PM Joanne Koong <joannekoong@fb.com> wrote:
>> This patch adds the libbpf infrastructure for supporting a
>> per-map-type "map_extra" field, whose definition will be
>> idiosyncratic depending on map type.
>>
>> For example, for the bitset map, the lower 4 bits of map_extra
>> is used to denote the number of hash functions.
>>
>> Signed-off-by: Joanne Koong <joannekoong@fb.com>
>> ---
>> [...]
>> diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
>> index 6fffb3cdf39b..c4049f2d63cc 100644
>> --- a/tools/lib/bpf/bpf.h
>> +++ b/tools/lib/bpf/bpf.h
>> @@ -50,6 +50,7 @@ struct bpf_create_map_attr {
>>                 __u32 inner_map_fd;
>>                 __u32 btf_vmlinux_value_type_id;
>>         };
>> +       __u32 map_extra;
> this struct is frozen, we can't change it. It's fine to not allow
> passing map_extra in libbpf APIs. We have libbpf 1.0 task to revamp
> low-level APIs like map creation in a way that will allow good
> extensibility. You don't have to worry about that in this patch set.
I see! From my understanding, without "map_extra" added to the
bpf_create_map_attr struct, it's not possible in the subsequent
bloom filter benchmark tests to set the map_extra flag, which
means we can't set the number of hash functions. (The entrypoint
for propagating the flags to the kernel at map creation time is
in the function "bpf_create_map_xattr", which takes in a
struct bpf_create_map_attr.)

1) To get the benchmark numbers for different # of hash functions, I'll
test using a modified version of the code where the map_extra flag
gets propagated to the kernel. I'll add a TODO to the benchmarks
saying that the specified # of hash functions will get propagated for
real once libbpf's map creation supports map_extra.

2) Should I drop this libbpf patch altogether from this patchset, and
add it when we do the libbpf 1.0 task to revamp the map creation APIs?
Since without extending map creation to include map_extra, these
map_extra libbpf changes don't have much effect right now.

What are your thoughts?

> [...]
>> --
>> 2.30.2
>>
On Wed, Oct 20, 2021 at 2:09 PM Joanne Koong <joannekoong@fb.com> wrote:
>
> On 10/8/21 4:19 PM, Andrii Nakryiko wrote:
>
> > [...]
> > this struct is frozen, we can't change it. It's fine to not allow
> > passing map_extra in libbpf APIs. We have libbpf 1.0 task to revamp
> > low-level APIs like map creation in a way that will allow good
> > extensibility. You don't have to worry about that in this patch set.
> I see! From my understanding, without "map_extra" added to the
> bpf_create_map_attr struct, it's not possible in the subsequent
> bloom filter benchmark tests to set the map_extra flag, which

Didn't you add bpf_map__set_map_extra() setter for that? Also one can
always do direct bpf syscall (see sys_bpf in tools/lib/bpf/bpf.c), if
absolutely necessary. But set_map_extra() setter is the way to go for
benchmark, I think.

> means we can't set the number of hash functions. (The entrypoint
> for propagating the flags to the kernel at map creation time is
> in the function "bpf_create_map_xattr", which takes in a
> struct bpf_create_map_attr.)
>
> 1) To get the benchmark numbers for different # of hash functions, I'll
> test using a modified version of the code where the map_extra flag
> gets propagated to the kernel. I'll add a TODO to the benchmarks
> saying that the specified # of hash functions will get propagated for
> real once libbpf's map creation supports map_extra.
>
> 2) Should I drop this libbpf patch altogether from this patchset, and
> add it when we do the libbpf 1.0 task to revamp the map creation APIs?
> Since without extending map creation to include map_extra, these
> map_extra libbpf changes don't have much effect right now.

No, getter/setter API is good to have, please keep them.

> What are your thoughts?
>
> > [...]
> >> --
> >> 2.30.2
> >>
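Concretely, the setter-based flow suggested here would look something
like the sketch below. The skeleton and map names are hypothetical;
the one real constraint, visible in the patch, is that
bpf_map__set_map_extra() returns -EBUSY once the map has an fd, so it
must be called between skeleton open and load. (As the next message
explains, with this version of the patch the value still does not
reach the kernel at map creation time.)

    #include <bpf/libbpf.h>
    #include "bloom_filter_bench.skel.h" /* hypothetical skeleton */

    static struct bloom_filter_bench *setup(__u32 nr_hash_funcs)
    {
        struct bloom_filter_bench *skel;

        skel = bloom_filter_bench__open();
        if (!skel)
            return NULL;

        /* lower 4 bits of map_extra = number of hash functions */
        if (bpf_map__set_map_extra(skel->maps.bloom_map, nr_hash_funcs & 0xf))
            goto err;

        if (bloom_filter_bench__load(skel)) /* map is created here */
            goto err;

        return skel;
    err:
        bloom_filter_bench__destroy(skel);
        return NULL;
    }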
On 10/20/21 2:21 PM, Andrii Nakryiko wrote:
> On Wed, Oct 20, 2021 at 2:09 PM Joanne Koong <joannekoong@fb.com> wrote:
>> On 10/8/21 4:19 PM, Andrii Nakryiko wrote:
>>> [...]
>>> this struct is frozen, we can't change it. It's fine to not allow
>>> passing map_extra in libbpf APIs. We have libbpf 1.0 task to revamp
>>> low-level APIs like map creation in a way that will allow good
>>> extensibility. You don't have to worry about that in this patch set.
>> I see! From my understanding, without "map_extra" added to the
>> bpf_create_map_attr struct, it's not possible in the subsequent
>> bloom filter benchmark tests to set the map_extra flag, which
> Didn't you add bpf_map__set_map_extra() setter for that? Also one can
> always do direct bpf syscall (see sys_bpf in tools/lib/bpf/bpf.c), if
> absolutely necessary. But set_map_extra() setter is the way to go for
> benchmark, I think.
bpf_map__set_map_extra() sets the map_extra field for the bpf_map
struct, but that field can't get propagated through to the kernel
when the BPF_MAP_CREATE syscall is called in bpf_create_map_xattr.
This is because bpf_create_map_xattr takes in a "bpf_create_map_attr"
struct to instantiate the "bpf_attr" struct it passes to the kernel,
but map_extra is not part of the "bpf_create_map_attr" struct and
can't be added since the struct is frozen.

I don't think doing a direct bpf syscall in the userspace program,
and then passing the "int bloom_map_fd" to the bpf program through
the skeleton works either. This is because in the bpf program, we
can't call bpf_map_peek/push since these only take in a
"struct bpf_map *", and not an fd. We can't go from fd ->
struct bpf_map * either with something like

    struct fd f = fdget(bloom_map_fd);
    struct bpf_map *map = __bpf_map_get(f);

since both "__bpf_map_get" and "fdget" are not functions bpf programs
can call.
>> [...]
> No, getter/setter API is good to have, please keep them.
>
>>> [...]
>>>> --
>>>> 2.30.2
>>>>
On Thu, Oct 21, 2021 at 1:14 PM Joanne Koong <joannekoong@fb.com> wrote:
>
> On 10/20/21 2:21 PM, Andrii Nakryiko wrote:
> > [...]
> > Didn't you add bpf_map__set_map_extra() setter for that? Also one can
> > always do direct bpf syscall (see sys_bpf in tools/lib/bpf/bpf.c), if
> > absolutely necessary. But set_map_extra() setter is the way to go for
> > benchmark, I think.
> bpf_map__set_map_extra() sets the map_extra field for the bpf_map
> struct, but that field can't get propagated through to the kernel
> when the BPF_MAP_CREATE syscall is called in bpf_create_map_xattr.
> This is because bpf_create_map_xattr takes in a "bpf_create_map_attr"
> struct to instantiate the "bpf_attr" struct it passes to the kernel,
> but map_extra is not part of the "bpf_create_map_attr" struct and
> can't be added since the struct is frozen.

Oh, that's where the problem is. Libbpf internally doesn't have to
use bpf_create_map_xattr(). We are going to revamp all these
low-level interfaces to be extensible, but until then, I think it
will be fine to just create an internal helper that would allow us to
create maps without restrictions of maintaining API compatibility.
See what we did with libbpf__bpf_prog_load().

> I don't think doing a direct bpf syscall in the userspace program,
> and then passing the "int bloom_map_fd" to the bpf program
> through the skeleton works either. This is because in the bpf program,
> we can't call bpf_map_peek/push since these only take in a
> "struct bpf_map *", and not an fd. We can't go from fd ->
> struct bpf_map * either with something like
>
>     struct fd f = fdget(bloom_map_fd);
>     struct bpf_map *map = __bpf_map_get(f);
>
> since both "__bpf_map_get" and "fdget" are not functions bpf programs
> can call.

On BPF side there is no "struct bpf_map", actually. bpf_map_peek()
takes just "void *" which will be just a reference to the variable
that represents the map (and BPF verifier actually does the right
thing during program load, passing correct kernel address of the
map). On user-space side, though, user can use bpf_map__reuse_fd() to
set everything up, if they create map with their own custom logic.

But we are getting too much into the weeds. Let's just copy/paste
bpf_create_map_xattr() for now and add map_extra support there. And
pretty soon we'll have a nicer set of low-level APIs, at which point
we'll switch to using them internally as well.

> [...]
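For reference, the escape hatch described here would look roughly
like the sketch below: create the map with a raw bpf(2) syscall so
that map_extra reaches the kernel, then hand the resulting fd to
libbpf with bpf_map__reuse_fd() before load. Caveats: the map_extra
member of union bpf_attr and BPF_MAP_TYPE_BITSET exist only with this
series' kernel patches applied, the key/value sizes are illustrative,
and sys_bpf() is redefined locally since libbpf's copy is internal:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>
    #include <bpf/libbpf.h>

    static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size)
    {
        return syscall(__NR_bpf, cmd, attr, size);
    }

    static int create_bitset_map(__u32 nr_hash_funcs)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_BITSET; /* added by this series */
        attr.key_size = 0;
        attr.value_size = sizeof(__u32);
        attr.max_entries = 10000;
        attr.map_extra = nr_hash_funcs & 0xf; /* lower 4 bits */

        return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
    }

    /* ... then, after skeleton open but before load:
     * bpf_map__reuse_fd(skel->maps.bloom_map, map_fd);
     */

Note that with this patch applied, map_is_reuse_compat() also compares
map_extra, so the reused map's parameters must match the declared ones.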
This patch adds the libbpf infrastructure for supporting a
per-map-type "map_extra" field, whose definition will be
idiosyncratic depending on map type.

For example, for the bitset map, the lower 4 bits of map_extra
is used to denote the number of hash functions.

Signed-off-by: Joanne Koong <joannekoong@fb.com>
---
 include/uapi/linux/bpf.h        |  1 +
 tools/include/uapi/linux/bpf.h  |  1 +
 tools/lib/bpf/bpf.c             |  1 +
 tools/lib/bpf/bpf.h             |  1 +
 tools/lib/bpf/bpf_helpers.h     |  1 +
 tools/lib/bpf/libbpf.c          | 25 ++++++++++++++++++++++++-
 tools/lib/bpf/libbpf.h          |  4 ++++
 tools/lib/bpf/libbpf.map        |  2 ++
 tools/lib/bpf/libbpf_internal.h |  4 +++-
 9 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index b40fa1a72a75..a6f225e9c95a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5639,6 +5639,7 @@ struct bpf_map_info {
        __u32 btf_id;
        __u32 btf_key_type_id;
        __u32 btf_value_type_id;
+       __u32 map_extra;
 } __attribute__((aligned(8)));
 
 struct bpf_btf_info {
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index b40fa1a72a75..a6f225e9c95a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5639,6 +5639,7 @@ struct bpf_map_info {
        __u32 btf_id;
        __u32 btf_key_type_id;
        __u32 btf_value_type_id;
+       __u32 map_extra;
 } __attribute__((aligned(8)));
 
 struct bpf_btf_info {
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 7d1741ceaa32..41e3e85e7789 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -97,6 +97,7 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
        attr.btf_key_type_id = create_attr->btf_key_type_id;
        attr.btf_value_type_id = create_attr->btf_value_type_id;
        attr.map_ifindex = create_attr->map_ifindex;
+       attr.map_extra = create_attr->map_extra;
        if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
                attr.btf_vmlinux_value_type_id =
                        create_attr->btf_vmlinux_value_type_id;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6fffb3cdf39b..c4049f2d63cc 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -50,6 +50,7 @@ struct bpf_create_map_attr {
                __u32 inner_map_fd;
                __u32 btf_vmlinux_value_type_id;
        };
+       __u32 map_extra;
 };
 
 LIBBPF_API int
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 963b1060d944..bce5a0090f3f 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -133,6 +133,7 @@ struct bpf_map_def {
        unsigned int value_size;
        unsigned int max_entries;
        unsigned int map_flags;
+       unsigned int map_extra;
 };
 
 enum libbpf_pin_type {
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index ed313fd491bd..12a9ecd45a78 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2274,6 +2274,10 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
                }
                map_def->pinning = val;
                map_def->parts |= MAP_DEF_PINNING;
+       } else if (strcmp(name, "map_extra") == 0) {
+               if (!get_map_field_int(map_name, btf, m, &map_def->map_extra))
+                       return -EINVAL;
+               map_def->parts |= MAP_DEF_MAP_EXTRA;
        } else {
                if (strict) {
                        pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
@@ -2298,6 +2302,7 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
        map->def.value_size = def->value_size;
        map->def.max_entries = def->max_entries;
        map->def.map_flags = def->map_flags;
+       map->def.map_extra = def->map_extra;
 
        map->numa_node = def->numa_node;
        map->btf_key_type_id = def->key_type_id;
@@ -2322,6 +2327,8 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
                pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
        if (def->parts & MAP_DEF_MAP_FLAGS)
                pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
+       if (def->parts & MAP_DEF_MAP_EXTRA)
+               pr_debug("map '%s': found map_extra = %u.\n", map->name, def->map_extra);
        if (def->parts & MAP_DEF_PINNING)
                pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
        if (def->parts & MAP_DEF_NUMA_NODE)
@@ -4017,6 +4024,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
        map->def.value_size = info.value_size;
        map->def.max_entries = info.max_entries;
        map->def.map_flags = info.map_flags;
+       map->def.map_extra = info.map_extra;
        map->btf_key_type_id = info.btf_key_type_id;
        map->btf_value_type_id = info.btf_value_type_id;
        map->reused = true;
@@ -4534,7 +4542,8 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
                map_info.key_size == map->def.key_size &&
                map_info.value_size == map->def.value_size &&
                map_info.max_entries == map->def.max_entries &&
-               map_info.map_flags == map->def.map_flags);
+               map_info.map_flags == map->def.map_flags &&
+               map_info.map_extra == map->def.map_extra);
 }
 
 static int
@@ -4631,6 +4640,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
        create_attr.key_size = def->key_size;
        create_attr.value_size = def->value_size;
        create_attr.numa_node = map->numa_node;
+       create_attr.map_extra = def->map_extra;
 
        if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
                int nr_cpus;
@@ -8637,6 +8647,19 @@ int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
        return 0;
 }
 
+__u32 bpf_map__map_extra(const struct bpf_map *map)
+{
+       return map->def.map_extra;
+}
+
+int bpf_map__set_map_extra(struct bpf_map *map, __u32 map_extra)
+{
+       if (map->fd >= 0)
+               return libbpf_err(-EBUSY);
+       map->def.map_extra = map_extra;
+       return 0;
+}
+
 __u32 bpf_map__numa_node(const struct bpf_map *map)
 {
        return map->numa_node;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 89ca9c83ed4e..55e8dfe6f3e1 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -486,6 +486,7 @@ struct bpf_map_def {
        unsigned int value_size;
        unsigned int max_entries;
        unsigned int map_flags;
+       unsigned int map_extra;
 };
 
 /**
@@ -562,6 +563,9 @@ LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
 /* get/set map if_index */
 LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
 LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
+/* get/set map map_extra flags */
+LIBBPF_API __u32 bpf_map__map_extra(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u32 map_extra);
 
 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
 LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index f270d25e4af3..308378b3f20b 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -395,4 +395,6 @@ LIBBPF_0.6.0 {
                bpf_object__prev_program;
                btf__add_btf;
                btf__add_tag;
+               bpf_map__map_extra;
+               bpf_map__set_map_extra;
 } LIBBPF_0.5.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index f7fd3944d46d..188db854d9c2 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -193,8 +193,9 @@ enum map_def_parts {
        MAP_DEF_NUMA_NODE = 0x080,
        MAP_DEF_PINNING = 0x100,
        MAP_DEF_INNER_MAP = 0x200,
+       MAP_DEF_MAP_EXTRA = 0x400,
 
-       MAP_DEF_ALL = 0x3ff, /* combination of all above */
+       MAP_DEF_ALL = 0x7ff, /* combination of all above */
 };
 
 struct btf_map_def {
@@ -208,6 +209,7 @@ struct btf_map_def {
        __u32 map_flags;
        __u32 numa_node;
        __u32 pinning;
+       __u32 map_extra;
 };
 
 int parse_btf_map_def(const char *map_name, struct btf *btf,
-- 
2.30.2
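For completeness, the BPF-program side that the thread converges on
would look roughly like this. BPF_MAP_TYPE_BITSET, the map_extra BTF
field, and the peek/push usage all come from this series (none of it
is in a released kernel at the time of this discussion), and the
element type and sizes are illustrative:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_BITSET); /* added by this series */
        __uint(max_entries, 10000);
        __type(value, __u32);
        __uint(map_extra, 5); /* lower 4 bits: number of hash functions */
    } bloom_map SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_getpgid")
    int test_bloom(void *ctx)
    {
        __u32 val = 42;

        /* set the bits corresponding to this value */
        bpf_map_push_elem(&bloom_map, &val, 0);

        /* membership check: 0 means "possibly in the set" */
        if (bpf_map_peek_elem(&bloom_map, &val) == 0)
            bpf_printk("value 42 may be present");

        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";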