--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -4361,7 +4361,6 @@ static int probe_kern_prog_name(void)
static int probe_kern_global_data(void)
{
- struct bpf_create_map_attr map_attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
@@ -4371,13 +4370,7 @@ static int probe_kern_global_data(void)
};
int ret, map, insn_cnt = ARRAY_SIZE(insns);
- memset(&map_attr, 0, sizeof(map_attr));
- map_attr.map_type = BPF_MAP_TYPE_ARRAY;
- map_attr.key_size = sizeof(int);
- map_attr.value_size = 32;
- map_attr.max_entries = 1;
-
- map = bpf_create_map_xattr(&map_attr);
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
if (map < 0) {
ret = -errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4507,15 +4500,11 @@ static int probe_kern_btf_type_tag(void)
static int probe_kern_array_mmap(void)
{
- struct bpf_create_map_attr attr = {
- .map_type = BPF_MAP_TYPE_ARRAY,
- .map_flags = BPF_F_MMAPABLE,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 1,
- };
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
+ int fd;
- return probe_fd(bpf_create_map_xattr(&attr));
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), sizeof(int), 1, &opts);
+ return probe_fd(fd);
}
static int probe_kern_exp_attach_type(void)
@@ -4554,7 +4543,6 @@ static int probe_kern_probe_read_kernel(void)
static int probe_prog_bind_map(void)
{
- struct bpf_create_map_attr map_attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -4562,13 +4550,7 @@ static int probe_prog_bind_map(void)
};
int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
- memset(&map_attr, 0, sizeof(map_attr));
- map_attr.map_type = BPF_MAP_TYPE_ARRAY;
- map_attr.key_size = sizeof(int);
- map_attr.value_size = 32;
- map_attr.max_entries = 1;
-
- map = bpf_create_map_xattr(&map_attr);
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int), 32, 1, NULL);
if (map < 0) {
ret = -errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -201,7 +201,6 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
{
int key_size, value_size, max_entries, map_flags;
__u32 btf_key_type_id = 0, btf_value_type_id = 0;
- struct bpf_create_map_attr attr = {};
int fd = -1, btf_fd = -1, fd_inner;
key_size = sizeof(__u32);
@@ -271,34 +270,35 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+
/* TODO: probe for device, once libbpf has a function to create
* map-in-map for offload
*/
if (ifindex)
return false;
- fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
- sizeof(__u32), sizeof(__u32), 1, 0);
+ fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
+ sizeof(__u32), sizeof(__u32), 1, NULL);
if (fd_inner < 0)
return false;
- fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
- fd_inner, 1, 0);
+
+ opts.inner_map_fd = fd_inner;
+ fd = bpf_map_create(map_type, NULL, sizeof(__u32), sizeof(__u32), 1, &opts);
close(fd_inner);
} else {
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+
/* Note: No other restriction on map type probes for offload */
- attr.map_type = map_type;
- attr.key_size = key_size;
- attr.value_size = value_size;
- attr.max_entries = max_entries;
- attr.map_flags = map_flags;
- attr.map_ifindex = ifindex;
+ opts.map_flags = map_flags;
+ opts.map_ifindex = ifindex;
if (btf_fd >= 0) {
- attr.btf_fd = btf_fd;
- attr.btf_key_type_id = btf_key_type_id;
- attr.btf_value_type_id = btf_value_type_id;
+ opts.btf_fd = btf_fd;
+ opts.btf_key_type_id = btf_key_type_id;
+ opts.btf_value_type_id = btf_value_type_id;
}
- fd = bpf_create_map_xattr(&attr);
+ fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
}
if (fd >= 0)
close(fd);
--- a/tools/lib/bpf/skel_internal.h
+++ b/tools/lib/bpf/skel_internal.h
@@ -65,8 +65,7 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
int map_fd = -1, prog_fd = -1, key = 0, err;
union bpf_attr attr;
- map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "__loader.map", 4,
- opts->data_sz, 1, 0);
+ map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1, NULL);
if (map_fd < 0) {
opts->errstr = "failed to create loader map";
err = -errno;
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -364,7 +364,6 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
static enum xsk_prog get_xsk_prog(void)
{
enum xsk_prog detected = XSK_PROG_FALLBACK;
- struct bpf_create_map_attr map_attr;
__u32 size_out, retval, duration;
char data_in = 0, data_out;
struct bpf_insn insns[] = {
@@ -376,13 +375,7 @@ static enum xsk_prog get_xsk_prog(void)
};
int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns);
- memset(&map_attr, 0, sizeof(map_attr));
- map_attr.map_type = BPF_MAP_TYPE_XSKMAP;
- map_attr.key_size = sizeof(int);
- map_attr.value_size = sizeof(int);
- map_attr.max_entries = 1;
-
- map_fd = bpf_create_map_xattr(&map_attr);
+ map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, NULL, sizeof(int), sizeof(int), 1, NULL);
if (map_fd < 0)
return detected;
@@ -586,8 +579,8 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk)
if (max_queues < 0)
return max_queues;
- fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
- sizeof(int), sizeof(int), max_queues, 0);
+ fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map",
+ sizeof(int), sizeof(int), max_queues, NULL);
if (fd < 0)
return fd;
Remove all the remaining uses of to-be-deprecated bpf_create_map*() APIs.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
---
 tools/lib/bpf/libbpf.c        | 30 ++++++------------------------
 tools/lib/bpf/libbpf_probes.c | 30 +++++++++++++++---------------
 tools/lib/bpf/skel_internal.h |  3 +--
 tools/lib/bpf/xsk.c           | 13 +++----------
 4 files changed, 25 insertions(+), 51 deletions(-)
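
For reference, the calling convention being adopted above can be exercised on its
own. Below is a minimal, illustrative sketch (not part of this patch; the map
names "demo_map" and "demo_mmap_map" are made up) that assumes libbpf v0.7+,
where bpf_map_create() and LIBBPF_OPTS() are available:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

int main(void)
{
	/* Simple case: map type, optional name, key/value sizes, max_entries
	 * and NULL opts replace the old bpf_create_map()/bpf_create_map_name().
	 */
	int map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_map",
				    sizeof(int), 32, 1, NULL);
	if (map_fd < 0) {
		fprintf(stderr, "array map creation failed: %d\n", -errno);
		return 1;
	}

	/* Extra attributes that used to be set in struct bpf_create_map_attr
	 * are now passed via bpf_map_create_opts.
	 */
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
	int mmap_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_mmap_map",
				     sizeof(int), sizeof(int), 1, &opts);
	if (mmap_fd >= 0)
		close(mmap_fd);

	close(map_fd);
	return 0;
}

As in the patch, a NULL opts pointer is enough for plain maps, so most call
sites shrink to a single line; only probes that need map_flags, BTF IDs or an
inner map fd declare a bpf_map_create_opts on the stack.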