
[RFC,8/9] selftests: sync bpf.h

Message ID 20211014143436.54470-12-lmb@cloudflare.com (mailing list archive)
State RFC
Series uapi/bpf.h for robots

Checks

Context                  Check    Description
netdev/tree_selection    success  Not a local patch
bpf/vmtest-bpf           fail     VM_Test
bpf/vmtest-bpf-PR        fail     PR summary
bpf/vmtest-bpf-next      fail     VM_Test
bpf/vmtest-bpf-next-PR   fail     PR summary

Commit Message

Lorenz Bauer Oct. 14, 2021, 2:34 p.m. UTC
---
 tools/include/uapi/linux/bpf.h | 200 ++++++++++++++++++++++-----------
 1 file changed, 134 insertions(+), 66 deletions(-)
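The synced header gives every bpf_attr command a named struct and a named union member instead of an anonymous struct. A minimal sketch of what that allows in a selftest, assuming the struct bpf_map_create_attr / map_create names introduced in the diff below; the helper name and the raw syscall(2) wrapper are illustrative and not part of the patch:

/* Illustrative only: create a small array map through the new named
 * union member rather than the legacy anonymous-struct fields.
 * Requires a bpf.h with the bpf_map_create_attr changes from this series.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int map_create_example(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_create.map_type    = BPF_MAP_TYPE_ARRAY;
	attr.map_create.key_size    = sizeof(__u32);
	attr.map_create.value_size  = sizeof(__u64);
	attr.map_create.max_entries = 1;

	/* Same syscall as before; only the field access changes. */
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

Existing callers that use the anonymous-struct fields (attr.map_type, attr.key_size, ...) should keep compiling, since the patch retains those members in the union as deprecated compatibility aliases.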

Patch

diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 6fc59d61937a..d3acd12d98c1 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -49,8 +49,14 @@ 
 #define BPF_XCHG	(0xe0 | BPF_FETCH)	/* atomic exchange */
 #define BPF_CMPXCHG	(0xf0 | BPF_FETCH)	/* atomic compare-and-write */
 
+#define __bpf_md_ptr(type, name)	\
+union {					\
+	type name;			\
+	__u64 name##_u64;		\
+} __attribute__((aligned(8)))
+
 /* Register numbers */
-enum {
+enum bpf_reg {
 	BPF_REG_0 = 0,
 	BPF_REG_1,
 	BPF_REG_2,
@@ -1056,16 +1062,19 @@  enum bpf_link_type {
  * All eligible programs are executed regardless of return code from
  * earlier programs.
  */
-#define BPF_F_ALLOW_OVERRIDE	(1U << 0)
-#define BPF_F_ALLOW_MULTI	(1U << 1)
-#define BPF_F_REPLACE		(1U << 2)
+enum bpf_prog_attach_flag {
+	BPF_F_ALLOW_OVERRIDE	= (1U << 0),
+	BPF_F_ALLOW_MULTI	= (1U << 1),
+	BPF_F_REPLACE		= (1U << 2),
+};
 
+enum bpf_prog_load_flag {
 /* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
  * verifier will perform strict alignment checking as if the kernel
  * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
  * and NET_IP_ALIGN defined to 2.
  */
-#define BPF_F_STRICT_ALIGNMENT	(1U << 0)
+	BPF_F_STRICT_ALIGNMENT = (1U << 0),
 
 /* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
  * verifier will allow any alignment whatsoever.  On platforms
@@ -1079,7 +1088,7 @@  enum bpf_link_type {
  * of an unaligned access the alignment check would trigger before
  * the one we are interested in.
  */
-#define BPF_F_ANY_ALIGNMENT	(1U << 1)
+	BPF_F_ANY_ALIGNMENT = (1U << 1),
 
 /* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose.
  * Verifier does sub-register def/use analysis and identifies instructions whose
@@ -1097,10 +1106,10 @@  enum bpf_link_type {
  * Then, if verifier is not doing correct analysis, such randomization will
  * regress tests to expose bugs.
  */
-#define BPF_F_TEST_RND_HI32	(1U << 2)
+	BPF_F_TEST_RND_HI32 = (1U << 2),
 
 /* The verifier internal test flag. Behavior is undefined */
-#define BPF_F_TEST_STATE_FREQ	(1U << 3)
+	BPF_F_TEST_STATE_FREQ = (1U << 3),
 
 /* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
  * restrict map and helper usage for such programs. Sleepable BPF programs can
@@ -1108,8 +1117,10 @@  enum bpf_link_type {
  * Such programs are allowed to use helpers that may sleep like
  * bpf_copy_from_user().
  */
-#define BPF_F_SLEEPABLE		(1U << 4)
+	BPF_F_SLEEPABLE = (1U << 4),
+};
 
+enum bpf_pseudo_src_reg {
 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have
  * the following extensions:
  *
@@ -1121,8 +1132,8 @@  enum bpf_link_type {
  * ldimm64 rewrite:  address of map
  * verifier type:    CONST_PTR_TO_MAP
  */
-#define BPF_PSEUDO_MAP_FD	1
-#define BPF_PSEUDO_MAP_IDX	5
+	BPF_PSEUDO_MAP_FD  = 1,
+	BPF_PSEUDO_MAP_IDX = 5,
 
 /* insn[0].src_reg:  BPF_PSEUDO_MAP_[IDX_]VALUE
  * insn[0].imm:      map fd or fd_idx
@@ -1132,8 +1143,8 @@  enum bpf_link_type {
  * ldimm64 rewrite:  address of map[0]+offset
  * verifier type:    PTR_TO_MAP_VALUE
  */
-#define BPF_PSEUDO_MAP_VALUE		2
-#define BPF_PSEUDO_MAP_IDX_VALUE	6
+	BPF_PSEUDO_MAP_VALUE     = 2,
+	BPF_PSEUDO_MAP_IDX_VALUE = 6,
 
 /* insn[0].src_reg:  BPF_PSEUDO_BTF_ID
  * insn[0].imm:      kernel btd id of VAR
@@ -1144,7 +1155,7 @@  enum bpf_link_type {
  * verifier type:    PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
  *                   is struct/union.
  */
-#define BPF_PSEUDO_BTF_ID	3
+	BPF_PSEUDO_BTF_ID = 3,
 /* insn[0].src_reg:  BPF_PSEUDO_FUNC
  * insn[0].imm:      insn offset to the func
  * insn[1].imm:      0
@@ -1153,19 +1164,20 @@  enum bpf_link_type {
  * ldimm64 rewrite:  address of the function
  * verifier type:    PTR_TO_FUNC.
  */
-#define BPF_PSEUDO_FUNC		4
+	BPF_PSEUDO_FUNC = 4,
 
 /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
  * offset to another bpf function
  */
-#define BPF_PSEUDO_CALL		1
+	BPF_PSEUDO_CALL = 1,
 /* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
  * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
  */
-#define BPF_PSEUDO_KFUNC_CALL	2
+	BPF_PSEUDO_KFUNC_CALL = 2,
+};
 
 /* flags for BPF_MAP_UPDATE_ELEM command */
-enum {
+enum bpf_map_update_elem_flag {
 	BPF_ANY		= 0, /* create new element or update existing */
 	BPF_NOEXIST	= 1, /* create new element if it didn't exist */
 	BPF_EXIST	= 2, /* update existing element */
@@ -1173,7 +1185,7 @@  enum {
 };
 
 /* flags for BPF_MAP_CREATE command */
-enum {
+enum bpf_map_create_flag {
 	BPF_F_NO_PREALLOC	= (1U << 0),
 /* Instead of having one common LRU list in the
  * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
@@ -1213,17 +1225,19 @@  enum {
 };
 
 /* Flags for BPF_PROG_QUERY. */
-
+enum bpf_prog_query_flag {
 /* Query effective (directly attached + inherited from ancestor cgroups)
  * programs that will be executed for events within a cgroup.
  * attach_flags with this flag are returned only for directly attached programs.
  */
-#define BPF_F_QUERY_EFFECTIVE	(1U << 0)
-
-/* Flags for BPF_PROG_TEST_RUN */
+	BPF_F_QUERY_EFFECTIVE = (1U << 0),
+};
 
+/* Flags for BPF_PROG_RUN */
+enum bpf_prog_run_flag {
 /* If set, run the test on the cpu specified by bpf_attr.test.cpu */
-#define BPF_F_TEST_RUN_ON_CPU	(1U << 0)
+	BPF_F_TEST_RUN_ON_CPU = (1U << 0),
+};
 
 /* type for BPF_ENABLE_STATS */
 enum bpf_stats_type {
@@ -1240,7 +1254,10 @@  enum bpf_stack_build_id_status {
 	BPF_STACK_BUILD_ID_IP = 2,
 };
 
-#define BPF_BUILD_ID_SIZE 20
+enum {
+	BPF_BUILD_ID_SIZE = 20,
+};
+
 struct bpf_stack_build_id {
 	__s32		status;
 	unsigned char	build_id[BPF_BUILD_ID_SIZE];
@@ -1250,41 +1267,68 @@  struct bpf_stack_build_id {
 	};
 };
 
-#define BPF_OBJ_NAME_LEN 16U
+enum {
+	BPF_OBJ_NAME_LEN = 16U,
+};
+
+struct bpf_map_create_attr {
+	__u32	map_type;	/* one of enum bpf_map_type */
+	__u32	key_size;	/* size of key in bytes */
+	__u32	value_size;	/* size of value in bytes */
+	__u32	max_entries;	/* max number of entries in a map */
+	__u32	map_flags;	/* BPF_MAP_CREATE related
+				 * flags defined above.
+				 */
+	__u32	inner_map_fd;	/* fd pointing to the inner map */
+	__u32	numa_node;	/* numa node (effective only if
+				 * BPF_F_NUMA_NODE is set).
+				 */
+	char	map_name[BPF_OBJ_NAME_LEN];
+	__u32	map_ifindex;	/* ifindex of netdev to create on */
+	__u32	btf_fd;		/* fd pointing to a BTF type data */
+	__u32	btf_key_type_id;	/* BTF type_id of the key */
+	__u32	btf_value_type_id;	/* BTF type_id of the value */
+	__u32	btf_vmlinux_value_type_id;	/* BTF type_id of a kernel-
+						 * struct stored as the
+						 * map value
+						 */
+};
+
+struct bpf_map_lookup_elem_attr {
+	__u32 map_fd;
+	__bpf_md_ptr(const void *, key);
+	__bpf_md_ptr(void *, value);
+	__u64 flags;
+};
+
+struct bpf_map_update_elem_attr {
+	__u32 map_fd;
+	__bpf_md_ptr(const void *, key);
+	__bpf_md_ptr(void *, value);
+	__u64 flags;
+};
+
+struct bpf_map_delete_elem_attr {
+	__u32 map_fd;
+	__bpf_md_ptr(const void *, key);
+};
+
+struct bpf_map_get_next_key_attr {
+	__u32 map_fd;
+	__bpf_md_ptr(const void *, key);
+	__bpf_md_ptr(void *, next_key);
+};
 
 union bpf_attr {
-	struct { /* anonymous struct used by BPF_MAP_CREATE command */
-		__u32	map_type;	/* one of enum bpf_map_type */
-		__u32	key_size;	/* size of key in bytes */
-		__u32	value_size;	/* size of value in bytes */
-		__u32	max_entries;	/* max number of entries in a map */
-		__u32	map_flags;	/* BPF_MAP_CREATE related
-					 * flags defined above.
-					 */
-		__u32	inner_map_fd;	/* fd pointing to the inner map */
-		__u32	numa_node;	/* numa node (effective only if
-					 * BPF_F_NUMA_NODE is set).
-					 */
-		char	map_name[BPF_OBJ_NAME_LEN];
-		__u32	map_ifindex;	/* ifindex of netdev to create on */
-		__u32	btf_fd;		/* fd pointing to a BTF type data */
-		__u32	btf_key_type_id;	/* BTF type_id of the key */
-		__u32	btf_value_type_id;	/* BTF type_id of the value */
-		__u32	btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
-						   * struct stored as the
-						   * map value
-						   */
-	};
+	struct bpf_map_create_attr map_create;
 
-	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
-		__u32		map_fd;
-		__aligned_u64	key;
-		union {
-			__aligned_u64 value;
-			__aligned_u64 next_key;
-		};
-		__u64		flags;
-	};
+	struct bpf_map_lookup_elem_attr map_lookup_elem;
+
+	struct bpf_map_update_elem_attr map_update_elem;
+
+	struct bpf_map_delete_elem_attr map_delete_elem;
+
+	struct bpf_map_get_next_key_attr map_get_next_key;
 
 	struct { /* struct used by BPF_MAP_*_BATCH commands */
 		__aligned_u64	in_batch;	/* start batch,
@@ -1487,6 +1531,32 @@  union bpf_attr {
 		__u32		flags;		/* extra flags */
 	} prog_bind_map;
 
+	/* DEPRECATED: these are kept for compatibility purposes. */
+	struct { /* anonymous struct used by BPF_MAP_CREATE command */
+		__u32	map_type;
+		__u32	key_size;
+		__u32	value_size;
+		__u32	max_entries;
+		__u32	map_flags;
+		__u32	inner_map_fd;
+		__u32	numa_node;
+		char	map_name[BPF_OBJ_NAME_LEN];
+		__u32	map_ifindex;
+		__u32	btf_fd;
+		__u32	btf_key_type_id;
+		__u32	btf_value_type_id;
+		__u32	btf_vmlinux_value_type_id;
+	};
+
+	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
+		__u32		map_fd;
+		__aligned_u64	key;
+		union {
+			__aligned_u64 value;
+			__aligned_u64 next_key;
+		};
+		__u64		flags;
+	};
 } __attribute__((aligned(8)));
 
 /* The description below is an attempt at providing documentation to eBPF
@@ -5230,7 +5300,7 @@  enum {
 };
 
 /* BPF ring buffer constants */
-enum {
+enum bpf_ringbuf_const {
 	BPF_RINGBUF_BUSY_BIT		= (1U << 31),
 	BPF_RINGBUF_DISCARD_BIT		= (1U << 30),
 	BPF_RINGBUF_HDR_SZ		= 8,
@@ -5272,12 +5342,6 @@  enum {
 	BPF_F_EXCLUDE_INGRESS	= (1ULL << 4),
 };
 
-#define __bpf_md_ptr(type, name)	\
-union {					\
-	type name;			\
-	__u64 :64;			\
-} __attribute__((aligned(8)))
-
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
  */
@@ -5456,7 +5520,9 @@  struct bpf_xdp_sock {
 	__u32 queue_id;
 };
 
-#define XDP_PACKET_HEADROOM 256
+enum {
+	XDP_PACKET_HEADROOM = 256,
+};
 
 /* User return codes for XDP prog type.
  * A valid XDP program must return one of these defined values. All other
@@ -5574,7 +5640,9 @@  struct sk_reuseport_md {
 	__bpf_md_ptr(struct bpf_sock *, migrating_sk);
 };
 
-#define BPF_TAG_SIZE	8
+enum {
+	BPF_TAG_SIZE = 8,
+};
 
 struct bpf_prog_info {
 	__u32 type;