@@ -3561,8 +3561,24 @@ void bpf_insn_set_adjust(struct bpf_map *map, u32 off, u32 len);
void bpf_insn_set_adjust_after_remove(struct bpf_map *map, u32 off, u32 len);
struct bpf_insn_ptr {
+ void *jitted_ip;
+ u32 jitted_off;
+ u32 jitted_len;
+ int jitted_jump_offset;
+
u32 orig_xlated_off;
u32 xlated_off;
+ bool inverse_ja_or_nop;
};
+void bpf_prog_update_insn_ptr(struct bpf_prog *prog,
+ u32 xlated_off,
+ u32 jitted_off,
+ u32 jitted_len,
+ int jitted_jump_offset,
+ void *jitted_ip);
+
+int bpf_static_key_set(struct bpf_map *map, bool on);
+int bpf_arch_poke_static_branch(struct bpf_insn_ptr *ptr, bool on);
+
#endif /* _LINUX_BPF_H */
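
An arch JIT is expected to report the final location of each controlled
branch via bpf_prog_update_insn_ptr(). A minimal sketch of such a call
site (everything other than the function itself is an assumption for
illustration; a 5-byte branch slot is assumed):

	/* After emitting the branch for the instruction at xlated_off
	 * as a 5-byte slot at image offset branch_off with rel32
	 * displacement `rel32`, record where it landed:
	 */
	bpf_prog_update_insn_ptr(prog, xlated_off, branch_off, 5, rel32,
				 image + branch_off);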
@@ -906,6 +906,19 @@ union bpf_iter_link_info {
* A new file descriptor (a nonnegative integer), or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
+ * BPF_STATIC_KEY_UPDATE
+ * Description
+ * Turn a static key on or off: update the jitted code of the
+ * jump instructions controlled by the *map_fd* static key.
+ * Depending on the instruction type (goto_or_nop vs. nop_or_goto)
+ * and the *on* parameter, the binary code of each instruction is
+ * patched to be either a jump or a nop.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -961,6 +974,7 @@ enum bpf_cmd {
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
+ BPF_STATIC_KEY_UPDATE,
__MAX_BPF_CMD,
};
@@ -1853,6 +1867,11 @@ union bpf_attr {
__u32 bpffs_fd;
} token_create;
+ struct { /* struct used by BPF_STATIC_KEY_UPDATE command */
+ __u32 map_fd;
+ __u32 on;
+ } static_key;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
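
For reference, a minimal userspace invocation of the new command might
look like this (a sketch using the raw syscall; map_fd is assumed to be
a BPF_MAP_TYPE_INSN_SET map created with BPF_F_STATIC_KEY):

	union bpf_attr attr = {};

	attr.static_key.map_fd = map_fd;
	attr.static_key.on = 1;		/* only bit 0 may be set */

	if (syscall(__NR_bpf, BPF_STATIC_KEY_UPDATE, &attr, sizeof(attr)) < 0)
		/* errno is set, e.g. EOPNOTSUPP when the arch does not
		 * implement bpf_arch_poke_static_branch() */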
@@ -7551,4 +7570,12 @@ enum bpf_kfunc_flags {
BPF_F_PAD_ZEROS = (1ULL << 0),
};
+/*
+ * Flags to control creation of BPF Instruction Sets
+ * - BPF_F_STATIC_KEY: Map will be used as a Static Key.
+ */
+enum bpf_insn_set_flags {
+ BPF_F_STATIC_KEY = (1ULL << 0),
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
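
A static key is created as a regular instruction set map with the new
flag passed in map_extra. A sketch of map creation via the raw syscall
(the key/value size and map_flags constraints are the ones enforced by
insn_set_alloc_check() below):

	union bpf_attr attr = {};
	int map_fd;

	attr.map_type    = BPF_MAP_TYPE_INSN_SET;
	attr.key_size    = 4;
	attr.value_size  = 4;
	attr.max_entries = 1;			/* one controlled branch */
	attr.map_extra   = BPF_F_STATIC_KEY;

	map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));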
@@ -33,7 +33,8 @@ static int insn_set_alloc_check(union bpf_attr *attr)
if (attr->max_entries == 0 ||
attr->key_size != 4 ||
attr->value_size != 4 ||
- attr->map_flags != 0)
+ attr->map_flags != 0 ||
+ attr->map_extra & ~BPF_F_STATIC_KEY)
return -EINVAL;
if (attr->max_entries > MAX_ISET_ENTRIES)
@@ -176,6 +177,30 @@ static inline bool is_frozen(struct bpf_map *map)
return ret;
}
+static bool is_static_key(const struct bpf_map *map)
+{
+ if (map->map_type != BPF_MAP_TYPE_INSN_SET)
+ return false;
+
+ if (!(map->map_extra & BPF_F_STATIC_KEY))
+ return false;
+
+ return true;
+}
+
+static bool is_ja_or_nop(const struct bpf_insn *insn)
+{
+ u8 code = insn->code;
+
+ return (code == (BPF_JMP | BPF_JA) || code == (BPF_JMP32 | BPF_JA)) &&
+ (insn->src_reg & BPF_STATIC_BRANCH_JA);
+}
+
+static bool is_inverse_ja_or_nop(const struct bpf_insn *insn)
+{
+ return insn->src_reg & BPF_STATIC_BRANCH_NOP;
+}
+
static inline bool valid_offsets(const struct bpf_insn_set *insn_set,
const struct bpf_prog *prog)
{
@@ -188,16 +213,17 @@ static inline bool valid_offsets(const struct bpf_insn_set *insn_set,
if (off >= prog->len)
return false;
- if (off > 0) {
- if (prog->insnsi[off-1].code == (BPF_LD | BPF_DW | BPF_IMM))
- return false;
- }
+ if (off > 0 && prog->insnsi[off-1].code == (BPF_LD | BPF_DW | BPF_IMM))
+ return false;
if (i > 0) {
prev_off = insn_set->ptrs[i-1].orig_xlated_off;
if (off <= prev_off)
return false;
}
+
+ if (is_static_key(&insn_set->map) && !is_ja_or_nop(&prog->insnsi[off]))
+ return false;
}
return true;
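
For illustration, the two branch flavours accepted by the predicates
above could be constructed as follows (a sketch; BPF_STATIC_BRANCH_JA
and BPF_STATIC_BRANCH_NOP are assumed to be defined elsewhere in the
series, and target_off is a placeholder):

	/* goto_or_nop: behaves as a jump when the key is on */
	struct bpf_insn goto_or_nop = {
		.code    = BPF_JMP | BPF_JA,
		.src_reg = BPF_STATIC_BRANCH_JA,
		.off     = target_off,
	};

	/* nop_or_goto: the NOP flag inverts the meaning of the key */
	struct bpf_insn nop_or_goto = {
		.code    = BPF_JMP | BPF_JA,
		.src_reg = BPF_STATIC_BRANCH_JA | BPF_STATIC_BRANCH_NOP,
		.off     = target_off,
	};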
@@ -206,6 +232,7 @@ static inline bool valid_offsets(const struct bpf_insn_set *insn_set,
int bpf_insn_set_init(struct bpf_map *map, const struct bpf_prog *prog)
{
struct bpf_insn_set *insn_set = cast_insn_set(map);
+ const struct bpf_insn *insn;
int i;
if (!is_frozen(map))
@@ -228,11 +255,16 @@ int bpf_insn_set_init(struct bpf_map *map, const struct bpf_prog *prog)
/*
* Reset all the map indexes to the original values. This is needed,
* e.g., when a replay of verification with different log level should
* be performed.
*/
for (i = 0; i < map->max_entries; i++)
insn_set->ptrs[i].xlated_off = insn_set->ptrs[i].orig_xlated_off;
+ for (i = 0; i < map->max_entries; i++) {
+ insn = &prog->insnsi[insn_set->ptrs[i].xlated_off];
+ insn_set->ptrs[i].inverse_ja_or_nop = is_inverse_ja_or_nop(insn);
+ }
+
return 0;
}
@@ -286,3 +318,83 @@ void bpf_insn_set_adjust_after_remove(struct bpf_map *map, u32 off, u32 len)
insn_set->ptrs[i].xlated_off -= len;
}
}
+
+static struct bpf_insn_ptr *insn_ptr_by_offset(struct bpf_prog *prog, u32 xlated_off)
+{
+ struct bpf_insn_set *insn_set;
+ struct bpf_map *map;
+ int i, j;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++) {
+ map = prog->aux->used_maps[i];
+ if (!is_static_key(map))
+ continue;
+
+ insn_set = cast_insn_set(map);
+ for (j = 0; j < map->max_entries; j++) {
+ if (insn_set->ptrs[j].xlated_off == xlated_off)
+ return &insn_set->ptrs[j];
+ }
+ }
+
+ return NULL;
+}
+
+void bpf_prog_update_insn_ptr(struct bpf_prog *prog,
+ u32 xlated_off,
+ u32 jitted_off,
+ u32 jitted_len,
+ int jitted_jump_offset,
+ void *jitted_ip)
+{
+ struct bpf_insn_ptr *ptr;
+
+ ptr = insn_ptr_by_offset(prog, xlated_off);
+ if (ptr) {
+ ptr->jitted_ip = jitted_ip;
+ ptr->jitted_off = jitted_off;
+ ptr->jitted_len = jitted_len;
+ ptr->jitted_jump_offset = jitted_jump_offset;
+ }
+}
+
+static int check_state(struct bpf_insn_set *insn_set)
+{
+ int ret = 0;
+
+ mutex_lock(&insn_set->state_mutex);
+ if (insn_set->state == INSN_SET_STATE_FREE)
+ ret = -EINVAL;
+ if (insn_set->state == INSN_SET_STATE_INIT)
+ ret = -EBUSY;
+ mutex_unlock(&insn_set->state_mutex);
+
+ return ret;
+}
+
+int bpf_static_key_set(struct bpf_map *map, bool on)
+{
+ struct bpf_insn_set *insn_set = cast_insn_set(map);
+ struct bpf_insn_ptr *ptr;
+ int ret = 0;
+ int i;
+
+ if (!is_static_key(map))
+ return -EINVAL;
+
+ ret = check_state(insn_set);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < map->max_entries; i++) {
+ ptr = &insn_set->ptrs[i];
+ if (ptr->xlated_off == INSN_DELETED)
+ continue;
+
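+ /* nop_or_goto branches take the opposite of the key state */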
+ ret = bpf_arch_poke_static_branch(ptr, on ^ ptr->inverse_ja_or_nop);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
@@ -3173,6 +3173,11 @@ static int __init bpf_global_ma_init(void)
late_initcall(bpf_global_ma_init);
#endif
+int __weak bpf_arch_poke_static_branch(struct bpf_insn_ptr *ptr, bool on)
+{
+ return -EOPNOTSUPP;
+}
+
DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
EXPORT_SYMBOL(bpf_stats_enabled_key);
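
For an architecture that supports patching, bpf_arch_poke_static_branch()
is overridden in its bpf_jit_comp.c. A rough sketch of what an x86-64
version might look like, using the recorded jitted_ip, jitted_len and
jitted_jump_offset together with text_poke_bp() (this is an assumption
about a follow-up arch patch, with opcode handling simplified; a 5-byte
jmp/nop slot is assumed):

	int bpf_arch_poke_static_branch(struct bpf_insn_ptr *ptr, bool on)
	{
		u8 insn[5];

		if (ptr->jitted_len != sizeof(insn))
			return -EINVAL;

		if (on) {
			insn[0] = 0xe9;	/* jmp rel32 */
			memcpy(&insn[1], &ptr->jitted_jump_offset, 4);
		} else {
			memcpy(insn, x86_nops[5], sizeof(insn));
		}

		text_poke_bp(ptr->jitted_ip, insn, sizeof(insn), NULL);
		return 0;
	}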
@@ -1346,6 +1346,7 @@ static int map_create(union bpf_attr *attr, bool kernel)
if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
attr->map_type != BPF_MAP_TYPE_ARENA &&
+ attr->map_type != BPF_MAP_TYPE_INSN_SET &&
attr->map_extra != 0)
return -EINVAL;
@@ -1691,6 +1692,29 @@ static int map_lookup_elem(union bpf_attr *attr)
return err;
}
+#define BPF_STATIC_KEY_UPDATE_LAST_FIELD static_key.on
+
+static int bpf_static_key_update(const union bpf_attr *attr)
+{
+ bool on = attr->static_key.on & 1;
+ struct bpf_map *map;
+ int ret;
+
+ if (CHECK_ATTR(BPF_STATIC_KEY_UPDATE))
+ return -EINVAL;
+
+ if (attr->static_key.on & ~1)
+ return -EINVAL;
+
+ map = bpf_map_get(attr->static_key.map_fd);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ ret = bpf_static_key_set(map, on);
+
+ bpf_map_put(map);
+ return ret;
+}
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
@@ -5908,6 +5932,9 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
case BPF_TOKEN_CREATE:
err = token_create(&attr);
break;
+ case BPF_STATIC_KEY_UPDATE:
+ err = bpf_static_key_update(&attr);
+ break;
default:
err = -EINVAL;
break;
@@ -906,6 +906,19 @@ union bpf_iter_link_info {
* A new file descriptor (a nonnegative integer), or -1 if an
* error occurred (in which case, *errno* is set appropriately).
*
+ * BPF_STATIC_KEY_UPDATE
+ * Description
+ * Turn a static key on or off: update the jitted code of the
+ * jump instructions controlled by the *map_fd* static key.
+ * Depending on the instruction type (goto_or_nop vs. nop_or_goto)
+ * and the *on* parameter, the binary code of each instruction is
+ * patched to be either a jump or a nop.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
* NOTES
* eBPF objects (maps and programs) can be shared between processes.
*
@@ -961,6 +974,7 @@ enum bpf_cmd {
BPF_LINK_DETACH,
BPF_PROG_BIND_MAP,
BPF_TOKEN_CREATE,
+ BPF_STATIC_KEY_UPDATE,
__MAX_BPF_CMD,
};
@@ -1853,6 +1867,11 @@ union bpf_attr {
__u32 bpffs_fd;
} token_create;
+ struct { /* struct used by BPF_STATIC_KEY_UPDATE command */
+ __u32 map_fd;
+ __u32 on;
+ } static_key;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -7551,4 +7570,12 @@ enum bpf_kfunc_flags {
BPF_F_PAD_ZEROS = (1ULL << 0),
};
+/*
+ * Flags to control creation of BPF Instruction Sets
+ * - BPF_F_STATIC_KEY: Map will be used as a Static Key.
+ */
+enum bpf_insn_set_flags {
+ BPF_F_STATIC_KEY = (1ULL << 0),
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
Add a new bpf system call, BPF_STATIC_KEY_UPDATE, which allows users to
update static keys in BPF. Namely, this system call is executed as

    bpf(BPF_STATIC_KEY_UPDATE, attrs={map_fd, on})

where map_fd is a BPF static key, i.e., a map of type
BPF_MAP_TYPE_INSN_SET which points to one or more goto_or_nop/nop_or_goto
instructions. The "on" parameter is a boolean value to set this key on or
off. If it is true/false, then goto_or_nop/nop_or_goto instructions
controlled by the key are jitted to jump/nop, correspondingly.

To implement this for a particular architecture, re-define the weak
bpf_arch_poke_static_branch() function in the corresponding
bpf_jit_comp.c.

Signed-off-by: Anton Protopopov <aspsk@isovalent.com>
---
 include/linux/bpf.h            |  16 +++++
 include/uapi/linux/bpf.h       |  27 +++++++
 kernel/bpf/bpf_insn_set.c      | 124 +++++++++++++++++++++++++++++++--
 kernel/bpf/core.c              |   5 ++
 kernel/bpf/syscall.c           |  27 +++++++
 tools/include/uapi/linux/bpf.h |  27 +++++++
 6 files changed, 220 insertions(+), 6 deletions(-)