@@ -2909,6 +2909,8 @@ static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_get_proto;
extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto;
+extern const struct bpf_func_proto bpf_perf_type_kprobe_proto;
+extern const struct bpf_func_proto bpf_perf_type_uprobe_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -5572,6 +5572,22 @@ struct bpf_stack_build_id {
* 0 on success.
*
* **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * int bpf_perf_type_kprobe(void)
+ * Description
+ * Get perf_kprobe.type
+ * Return
+ * perf_kprobe.type on success.
+ *
+ * **-EOPNOTSUPP** if CONFIG_KPROBE_EVENTS is not set.
+ *
+ * int bpf_perf_type_uprobe(void)
+ * Description
+ * Get perf_uprobe.type
+ * Return
+ * perf_uprobe.type on success.
+ *
+ * **-EOPNOTSUPP** if CONFIG_UPROBE_EVENTS is not set.
*/
#define ___BPF_FUNC_MAPPER(FN, ctx...) \
FN(unspec, 0, ##ctx) \
@@ -5786,6 +5802,8 @@ struct bpf_stack_build_id {
FN(user_ringbuf_drain, 209, ##ctx) \
FN(cgrp_storage_get, 210, ##ctx) \
FN(cgrp_storage_delete, 211, ##ctx) \
+ FN(perf_type_kprobe, 212, ##ctx) \
+ FN(perf_type_uprobe, 213, ##ctx) \
/* */
/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
@@ -2666,6 +2666,8 @@ void bpf_user_rnd_init_once(void)
const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
const struct bpf_func_proto bpf_set_retval_proto __weak;
const struct bpf_func_proto bpf_get_retval_proto __weak;
+const struct bpf_func_proto bpf_perf_type_kprobe_proto __weak;
+const struct bpf_func_proto bpf_perf_type_uprobe_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
@@ -22,6 +22,7 @@
#include <linux/security.h>
#include <linux/btf_ids.h>
#include <linux/bpf_mem_alloc.h>
+#include <linux/perf_event.h>
#include "../../lib/kstrtox.h"
@@ -1654,6 +1655,28 @@ static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offse
.arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO,
};
+BPF_CALL_0(bpf_perf_type_kprobe)
+{
+ return perf_type_kprobe();
+}
+
+const struct bpf_func_proto bpf_perf_type_kprobe_proto = {
+ .func = bpf_perf_type_kprobe,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+};
+
+BPF_CALL_0(bpf_perf_type_uprobe)
+{
+ return perf_type_uprobe();
+}
+
+const struct bpf_func_proto bpf_perf_type_uprobe_proto = {
+ .func = bpf_perf_type_uprobe,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+};
+
const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
@@ -1510,6 +1510,10 @@ static int __init bpf_key_sig_kfuncs_init(void)
return &bpf_find_vma_proto;
case BPF_FUNC_trace_vprintk:
return bpf_get_trace_vprintk_proto();
+ case BPF_FUNC_perf_type_kprobe:
+ return &bpf_perf_type_kprobe_proto;
+ case BPF_FUNC_perf_type_uprobe:
+ return &bpf_perf_type_uprobe_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -5572,6 +5572,22 @@ struct bpf_stack_build_id {
* 0 on success.
*
* **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * int bpf_perf_type_kprobe(void)
+ * Description
+ * Get perf_kprobe.type
+ * Return
+ * perf_kprobe.type on success.
+ *
+ * **-EOPNOTSUPP** if CONFIG_KPROBE_EVENTS is not set.
+ *
+ * int bpf_perf_type_uprobe(void)
+ * Description
+ * Get perf_uprobe.type
+ * Return
+ * perf_uprobe.type on success.
+ *
+ * **-EOPNOTSUPP** if CONFIG_UPROBE_EVENTS is not set.
*/
#define ___BPF_FUNC_MAPPER(FN, ctx...) \
FN(unspec, 0, ##ctx) \
@@ -5786,6 +5802,8 @@ struct bpf_stack_build_id {
FN(user_ringbuf_drain, 209, ##ctx) \
FN(cgrp_storage_get, 210, ##ctx) \
FN(cgrp_storage_delete, 211, ##ctx) \
+ FN(perf_type_kprobe, 212, ##ctx) \
+ FN(perf_type_uprobe, 213, ##ctx) \
/* */
/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
We are utilizing BPF LSM to monitor BPF operations within our container
environment. Our goal is to examine the program type and perform the
respective audits in our LSM program.

When it comes to the perf_event BPF program, there are no specific
definitions for the perf types of kprobe or uprobe. In other words,
there is no PERF_TYPE_[UK]PROBE. It appears that defining them as UAPI
at this stage would be impractical. Therefore, if we wish to determine
whether a new BPF program created via perf_event_open() is a kprobe or
an uprobe, we need to retrieve the type in userspace by reading
/sys/bus/event_source/devices/[uk]probe/type and subsequently store it
in global variables within the LSM program. This approach proves to be
inconvenient.

Two new BPF helpers have been introduced to enhance the functionality.
These helpers allow us to directly obtain the perf type of a kprobe or
uprobe within a BPF program.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 include/linux/bpf.h            |  2 ++
 include/uapi/linux/bpf.h       | 18 ++++++++++++++++++
 kernel/bpf/core.c              |  2 ++
 kernel/bpf/helpers.c           | 23 +++++++++++++++++++++++
 kernel/trace/bpf_trace.c       |  4 ++++
 tools/include/uapi/linux/bpf.h | 18 ++++++++++++++++++

 6 files changed, 67 insertions(+)