diff --git a/arch/arm64/include/asm/cfi.h b/arch/arm64/include/asm/cfi.h
new file mode 100644
--- /dev/null
+++ b/arch/arm64/include/asm/cfi.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_CFI_H
+#define _ASM_ARM64_CFI_H
+
+#ifdef CONFIG_CFI_CLANG
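+/*
+ * With kCFI, JITed BPF images carry the same type-hash preamble as C
+ * functions, so indirect calls into them can be checked normally and
+ * __bpfcall can be defined empty.
+ */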
+#define __bpfcall
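+/*
+ * The kCFI type hash is one 32-bit word placed immediately before the
+ * function entry point, so the first instruction sits 4 bytes into the
+ * JITed image.
+ */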
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
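+/* tell the generic CFI code that this arch implements cfi_get_offset() */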
+#define cfi_get_offset cfi_get_offset
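+/*
+ * Type hashes matching bpf_func_t and bpf_callback_t, emitted by
+ * DEFINE_CFI_TYPE() in arch/arm64/kernel/alternative.c.
+ */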
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+extern u32 cfi_get_func_hash(void *func);
+#else
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+static inline u32 cfi_get_func_hash(void *func)
+{
+ return 0;
+}
+#endif /* CONFIG_CFI_CLANG */
+#endif /* _ASM_ARM64_CFI_H */
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -8,11 +8,13 @@
#define pr_fmt(fmt) "alternatives: " fmt

+#include <linux/cfi_types.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elf.h>

#include <asm/cacheflush.h>
#include <asm/alternative.h>
+#include <asm/cfi.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/module.h>
@@ -298,3 +300,19 @@ noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}
EXPORT_SYMBOL(alt_cb_patch_nops);
+
+#ifdef CONFIG_CFI_CLANG
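+/*
+ * The functions declared below are never defined or called; they only
+ * give the compiler a prototype from which to derive the kCFI type
+ * hashes (__kcfi_typeid_*) that DEFINE_CFI_TYPE() captures.
+ */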
+struct bpf_insn;
+/* Must match bpf_func_t / DEFINE_BPF_PROG_RUN() */
+extern unsigned int __bpf_prog_runX(const void *ctx,
+ const struct bpf_insn *insn);
+DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX);
+/* Must match bpf_callback_t */
+extern u64 __bpf_callback_fn(u64, u64, u64, u64, u64);
+DEFINE_CFI_TYPE(cfi_bpf_subprog_hash, __bpf_callback_fn);
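+/*
+ * Fetch the type hash stored in the 4 bytes preceding @func's entry
+ * point, whether it was emitted by the compiler or by the BPF JIT.
+ */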
+u32 cfi_get_func_hash(void *func)
+{
+ u32 *hashp = func - cfi_get_offset();
+
+ return READ_ONCE(*hashp);
+}
+#endif
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -17,6 +17,7 @@
#include <asm/asm-extable.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
+#include <asm/cfi.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/patching.h>
@@ -162,6 +163,12 @@ static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
emit(insn, ctx);
}
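
+/*
+ * Emit the 32-bit kCFI type hash as a raw data word ahead of the
+ * prologue; the caller's CFI check loads it from entry - 4 and traps
+ * on a mismatch before making the indirect call.
+ */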
+static inline void emit_kcfi(u32 hash, struct jit_ctx *ctx)
+{
+ if (IS_ENABLED(CONFIG_CFI_CLANG))
+ emit(hash, ctx);
+}
+
/*
* Kernel addresses in the vmalloc space use at most 48 bits, and the
* remaining bits are guaranteed to be 0x1. So we can compose the address
@@ -311,7 +318,6 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
const u8 tcc = bpf2a64[TCALL_CNT];
const u8 fpb = bpf2a64[FP_BOTTOM];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
- const int idx0 = ctx->idx;
int cur_offset;

/*
@@ -337,6 +343,9 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf,
*
*/
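
+ /*
+ * The kCFI hash word must be first in the image; idx0 is taken after
+ * it so the PROLOGUE_OFFSET check keeps counting real instructions.
+ */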
+ emit_kcfi(is_main_prog ? cfi_bpf_hash : cfi_bpf_subprog_hash, ctx);
+ const int idx0 = ctx->idx;
+
/* bpf function may be invoked by 3 instruction types:
* 1. bl, attached via freplace to bpf prog via short jump
* 2. br, attached via freplace to bpf prog via long jump
@@ -1849,9 +1858,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
jit_data->ro_header = ro_header;
}

- prog->bpf_func = (void *)ctx.ro_image;
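+ /* The entry point and jited_len exclude the kCFI hash word */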
+ prog->bpf_func = (void *)ctx.ro_image + cfi_get_offset();
prog->jited = 1;
- prog->jited_len = prog_size;
+ prog->jited_len = prog_size - cfi_get_offset();

if (!prog->is_func || extra_pass) {
int i;
@@ -2104,6 +2113,12 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
/* return address locates above FP */
retaddr_off = stack_size + 8;

+ if (flags & BPF_TRAMP_F_INDIRECT) {
+ /*
+ * Indirect call for bpf_struct_ops: the trampoline is reached through
+ * a function pointer rather than a direct call, so it needs a kCFI
+ * preamble carrying the type hash of the function it stands in for.
+ */
+ emit_kcfi(cfi_get_func_hash(func_addr), ctx);
+ }
/* bpf trampoline may be invoked by 3 instruction types:
* 1. bl, attached to bpf prog or kernel function via short jump
* 2. br, attached to bpf prog or kernel function via long jump