@@ -99,4 +99,17 @@ config BPF_LSM
If you are unsure how to answer this question, answer N.
+config BPF_PROG_KASAN
+ bool "Enable BPF Program Address Sanitize"
+ depends on BPF_JIT_ALWAYS_ON
+ depends on KASAN
+ help
+ Enables instrumentation of LDX/STX/ST insns to capture memory
+ access errors in BPF programs that the verifier missed.
+
+ The actual checks are conducted by KASAN. This feature adds
+ noticeable overhead and is meant mainly for testing purposes.
+
+ If you are unsure how to answer this question, answer N.
+
endmenu # "BPF subsystem"
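For reference, the instrumentation only takes effect when the whole dependency chain is enabled; a minimal .config fragment (assuming a KASAN-capable toolchain, plus the options' own dependencies) would look like:

	CONFIG_BPF_JIT_ALWAYS_ON=y
	CONFIG_KASAN=y
	CONFIG_BPF_PROG_KASAN=y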
@@ -15327,6 +15327,25 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
return 0;
}
+#ifdef CONFIG_BPF_PROG_KASAN
+
+/* These functions are instrumented by KASAN and perform the actual sanitizing. */
+
+#define BPF_ASAN_STORE(n) \
+ notrace u64 bpf_asan_store##n(u##n *addr) \
+ { \
+ u##n ret = *addr; \
+ *addr = ret; \
+ return ret; \
+ }
+
+BPF_ASAN_STORE(8);
+BPF_ASAN_STORE(16);
+BPF_ASAN_STORE(32);
+BPF_ASAN_STORE(64);
+
+#endif
+
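For clarity, BPF_ASAN_STORE(8) expands to roughly the function below; the two dereferences of addr are what KASAN instruments (with generic KASAN the compiler emits shadow-memory checks, e.g. __asan_load1()/__asan_store1() calls, around them), so an out-of-bounds addr handed over from a BPF program is reported here:

	notrace u64 bpf_asan_store8(u8 *addr)
	{
		u8 ret = *addr;	/* instrumented read of the target addr */

		*addr = ret;	/* instrumented write of the same byte */
		return ret;
	}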
/* Do various post-verification rewrites in a single program pass.
* These rewrites simplify JIT and interpreter implementations.
*/
@@ -15340,7 +15359,12 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
const int insn_cnt = prog->len;
const struct bpf_map_ops *ops;
struct bpf_insn_aux_data *aux;
+#ifndef CONFIG_BPF_PROG_KASAN
struct bpf_insn insn_buf[16];
+#else
+ struct bpf_insn insn_buf[32];
+ bool in_patch_use_ax = false;
+#endif
struct bpf_prog *new_prog;
struct bpf_map *map_ptr;
int i, ret, cnt, delta = 0;
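The doubled insn_buf is needed because the sanitize patch emitted below is up to 20 insns in the worst case, which would overflow the regular 16-entry buffer:

	3 (save/swap R0, R1, dst_reg) + 1 (r1 += off)
	+ 5 (backup R1~R5) + 1 (call) + 5 (restore R1~R5)
	+ 1 (r1 -= off) + 3 (undo the swaps) + 1 (original insn) = 20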
@@ -15460,6 +15484,112 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
continue;
}
+#ifdef CONFIG_BPF_PROG_KASAN
+
+/* With CONFIG_BPF_PROG_KASAN, we extend the prog stack to MAX_BPF_STACK + 64
+ * to back up the scratch regs before calling the sanitize functions, because
+ * we don't rely on the verifier's knowledge about the calculated stack size
+ * or the liveness of each reg.
+ */
+#define __BACKUP_REG(n) \
+ *patch++ = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_##n, -(MAX_BPF_STACK + 8 * n))
+#define BACKUP_SCRATCH_REGS \
+ __BACKUP_REG(1); \
+ __BACKUP_REG(2); \
+ __BACKUP_REG(3); \
+ __BACKUP_REG(4); \
+ __BACKUP_REG(5)
+
+#define __RESTORE_REG(n) \
+ *patch++ = BPF_LDX_MEM(BPF_DW, BPF_REG_##n, BPF_REG_10, -(MAX_BPF_STACK + 8 * n))
+#define RESTORE_SCRATCH_REGS \
+ __RESTORE_REG(1); \
+ __RESTORE_REG(2); \
+ __RESTORE_REG(3); \
+ __RESTORE_REG(4); \
+ __RESTORE_REG(5)
+
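Since MAX_BPF_STACK is 512, BACKUP_SCRATCH_REGS expands to five 8-byte spills just below the regular stack, and RESTORE_SCRATCH_REGS to the matching fills; the deepest slot, -552, still fits within the MAX_BPF_STACK + 64 stack depth set at the end of this function:

	*patch++ = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -520);
	*patch++ = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -528);
	*patch++ = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_3, -536);
	*patch++ = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -544);
	*patch++ = BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_5, -552);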
+ /* Patches that use REG_AX conflict with us, so skip them.
+ * Skipping starts at the first use of REG_AX and stops only when
+ * we see the next ldx/stx/st insn with valid aux information.
+ */
+ aux = &env->insn_aux_data[i + delta];
+ if (in_patch_use_ax && (int)aux->ptr_type != 0)
+ in_patch_use_ax = false;
+ if (insn->dst_reg == BPF_REG_AX || insn->src_reg == BPF_REG_AX)
+ in_patch_use_ax = true;
+
+ /* Sanitize ST/STX operation. */
+ if (BPF_CLASS(insn->code) == BPF_ST ||
+ BPF_CLASS(insn->code) == BPF_STX) {
+ struct bpf_insn sanitize_fn;
+ struct bpf_insn *patch = &insn_buf[0];
+
+ /* Skip st/stx to R10 (trivial to check), nospec, and conflicting patches. */
+ if (in_patch_use_ax || insn->dst_reg == BPF_REG_10 ||
+ BPF_MODE(insn->code) == BPF_NOSPEC)
+ continue;
+
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ sanitize_fn = BPF_EMIT_CALL(bpf_asan_store8);
+ break;
+ case BPF_H:
+ sanitize_fn = BPF_EMIT_CALL(bpf_asan_store16);
+ break;
+ case BPF_W:
+ sanitize_fn = BPF_EMIT_CALL(bpf_asan_store32);
+ break;
+ case BPF_DW:
+ sanitize_fn = BPF_EMIT_CALL(bpf_asan_store64);
+ break;
+ }
+
+ /* Back up R0 and R1 (or exchange them with dst_reg), put
+ * `dst + off` into R1, invoke the sanitize fn, then restore each reg.
+ */
+ if (insn->dst_reg == BPF_REG_1) {
+ *patch++ = BPF_MOV64_REG(BPF_REG_AX, BPF_REG_0);
+ } else if (insn->dst_reg == BPF_REG_0) {
+ *patch++ = BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1);
+ *patch++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
+ } else {
+ *patch++ = BPF_MOV64_REG(BPF_REG_AX, BPF_REG_1);
+ *patch++ = BPF_MOV64_REG(BPF_REG_1, insn->dst_reg);
+ *patch++ = BPF_MOV64_REG(insn->dst_reg, BPF_REG_0);
+ }
+ if (insn->off != 0)
+ *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, insn->off);
+ BACKUP_SCRATCH_REGS;
+ /* Call the sanitize fn; R1~R5 were backed up above since the call clobbers them. */
+ *patch++ = sanitize_fn;
+ RESTORE_SCRATCH_REGS;
+ if (insn->off != 0)
+ *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -insn->off);
+ if (insn->dst_reg == BPF_REG_1) {
+ *patch++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_AX);
+ } else if (insn->dst_reg == BPF_REG_0) {
+ *patch++ = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
+ *patch++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_AX);
+ } else {
+ *patch++ = BPF_MOV64_REG(BPF_REG_0, insn->dst_reg);
+ *patch++ = BPF_MOV64_REG(insn->dst_reg, BPF_REG_1);
+ *patch++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_AX);
+ }
+ *patch++ = *insn;
+ cnt = patch - insn_buf;
+
+ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ continue;
+ }
+#endif
+
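To illustrate, take a hypothetical store `*(u64 *)(r3 + 16) = r2` (dst_reg is neither R0, R1 nor R10): the else-branches above rewrite it into roughly the following sequence, where r11 denotes BPF_REG_AX:

	r11 = r1                /* save R1 in REG_AX */
	r1 = r3                 /* addr into R1 */
	r3 = r0                 /* park R0 in dst_reg */
	r1 += 16                /* apply insn->off */
	/* spill r1~r5 to fp[-520]..fp[-552] */
	call bpf_asan_store64   /* KASAN checks the access to r1 */
	/* fill r1~r5 from fp[-520]..fp[-552] */
	r1 -= 16                /* undo the off adjustment */
	r0 = r3                 /* restore R0 */
	r3 = r1                 /* restore dst_reg */
	r1 = r11                /* restore R1 from REG_AX */
	*(u64 *)(r3 + 16) = r2  /* the original store runs last */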
if (insn->code != (BPF_JMP | BPF_CALL))
continue;
if (insn->src_reg == BPF_PSEUDO_CALL)
@@ -15852,6 +15982,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
}
}
+#ifdef CONFIG_BPF_PROG_KASAN
+ prog->aux->stack_depth = MAX_BPF_STACK + 64;
+#endif
+
sort_kfunc_descs_by_imm(env->prog);
return 0;
Make the verifier sanitize STX/ST insns in jited BPF programs by
dispatching the addr to kernel functions that are instrumented by
KASAN. Only STX/ST insns that are not inside patches added by other
passes using REG_AX, and whose dst_reg is not R10, are sanitized.
The former conflict with us; the latter are trivial for the verifier
to check. Both are skipped to reduce the footprint.

The instrumentation is conducted in do_misc_fixups(). During it, R0
and R1 are backed up or exchanged with dst_reg, and the addr to check
is stored into R1. We extend the stack size to back up all the
scratch regs, because we don't rely on the verifier's knowledge about
the calculated stack size or the liveness of each reg. Then the
corresponding bpf_asan_storeN() call is inserted before the store.
The sanitize functions are instrumented with KASAN; they simply
access the target addr for the given width, and KASAN conducts the
actual checking. An extra Kconfig option gates all of this, so normal
use cases are not impacted at all.

Signed-off-by: Hao Sun <sunhao.th@gmail.com>
---
 kernel/bpf/Kconfig    |  13 ++++
 kernel/bpf/verifier.c | 134 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 147 insertions(+)