diff mbox series

[2/2] Adding BPF CFI

Message ID SEZPR03MB67866BC3232BC67023B7E24CB4602@SEZPR03MB6786.apcprd03.prod.outlook.com (mailing list archive)
State Superseded
Headers show
Series [1/2] Adding BPF NX | expand

Checks

Context Check Description
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Guessed tree name to be net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 6 this patch: 6
netdev/cc_maintainers fail 11 maintainers not CCed: sdf@google.com andrii@kernel.org haoluo@google.com martin.lau@linux.dev kpsingh@kernel.org daniel@iogearbox.net jolsa@kernel.org yonghong.song@linux.dev ast@kernel.org song@kernel.org john.fastabend@gmail.com
netdev/build_clang fail Errors and warnings before: 6 this patch: 6
netdev/verify_signedoff fail author Signed-off-by missing
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 6 this patch: 6
netdev/checkpatch fail CHECK: Please use a blank line after function/struct/union/enum declarations ERROR: spaces required around that ':' (ctx:VxW) WARNING: Do not crash the kernel unless it is absolutely unavoidable--use WARN_ON_ONCE() plus recovery code (if feasible) instead of BUG() or variants WARNING: Missing a blank line after declarations WARNING: please write a help paragraph that fully describes the config symbol
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Maxwell Bland Jan. 3, 2024, 6:56 p.m. UTC
From: Tenut <tenut@Niobium>
Subject: [PATCH 2/2] Adding BPF CFI

Check the offset of BPF instructions in the interpreter to make sure the BPF
program is executed from the correct starting point.

Signed-off-by: Maxwell Bland <mbland@motorola.com>
---
kernel/bpf/Kconfig | 10 +++++++
 kernel/bpf/core.c  | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 89 insertions(+)
diff mbox series

Patch

diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
index 7160dcaaa58a..9c64db0ddd63 100644
--- a/kernel/bpf/Kconfig
+++ b/kernel/bpf/Kconfig
@@ -94,6 +94,7 @@  config BPF_HARDENING
 	help
 	  Enhance bpf interpreter's security
 
+if BPF_HARDENING
 config BPF_NX
 bool "Enable bpf NX"
 	depends on BPF_HARDENING && !DYNAMIC_MEMORY_LAYOUT
@@ -102,6 +103,15 @@  bool "Enable bpf NX"
+	  Allocate eBPF programs in a separate area and make sure the
 	  interpreted programs are in the region.
 
+config BPF_CFI
+	bool "Enable bpf CFI"
+	depends on BPF_NX
+	default n
+	help
+	  Enable alignment checks for eBPF program starting points
+
+endif
+
 source "kernel/bpf/preload/Kconfig"
 
 config BPF_LSM
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 56d9e8d4a6de..dee0d2713c3b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -116,6 +116,75 @@  static void bpf_insn_check_range(const struct bpf_insn *insn)
 }
 #endif /* CONFIG_BPF_NX */
 
+#ifdef CONFIG_BPF_CFI
+#define BPF_ON  1
+#define BPF_OFF 0
+
+struct bpf_mode_flag {
+	u8 byte_array[PAGE_SIZE];
+};
+DEFINE_PER_CPU_PAGE_ALIGNED(struct bpf_mode_flag, bpf_exec_mode);
+
+static void __init lock_bpf_exec_mode(void)
+{
+	struct bpf_mode_flag *flag_page;
+	int cpu;
+	for_each_possible_cpu(cpu) {
+		flag_page = per_cpu_ptr(&bpf_exec_mode, cpu);
+		set_memory_ro((unsigned long)flag_page, 1);
+	};
+}
+subsys_initcall(lock_bpf_exec_mode);
+
+static void write_cr0_nocheck(unsigned long val)
+{
+	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
+}
+
+/*
+ * Notice that get_cpu_var also disables preemption so no
+ * extra care needed for that.
+ */
+static void enter_bpf_exec_mode(unsigned long *flagsp)
+{
+	struct bpf_mode_flag *flag_page;
+	flag_page = &get_cpu_var(bpf_exec_mode);
+	local_irq_save(*flagsp);
+	write_cr0_nocheck(read_cr0() & ~X86_CR0_WP);
+	flag_page->byte_array[0] = BPF_ON;
+	write_cr0_nocheck(read_cr0() | X86_CR0_WP);
+}
+
+static void leave_bpf_exec_mode(unsigned long *flagsp)
+{
+	struct bpf_mode_flag *flag_page;
+	flag_page = this_cpu_ptr(&bpf_exec_mode);
+	write_cr0_nocheck(read_cr0() & ~X86_CR0_WP);
+	flag_page->byte_array[0] = BPF_OFF;
+	write_cr0_nocheck(read_cr0() | X86_CR0_WP);
+	local_irq_restore(*flagsp);
+	put_cpu_var(bpf_exec_mode);
+}
+
+static void check_bpf_exec_mode(void)
+{
+	struct bpf_mode_flag *flag_page;
+	flag_page = this_cpu_ptr(&bpf_exec_mode);
+	BUG_ON(flag_page->byte_array[0] != BPF_ON);
+}
+
+static void bpf_check_cfi(const struct bpf_insn *insn)
+{
+	const struct bpf_prog *fp;
+	fp = container_of(insn, struct bpf_prog, insnsi[0]);
+	if (!IS_ALIGNED((unsigned long)fp, BPF_MEMORY_ALIGN))
+		BUG();
+}
+
+#else /* CONFIG_BPF_CFI */
+static void check_bpf_exec_mode(void) {}
+#endif /* CONFIG_BPF_CFI */
+
 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
 {
 	gfp_t gfp_flags = bpf_memcg_flags(GFP_KERNEL | __GFP_ZERO | gfp_extra_flags);
@@ -1719,11 +1788,18 @@  static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 #undef BPF_INSN_2_LBL
 	u32 tail_call_cnt = 0;
 
+#ifdef CONFIG_BPF_CFI
+	unsigned long flags;
+	enter_bpf_exec_mode(&flags);
+	bpf_check_cfi(insn);
+#endif
+
 #define CONT	 ({ insn++; goto select_insn; })
 #define CONT_JMP ({ insn++; goto select_insn; })
 
 select_insn:
 	bpf_insn_check_range(insn);
+	check_bpf_exec_mode();
 	goto *jumptable[insn->code];
 
 	/* Explicitly mask the register-based shift amounts with 63 or 31
@@ -2034,6 +2110,9 @@  static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn)
 		insn += insn->imm;
 		CONT;
 	JMP_EXIT:
+#ifdef CONFIG_BPF_CFI
+		leave_bpf_exec_mode(&flags);
+#endif
 		return BPF_R0;
 	/* JMP */
 #define COND_JMP(SIGN, OPCODE, CMP_OP)				\