diff mbox series

[RFC,bpf-next,08/11] bpf: special rules for kernel function calls inside inlinable kfuncs

Message ID 20241107175040.1659341-9-eddyz87@gmail.com (mailing list archive)
State RFC
Delegated to: BPF
Headers show
Series bpf: inlinable kfuncs for BPF | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-11 pending Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 pending Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-14 pending Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-15 pending Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 pending Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 27 this patch: 27
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 6 maintainers not CCed: john.fastabend@gmail.com kpsingh@kernel.org song@kernel.org jolsa@kernel.org haoluo@google.com sdf@fomichev.me
netdev/build_clang fail Errors and warnings before: 6 this patch: 6
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 14 this patch: 14
netdev/checkpatch warning WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 97 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Eduard Zingerman Nov. 7, 2024, 5:50 p.m. UTC
Inlinable kfuncs can call arbitrary kernel functions,
there is no need to check if these functions conform to kfunc or
helper usage rules. Upon seeing such calls:
- mark registers R0-R5 as KERNEL_VALUEs after the call;
- if there are any PTR_TO_STACK parameters, mark all
  allocated stack slots as KERNEL_VALUEs.

The assumption is that KERNEL_VALUE marks should never escape from a
kfunc instance body: at the call site R0 is set in accordance with kfunc
processing rules, and PTR_TO_STACK parameters are never passed from a bpf
program to an inlinable kfunc.

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
---
 kernel/bpf/verifier.c | 67 ++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 66 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 87b6cc8c94f8..5b109139f356 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -13209,6 +13209,67 @@  static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	return 0;
 }
 
+/*
+ * Mark every allocated stack slot of @func's frame as a spilled
+ * KERNEL_VALUE.  Called when an inlinable kfunc body passes a
+ * PTR_TO_STACK argument to an arbitrary kernel function: precise
+ * tracking of the frame's stack contents is dropped, presumably
+ * because the callee may read or write any of it.
+ *
+ * Returns 0 on success, -EFAULT if @func is not an inlinable kfunc
+ * frame (a verifier bug: KERNEL_VALUE marks must never escape an
+ * inlinable kfunc instance body).
+ */
+static int mark_stack_as_kernel_values(struct bpf_verifier_env *env, struct bpf_func_state *func)
+{
+	struct bpf_stack_state *slot;
+	struct bpf_reg_state *spill;
+	int spi, i;
+
+	/* Only frames whose call site lies inside an inlinable kfunc
+	 * body may carry KERNEL_VALUE marks.
+	 */
+	if (!inside_inlinable_kfunc(env, func->callsite)) {
+		verbose(env, "verifier bug: shouldn't mark frame#%d as kernel values\n",
+			func->frameno);
+		return -EFAULT;
+	}
+
+	/* One stack slot index (spi) per BPF_REG_SIZE bytes of the
+	 * allocated stack; convert each slot to a KERNEL_VALUE spill.
+	 */
+	for (spi = 0; spi < func->allocated_stack / BPF_REG_SIZE; spi++) {
+		slot = &func->stack[spi];
+		spill = &slot->spilled_ptr;
+		mark_reg_kernel_value(spill);
+		/* Treat the slot as freshly written so stale liveness
+		 * info from before the call is not propagated.
+		 */
+		spill->live |= REG_LIVE_WRITTEN;
+		for (i = 0; i < BPF_REG_SIZE; i++)
+			slot->slot_type[i] = STACK_SPILL;
+		mark_stack_slot_scratched(env, spi);
+	}
+
+	return 0;
+}
+
+/*
+ * Verify a call instruction that targets an arbitrary kernel function
+ * from inside an inlinable kfunc body.  Such callees are not checked
+ * against kfunc or helper usage rules.  Instead:
+ * - each argument register must be SCALAR_VALUE, KERNEL_VALUE or
+ *   PTR_TO_STACK; for PTR_TO_STACK the whole corresponding frame's
+ *   stack is marked as KERNEL_VALUEs;
+ * - caller-saved registers R0-R5 are clobbered after the call and R0
+ *   is marked as a KERNEL_VALUE.
+ *
+ * Returns 0 on success, a negative error on failure.
+ */
+static int check_internal_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
+{
+	struct bpf_reg_state *reg, *regs = cur_regs(env);
+	struct bpf_kfunc_call_arg_meta meta;
+	int err, i, nargs;
+
+	/* Only the BTF func_proto is needed here, to learn the number
+	 * of arguments the callee takes.
+	 */
+	err = fetch_kfunc_meta(env, insn, &meta, NULL);
+	if (err < 0)
+		return -EFAULT;
+
+	nargs = btf_type_vlen(meta.func_proto);
+	for (i = 0; i < nargs; i++) {
+		/* Arguments are passed in R1..R5. */
+		reg = &regs[BPF_REG_1 + i];
+		switch (reg->type) {
+		case SCALAR_VALUE:
+		case KERNEL_VALUE:
+			break;
+		case PTR_TO_STACK:
+			/* Callee may access the pointed-to frame's
+			 * stack; stop tracking its contents precisely.
+			 */
+			err = mark_stack_as_kernel_values(env, func(env, reg));
+			if (err)
+				return err;
+			break;
+		default:
+			/* Any other register type reaching here is a
+			 * verifier bug, not a program error.
+			 */
+			verbose(env, "verifier bug: arg#%i unexpected register type %s\n",
+				i, reg_type_str(env, reg->type));
+			return -EFAULT;
+		}
+	}
+	/* Model the call's effect on registers: R0-R5 are clobbered,
+	 * then R0 holds the callee's (opaque) return value.
+	 */
+	for (i = 0; i < CALLER_SAVED_REGS; i++) {
+		mark_reg_not_init(env, regs, caller_saved[i]);
+		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
+	}
+	mark_reg_kernel_value(&regs[BPF_REG_0]);
+	return 0;
+}
+
 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
 				  const struct bpf_reg_state *reg,
 				  enum bpf_reg_type type)
@@ -18828,7 +18889,8 @@  static int do_check(struct bpf_verifier_env *env)
 					return -EINVAL;
 				}
 
-				if (env->cur_state->active_lock.ptr) {
+				if (env->cur_state->active_lock.ptr &&
+				    !inside_inlinable_kfunc(env, env->insn_idx)) {
 					if ((insn->src_reg == BPF_REG_0 && insn->imm != BPF_FUNC_spin_unlock) ||
 					    (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
 					     (insn->off != 0 || !is_bpf_graph_api_kfunc(insn->imm)))) {
@@ -18838,6 +18900,9 @@  static int do_check(struct bpf_verifier_env *env)
 				}
 				if (insn->src_reg == BPF_PSEUDO_CALL) {
 					err = check_func_call(env, insn, &env->insn_idx);
+				} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL &&
+					   inside_inlinable_kfunc(env, env->insn_idx)) {
+					err = check_internal_call(env, insn);
 				} else if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
 					err = check_kfunc_call(env, insn, &env->insn_idx);
 					if (!err && is_bpf_throw_kfunc(insn)) {