
[bpf-next,v1,07/15] bpf: Prevent escaping of pointers loaded from maps

Message ID:   20220220134813.3411982-8-memxor@gmail.com (mailing list archive)
State:        Changes Requested
Delegated to: BPF
Series:       Introduce typed pointer support in BPF maps

Checks

Context                         Check    Description
netdev/tree_selection           success  Clearly marked for bpf-next
netdev/fixes_present            success  Fixes tag not required for -next series
netdev/subject_prefix           success  Link
netdev/cover_letter             success  Series has a cover letter
netdev/patch_count              success  Link
netdev/header_inline            success  No static functions without inline keyword in header files
netdev/build_32bit              success  Errors and warnings before: 1450 this patch: 1450
netdev/cc_maintainers           warning  5 maintainers not CCed: kpsingh@kernel.org john.fastabend@gmail.com kafai@fb.com songliubraving@fb.com yhs@fb.com
netdev/build_clang              success  Errors and warnings before: 196 this patch: 196
netdev/module_param             success  Was 0 now: 0
netdev/verify_signedoff         success  Signed-off-by tag matches author and committer
netdev/verify_fixes             success  No Fixes tag
netdev/build_allmodconfig_warn  success  Errors and warnings before: 1467 this patch: 1467
netdev/checkpatch               warning  WARNING: line length of 103 exceeds 80 columns; WARNING: line length of 82 exceeds 80 columns
netdev/kdoc                     success  Errors and warnings before: 0 this patch: 0
netdev/source_inline            success  Was 0 now: 0
bpf/vmtest-bpf-next-PR          success  PR summary
bpf/vmtest-bpf-next             success  VM_Test

Commit Message

Kumar Kartikeya Dwivedi Feb. 20, 2022, 1:48 p.m. UTC
While even for unreferenced pointers we can guarantee that cases such
as the pointed-to object being freed are handled by the verifier's
exception handling (patching normal loads into PROBE_MEM loads), we
still cannot allow the user to pass these pointers to BPF helpers and
kfuncs, because no such exception handling is performed for accesses
made inside the kernel.
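
For reference, the "exception handling" mentioned above is the load
rewrite done in convert_ctx_accesses(), which the last hunk of this
patch extends to untrusted pointers. A minimal sketch of the rewrite:

	/* A read through a PTR_TO_BTF_ID register is converted from a
	 * plain BPF_MEM load to a BPF_PROBE_MEM load, so a fault on a
	 * bad pointer is handled via the JIT's exception table instead
	 * of becoming an unhandled kernel fault.
	 */
	if (type == BPF_READ)
		insn->code = BPF_LDX | BPF_PROBE_MEM | BPF_SIZE(insn->code);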

Hence, introduce a new type flag, PTR_UNTRUSTED, which is used to mark
all registers holding an unreferenced PTR_TO_BTF_ID loaded from a BPF
map, and ensure that such pointers can never escape the BPF program
into the kernel by way of calls to stable or unstable helpers.
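
To illustrate the intended contract from the BPF program side, a
hypothetical sketch follows; the map layout and the way the typed
pointer field is declared are illustrative assumptions, not the exact
API added by this series:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	/* Hypothetical map value holding an unreferenced kernel pointer;
	 * the declaration is a stand-in for this series' actual syntax.
	 */
	struct map_value {
		struct task_struct *ptr;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, int);
		__type(value, struct map_value);
	} array_map SEC(".maps");

	SEC("tc")
	int untrusted_ptr_demo(struct __sk_buff *ctx)
	{
		struct map_value *v;
		struct task_struct *t;
		int key = 0;

		v = bpf_map_lookup_elem(&array_map, &key);
		if (!v)
			return 0;

		/* Load yields PTR_TO_BTF_ID | PTR_MAYBE_NULL | PTR_UNTRUSTED */
		t = v->ptr;
		if (!t)
			return 0;

		/* Allowed: a plain dereference is patched to PROBE_MEM */
		bpf_printk("pid=%d", t->pid);

		/* Not allowed: an untrusted pointer cannot escape to a
		 * helper, e.g. bpf_task_storage_get(&map, t, ...) would
		 * be rejected by the verifier.
		 */
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";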

Adjust the check in check_mem_access() so that check_ptr_to_btf_access()
is called only when no type flag is set, or when the only flag set is
PTR_UNTRUSTED.
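
Spelled out, the adjusted condition (restating the check_mem_access()
hunk below) admits a register in exactly these cases:

	/* Base type must be PTR_TO_BTF_ID, and PTR_UNTRUSTED is the only
	 * type flag permitted to be set:
	 *
	 *   PTR_TO_BTF_ID                                   -> allowed
	 *   PTR_TO_BTF_ID | PTR_UNTRUSTED                   -> allowed
	 *   any other flag (PTR_MAYBE_NULL, MEM_USER, ...)  -> falls through
	 */
	base_type(reg->type) == PTR_TO_BTF_ID &&
	!(type_flag(reg->type) & ~PTR_UNTRUSTED)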

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 include/linux/bpf.h   |  7 +++++++
 kernel/bpf/verifier.c | 29 ++++++++++++++++++++++++++---
 2 files changed, 33 insertions(+), 3 deletions(-)

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 37ca92f4c7b7..ae599aaf8d4c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -364,6 +364,13 @@  enum bpf_type_flag {
 	/* MEM is in user address space. */
 	MEM_USER		= BIT(3 + BPF_BASE_TYPE_BITS),
 
+	/* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark
+	 * unrefcounted pointers loaded from map value, so that they can only
+	 * be dereferenced but not escape the BPF program into the kernel (i.e.
+	 * cannot be passed as arguments to kfunc or bpf helpers).
+	 */
+	PTR_UNTRUSTED		= BIT(4 + BPF_BASE_TYPE_BITS),
+
 	__BPF_TYPE_LAST_FLAG	= MEM_USER,
 };
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 28da858bb921..0a2cd21d9ec1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -582,6 +582,8 @@  static const char *reg_type_str(struct bpf_verifier_env *env,
 		strncpy(prefix, "alloc_", 32);
 	if (type & MEM_USER)
 		strncpy(prefix, "user_", 32);
+	if (type & PTR_UNTRUSTED)
+		strncpy(prefix, "untrusted_", 32);
 
 	snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
 		 prefix, str[base_type(type)], postfix);
@@ -3490,10 +3492,17 @@  static int map_ptr_to_btf_id_match_type(struct bpf_verifier_env *env,
 		if (reg->type != PTR_TO_PERCPU_BTF_ID &&
 		    reg->type != (PTR_TO_PERCPU_BTF_ID | PTR_MAYBE_NULL))
 			goto end;
-	} else { /* referenced and unreferenced case */
+	} else if (off_desc->flags & BPF_MAP_VALUE_OFF_F_REF) {
+		/* register state (ref_obj_id) must be checked by caller */
 		if (reg->type != PTR_TO_BTF_ID &&
 		    reg->type != (PTR_TO_BTF_ID | PTR_MAYBE_NULL))
 			goto end;
+	} else { /* only unreferenced case accepts untrusted pointers */
+		if (reg->type != PTR_TO_BTF_ID &&
+		    reg->type != (PTR_TO_BTF_ID | PTR_MAYBE_NULL) &&
+		    reg->type != (PTR_TO_BTF_ID | PTR_UNTRUSTED) &&
+		    reg->type != (PTR_TO_BTF_ID | PTR_MAYBE_NULL | PTR_UNTRUSTED))
+			goto end;
 	}
 
 	if (!btf_is_kernel(reg->btf)) {
@@ -3597,10 +3606,13 @@  static int check_map_ptr_to_btf_id(struct bpf_verifier_env *env, u32 regno, int
 	ref_ptr = off_desc->flags & BPF_MAP_VALUE_OFF_F_REF;
 	percpu_ptr = off_desc->flags & BPF_MAP_VALUE_OFF_F_PERCPU;
 	user_ptr = off_desc->flags & BPF_MAP_VALUE_OFF_F_USER;
+
 	if (percpu_ptr)
 		reg_type = PTR_TO_PERCPU_BTF_ID;
 	else if (user_ptr)
 		reg_flags |= MEM_USER;
+	else
+		reg_flags |= PTR_UNTRUSTED;
 
 	if (is_xchg_insn(insn)) {
 		/* We do checks and updates during register fill call for fetch case */
@@ -3629,6 +3641,10 @@  static int check_map_ptr_to_btf_id(struct bpf_verifier_env *env, u32 regno, int
 			if (ret < 0)
 				return ret;
 			ref_obj_id = ret;
+			/* Unset PTR_UNTRUSTED, so that it can be passed to bpf
+			 * helpers or kfunc.
+			 */
+			reg_flags &= ~PTR_UNTRUSTED;
 		}
 		/* val_reg might be NULL at this point */
 		mark_btf_ld_reg(env, cur_regs(env), value_regno, reg_type, off_desc->btf,
@@ -4454,6 +4470,12 @@  static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	if (ret < 0)
 		return ret;
 
+	/* If this is an untrusted pointer, all btf_id pointers formed by
+	 * walking it also inherit the untrusted flag.
+	 */
+	if (type_flag(reg->type) & PTR_UNTRUSTED)
+		flag |= PTR_UNTRUSTED;
+
 	if (atype == BPF_READ && value_regno >= 0)
 		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
 
@@ -4804,7 +4826,7 @@  static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		err = check_tp_buffer_access(env, reg, regno, off, size);
 		if (!err && t == BPF_READ && value_regno >= 0)
 			mark_reg_unknown(env, regs, value_regno);
-	} else if (reg->type == PTR_TO_BTF_ID) {
+	} else if (base_type(reg->type) == PTR_TO_BTF_ID && !(type_flag(reg->type) & ~PTR_UNTRUSTED)) {
 		err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
 					      value_regno);
 	} else if (reg->type == CONST_PTR_TO_MAP) {
@@ -13041,7 +13063,7 @@  static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		if (!ctx_access)
 			continue;
 
-		switch (env->insn_aux_data[i + delta].ptr_type) {
+		switch ((int)env->insn_aux_data[i + delta].ptr_type) {
 		case PTR_TO_CTX:
 			if (!ops->convert_ctx_access)
 				continue;
@@ -13058,6 +13080,7 @@  static int convert_ctx_accesses(struct bpf_verifier_env *env)
 			convert_ctx_access = bpf_xdp_sock_convert_ctx_access;
 			break;
 		case PTR_TO_BTF_ID:
+		case PTR_TO_BTF_ID | PTR_UNTRUSTED:
 			if (type == BPF_READ) {
 				insn->code = BPF_LDX | BPF_PROBE_MEM |
 					BPF_SIZE((insn)->code);