
[RFC,v8,07/20] bpf: Allow adding kernel objects to collections

Message ID 20240510192412.3297104-8-amery.hung@bytedance.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf qdisc

Commit Message

Amery Hung May 10, 2024, 7:23 p.m. UTC
To allow adding kernel objects to and removing them from collections, teach
the verifier that a graph node may live in a trusted kernel object (kptr),
not only in a local (allocated) object. In addition, a kernel graph value
removed from a collection should remain a trusted kptr.
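
For context, a minimal sketch of the BPF-side usage this enables might look
as follows. It is not taken from the series: "struct kern_obj", its embedded
bpf_list_node, and the kern_obj_acquire()/kern_obj_release() kfuncs are
hypothetical placeholders standing in for a real kernel type that carries
graph-node fields in its kernel BTF, and the __contains() annotation plus
the reference-ownership details for kernel value types are assumptions that
may differ from what the series actually implements.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

#ifndef container_of
#define container_of(ptr, type, member) \
	((type *)((void *)(ptr) - __builtin_offsetof(type, member)))
#endif

/* Hypothetical kernel struct, as it would appear in kernel BTF (vmlinux.h),
 * embedding a list node. Defined here only so the sketch is self-contained.
 */
struct kern_obj {
	struct bpf_list_node node;
	/* ... other kernel fields ... */
};

/* Hypothetical acquire/release kfuncs handing out a trusted (PTR_TRUSTED),
 * referenced kernel pointer.
 */
extern struct kern_obj *kern_obj_acquire(void) __ksym;
extern void kern_obj_release(struct kern_obj *obj) __ksym;

/* Collection lives in a map value, as in the linked-list selftests. The
 * __contains() annotation is written as for local types; the exact
 * annotation expected for kernel value types may differ in this series.
 */
struct map_value {
	struct bpf_spin_lock lock;
	struct bpf_list_head head __contains(kern_obj, node);
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct map_value);
} collection SEC(".maps");

SEC("tc")
int push_then_pop(struct __sk_buff *ctx)
{
	struct bpf_list_node *n;
	struct map_value *v;
	struct kern_obj *obj;
	int key = 0;

	v = bpf_map_lookup_elem(&collection, &key);
	if (!v)
		return 0;

	obj = kern_obj_acquire();
	if (!obj)
		return 0;

	/* With this patch the verifier accepts &obj->node even though obj is
	 * a trusted kernel pointer rather than a local (MEM_ALLOC) object.
	 */
	bpf_spin_lock(&v->lock);
	bpf_list_push_back(&v->head, &obj->node);
	bpf_spin_unlock(&v->lock);

	bpf_spin_lock(&v->lock);
	n = bpf_list_pop_front(&v->head);
	bpf_spin_unlock(&v->lock);
	if (n) {
		/* The popped graph value is again a trusted kptr and can be
		 * passed to kfuncs that expect one.
		 */
		obj = container_of(n, struct kern_obj, node);
		kern_obj_release(obj);
	}
	return 0;
}

char _license[] SEC("license") = "GPL";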

Signed-off-by: Amery Hung <amery.hung@bytedance.com>
---
 include/linux/bpf_verifier.h |  8 +++++++-
 kernel/bpf/verifier.c        | 18 ++++++++++++------
 2 files changed, 19 insertions(+), 7 deletions(-)

Patch

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7cb1b75eee38..edb306ef4c61 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -864,9 +864,15 @@  static inline bool type_is_ptr_alloc_obj(u32 type)
 	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & MEM_ALLOC;
 }
 
+static inline bool type_is_ptr_trusted(u32 type)
+{
+	return base_type(type) == PTR_TO_BTF_ID && type_flag(type) & PTR_TRUSTED;
+}
+
 static inline bool type_is_non_owning_ref(u32 type)
 {
-	return type_is_ptr_alloc_obj(type) && type_flag(type) & NON_OWN_REF;
+	return (type_is_ptr_alloc_obj(type) || type_is_ptr_trusted(type)) &&
+	       type_flag(type) & NON_OWN_REF;
 }
 
 static inline bool type_is_pkt_pointer(enum bpf_reg_type type)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2d4a55ead85b..f01d2b876a2e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -413,7 +413,8 @@  static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
 
 	if (reg->type == PTR_TO_MAP_VALUE) {
 		rec = reg->map_ptr->record;
-	} else if (type_is_ptr_alloc_obj(reg->type)) {
+	} else if (type_is_ptr_alloc_obj(reg->type) || type_is_ptr_trusted(reg->type) ||
+		   reg->type == PTR_TO_BTF_ID) {
 		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
 		if (meta)
 			rec = meta->record;
@@ -1860,7 +1861,8 @@  static void mark_reg_graph_node(struct bpf_reg_state *regs, u32 regno,
 				struct btf_field_graph_root *ds_head)
 {
 	__mark_reg_known_zero(&regs[regno]);
-	regs[regno].type = PTR_TO_BTF_ID | MEM_ALLOC;
+	regs[regno].type = btf_is_kernel(ds_head->btf) ? PTR_TO_BTF_ID | PTR_TRUSTED :
+							 PTR_TO_BTF_ID | MEM_ALLOC;
 	regs[regno].btf = ds_head->btf;
 	regs[regno].btf_id = ds_head->value_btf_id;
 	regs[regno].off = ds_head->node_offset;
@@ -11931,8 +11933,10 @@  static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 				return ret;
 			break;
 		case KF_ARG_PTR_TO_LIST_NODE:
-			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
-				verbose(env, "arg#%d expected pointer to allocated object\n", i);
+			if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC) &&
+			    reg->type != (PTR_TO_BTF_ID | PTR_TRUSTED) &&
+			    reg->type != PTR_TO_BTF_ID) {
+				verbose(env, "arg#%d expected pointer to allocated object or trusted pointer\n", i);
 				return -EINVAL;
 			}
 			if (!reg->ref_obj_id) {
@@ -11954,8 +11958,10 @@  static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 					return -EINVAL;
 				}
 			} else {
-				if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC)) {
-					verbose(env, "arg#%d expected pointer to allocated object\n", i);
+				if (reg->type != (PTR_TO_BTF_ID | MEM_ALLOC) &&
+				    reg->type != (PTR_TO_BTF_ID | PTR_TRUSTED) &&
+				    reg->type != PTR_TO_BTF_ID) {
+					verbose(env, "arg#%d expected pointer to allocated object or trusted pointer\n", i);
 					return -EINVAL;
 				}
 				if (!reg->ref_obj_id) {