diff mbox series

[RFC,bpf-next,07/11] bpf: Enforce spinlock hold for bpf_rbtree_{add,remove,find}

Message ID 20220722183438.3319790-8-davemarchevsky@fb.com (mailing list archive)
State RFC
Delegated to: BPF
Headers show
Series bpf: Introduce rbtree map | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail merge-conflict
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/apply fail Patch does not apply to bpf-next

Commit Message

Dave Marchevsky July 22, 2022, 6:34 p.m. UTC
The bpf program calling these helpers must hold the spinlock associated
with the rbtree map when doing so. Otherwise, a concurrent add/remove
operation could corrupt the tree while {add,remove,find} is walking it
via the callback, or while the tree is rebalancing after an update.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 kernel/bpf/rbtree.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
diff mbox series

Patch

diff --git a/kernel/bpf/rbtree.c b/kernel/bpf/rbtree.c
index bf2e30af82ec..5b1ab73e164f 100644
--- a/kernel/bpf/rbtree.c
+++ b/kernel/bpf/rbtree.c
@@ -14,6 +14,20 @@  struct bpf_rbtree {
 
 BTF_ID_LIST_SINGLE(bpf_rbtree_btf_ids, struct, rb_node);
 
+/*
 + * Best-effort check that the tree's spinlock is held.
 + *
 + * NOTE(review): spin_is_locked() only reports that *some* context holds
 + * the lock -- not that the current caller does -- so this cannot catch
 + * a program that relies on another CPU's critical section. Worse, on
 + * !CONFIG_SMP builds without CONFIG_DEBUG_SPINLOCK it always returns 0,
 + * which would make bpf_rbtree_{add,remove,find} unconditionally fail
 + * there. Reliable enforcement needs verifier-side tracking of the held
 + * lock (or a lockdep_is_held() check under CONFIG_LOCKDEP).
 + */
+static bool __rbtree_lock_held(struct bpf_rbtree *tree)
+{
+	return spin_is_locked((spinlock_t *)tree->lock);
+}
+
 static int rbtree_map_alloc_check(union bpf_attr *attr)
 {
 	if (attr->max_entries || !attr->btf_value_type_id)
@@ -93,6 +98,9 @@  BPF_CALL_3(bpf_rbtree_add, struct bpf_map *, map, void *, value, void *, cb)
 	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
 	struct rb_node *node = (struct rb_node *)value;
 
+	if (!__rbtree_lock_held(tree))
+		return (u64)NULL;
+
 	if (WARN_ON_ONCE(!RB_EMPTY_NODE(node)))
 		return (u64)NULL;
 
@@ -114,6 +122,9 @@  BPF_CALL_3(bpf_rbtree_find, struct bpf_map *, map, void *, key, void *, cb)
 {
 	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
 
+	if (!__rbtree_lock_held(tree))
+		return (u64)NULL;
+
 	return (u64)rb_find(key, &tree->root.rb_root,
 			    (int (*)(const void *key,
 				     const struct rb_node *))cb);
@@ -206,6 +217,9 @@  BPF_CALL_2(bpf_rbtree_remove, struct bpf_map *, map, void *, value)
 	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
 	struct rb_node *node = (struct rb_node *)value;
 
+	if (!__rbtree_lock_held(tree))
+		return (u64)NULL;
+
 	if (WARN_ON_ONCE(RB_EMPTY_NODE(node)))
 		return (u64)NULL;