
[RFCv2,bpf-next,11/18] bpf: Check rbtree lock held during verification

Message ID 20220830172759.4069786-12-davemarchevsky@fb.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf: Introduce rbtree map

Checks

Context                  Check    Description
bpf/vmtest-bpf-next-PR   fail     merge-conflict
netdev/tree_selection    success  Clearly marked for bpf-next, async
netdev/apply             fail     Patch does not apply to bpf-next

Commit Message

Dave Marchevsky Aug. 30, 2022, 5:27 p.m. UTC
This patch builds on the previous few locking patches, teaching the
verifier to check whether the rbtree's lock is held.
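
For context, the pattern being enforced looks roughly like the sketch
below. The add/find/remove/unlock helpers appear in this patch; the
lock-taking helpers and the exact signatures are assumptions based on
earlier patches in this series rather than verbatim from them:

	/* Sketch (not verbatim from this series) of the pattern enforced:
	 * the program must hold the lock associated with the rbtree map
	 * before calling bpf_rbtree_add/find/remove on it.
	 */
	lock = bpf_rbtree_get_lock(&rbtree_map);       /* assumed helper */
	bpf_rbtree_lock(lock);                         /* assumed helper */
	bpf_rbtree_add(&rbtree_map, node, node_less);  /* signature assumed */
	bpf_rbtree_unlock(lock);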

[ RFC Notes:

  * After this patch, helpers which can only fail due to the dynamic
    lock check could be changed to always succeed, likely allowing us
    to get rid of the CONDITIONAL_RELEASE logic. But since dynamic lock
    checking is almost certainly going to be necessary regardless, this
    wasn't done.
]

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 include/linux/bpf.h   |  2 ++
 kernel/bpf/rbtree.c   |  8 ++++++++
 kernel/bpf/verifier.c | 43 +++++++++++++++++++++++++++++++++++++++----
 3 files changed, 49 insertions(+), 4 deletions(-)

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d6458aa7b79c..b762c6b3dcfb 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -155,6 +155,8 @@  struct bpf_map_ops {
 				     bpf_callback_t callback_fn,
 				     void *callback_ctx, u64 flags);
 
+	bool (*map_lock_held)(struct bpf_map *map, void *current_lock);
+
 	/* BTF id of struct allocated by map_alloc */
 	int *map_btf_id;
 
diff --git a/kernel/bpf/rbtree.c b/kernel/bpf/rbtree.c
index 0821e841a518..b5d158254de6 100644
--- a/kernel/bpf/rbtree.c
+++ b/kernel/bpf/rbtree.c
@@ -245,6 +245,13 @@  static int rbtree_map_delete_elem(struct bpf_map *map, void *value)
 	return -ENOTSUPP;
 }
 
+static bool rbtree_map_lock_held(struct bpf_map *map, void *current_lock)
+{
+	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
+
+	return tree->lock == current_lock;
+}
+
 BPF_CALL_2(bpf_rbtree_remove, struct bpf_map *, map, void *, value)
 {
 	struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
@@ -353,4 +360,5 @@  const struct bpf_map_ops rbtree_map_ops = {
 	.map_delete_elem = rbtree_map_delete_elem,
 	.map_check_btf = rbtree_map_check_btf,
 	.map_btf_id = &bpf_rbtree_map_btf_ids[0],
+	.map_lock_held = rbtree_map_lock_held,
 };
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f8ba381f1327..3c9af1047d80 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -508,16 +508,25 @@  static bool is_ptr_cast_function(enum bpf_func_id func_id)
 		func_id == BPF_FUNC_skc_to_tcp_request_sock;
 }
 
+/* These functions can only be called when spinlock associated with rbtree
+ * is held. They all take struct bpf_map ptr to rbtree as their first argument,
+ * so we can verify that the correct lock is held before loading program
+ */
+static bool is_rbtree_lock_check_function(enum bpf_func_id func_id)
+{
+	return func_id == BPF_FUNC_rbtree_add ||
+		func_id == BPF_FUNC_rbtree_remove ||
+		func_id == BPF_FUNC_rbtree_find;
+}
+
 /* These functions can only be called when spinlock associated with rbtree
  * is held. If they have a callback argument, that callback is not required
  * to release active_spin_lock before exiting
  */
 static bool is_rbtree_lock_required_function(enum bpf_func_id func_id)
 {
-	return func_id == BPF_FUNC_rbtree_add ||
-		func_id == BPF_FUNC_rbtree_remove ||
-		func_id == BPF_FUNC_rbtree_find ||
-		func_id == BPF_FUNC_rbtree_unlock;
+	return func_id == BPF_FUNC_rbtree_unlock ||
+		is_rbtree_lock_check_function(func_id);
 }
 
 /* These functions are OK to call when spinlock associated with rbtree
@@ -7354,6 +7363,26 @@  static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno
 				 state->callback_subprogno == subprogno);
 }
 
+static int check_rbtree_lock_held(struct bpf_verifier_env *env,
+				  struct bpf_map *map)
+{
+	struct bpf_verifier_state *cur = env->cur_state;
+
+	if (!map)
+		return -1;
+
+	if (!cur->active_spin_lock || !cur->maybe_active_spin_lock_addr ||
+	    !map || !map->ops->map_lock_held)
+		return -1;
+
+	if (!map->ops->map_lock_held(map, cur->maybe_active_spin_lock_addr)) {
+		verbose(env, "active spin lock doesn't match rbtree's lock\n");
+		return -1;
+	}
+
+	return 0;
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			     int *insn_idx_p)
 {
@@ -7560,6 +7589,12 @@  static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	if (err)
 		return err;
 
+	if (is_rbtree_lock_check_function(func_id) &&
+	    check_rbtree_lock_held(env, meta.map_ptr)) {
+		verbose(env, "lock associated with rbtree is not held\n");
+		return -EINVAL;
+	}
+
 	/* reset caller saved regs */
 	for (i = 0; i < CALLER_SAVED_REGS; i++) {
 		mark_reg_not_init(env, regs, caller_saved[i]);
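
To illustrate the two cases check_rbtree_lock_held() catches (purely a
sketch; the lock-taking helpers are assumptions from earlier patches in
the series): a call with no active spin lock, and a call where the held
lock does not belong to the rbtree map passed to the helper.

	/* Rejected: no lock held at the call site
	 * ("lock associated with rbtree is not held").
	 */
	bpf_rbtree_add(&rbtree_a, node, node_less);

	/* Rejected: a lock is held, but it is not rbtree_a's lock
	 * ("active spin lock doesn't match rbtree's lock").
	 */
	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree_b));   /* assumed helpers */
	bpf_rbtree_add(&rbtree_a, node, node_less);        /* signature assumed */
	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree_b));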