
[RFCv2,bpf-next,17/18] selftests/bpf: Lock tracking test changes

Message ID: 20220830172759.4069786-18-davemarchevsky@fb.com
State: RFC
Delegated to: BPF
Series: bpf: Introduce rbtree map

Checks

Context                  Check    Description
bpf/vmtest-bpf-next-PR   fail     merge-conflict
netdev/tree_selection    success  Clearly marked for bpf-next, async
netdev/apply             fail     Patch does not apply to bpf-next

Commit Message

Dave Marchevsky Aug. 30, 2022, 5:27 p.m. UTC
This patch contains the test changes corresponding to the functional changes
in "bpf: Verifier tracking of rbtree_spin_lock held". It will be squashed
with the other test patches; it is left in this state for RFCv2 feedback.

The iter section of the rbtree_map.c prog is commented out because the iter
helpers will be tossed anyway.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 .../selftests/bpf/prog_tests/rbtree_map.c     |  2 +-
 .../testing/selftests/bpf/progs/rbtree_map.c  | 16 ++++++++-------
 .../selftests/bpf/progs/rbtree_map_fail.c     | 20 +++++++++----------
 3 files changed, 20 insertions(+), 18 deletions(-)

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree_map.c b/tools/testing/selftests/bpf/prog_tests/rbtree_map.c
index 17cadcd05ee4..7634a2d93f0b 100644
--- a/tools/testing/selftests/bpf/prog_tests/rbtree_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/rbtree_map.c
@@ -17,7 +17,7 @@  static struct {
 	{"rb_node__field_store", "only read is supported"},
 	{"rb_node__alloc_no_add", "Unreleased reference id=2 alloc_insn=3"},
 	{"rb_node__two_alloc_one_add", "Unreleased reference id=2 alloc_insn=3"},
-	{"rb_node__remove_no_free", "Unreleased reference id=5 alloc_insn=28"},
+	{"rb_node__remove_no_free", "Unreleased reference id=6 alloc_insn=26"},
 	{"rb_tree__add_wrong_type", "rbtree: R2 is of type task_struct but node_data is expected"},
 	{"rb_tree__conditional_release_helper_usage",
 		"R2 type=ptr_cond_rel_ expected=ptr_"},
diff --git a/tools/testing/selftests/bpf/progs/rbtree_map.c b/tools/testing/selftests/bpf/progs/rbtree_map.c
index 50f29b9a5b82..957672cce82a 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_map.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_map.c
@@ -65,6 +65,7 @@  int check_rbtree(void *ctx)
 	struct node_data *node, *found, *ret;
 	struct node_data popped;
 	struct node_data search;
+	struct bpf_spin_lock *lock;
 	__u32 search2;
 
 	node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
@@ -73,7 +74,8 @@  int check_rbtree(void *ctx)
 
 	node->one = calls;
 	node->two = 6;
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	lock = &rbtree_lock;
+	bpf_rbtree_lock(lock);
 
 	ret = (struct node_data *)bpf_rbtree_add(&rbtree, node, less);
 	if (!ret) {
@@ -81,28 +83,28 @@  int check_rbtree(void *ctx)
 		goto unlock_ret;
 	}
 
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(lock);
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(lock);
 
 	search.one = calls;
 	found = (struct node_data *)bpf_rbtree_find(&rbtree, &search, cmp);
 	if (!found)
 		goto unlock_ret;
 
-	int node_ct = 0;
+	/*int node_ct = 0;
 	struct node_data *iter = (struct node_data *)bpf_rbtree_first(&rbtree);
 
 	while (iter) {
 		node_ct++;
 		iter = (struct node_data *)bpf_rbtree_next(&rbtree, iter);
-	}
+	}*/
 
 	ret = (struct node_data *)bpf_rbtree_remove(&rbtree, found);
 	if (!ret)
 		goto unlock_ret;
 
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(lock);
 
 	bpf_rbtree_free_node(&rbtree, ret);
 
@@ -110,7 +112,7 @@  int check_rbtree(void *ctx)
 	return 0;
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
diff --git a/tools/testing/selftests/bpf/progs/rbtree_map_fail.c b/tools/testing/selftests/bpf/progs/rbtree_map_fail.c
index ab4002a8211c..779b85294f37 100644
--- a/tools/testing/selftests/bpf/progs/rbtree_map_fail.c
+++ b/tools/testing/selftests/bpf/progs/rbtree_map_fail.c
@@ -61,7 +61,7 @@  int alloc_node__size_too_small(void *ctx)
 		return 0;
 	}
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 	/* will never execute, alloc_node should fail */
 	node->one = 1;
 	ret = bpf_rbtree_add(&rbtree, node, less);
@@ -71,7 +71,7 @@  int alloc_node__size_too_small(void *ctx)
 	}
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
@@ -148,7 +148,7 @@  int rb_node__two_alloc_one_add(void *ctx)
 		return 0;
 	node->one = 42;
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 
 	ret = bpf_rbtree_add(&rbtree, node, less);
 	if (!ret) {
@@ -157,7 +157,7 @@  int rb_node__two_alloc_one_add(void *ctx)
 	}
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
@@ -171,7 +171,7 @@  int rb_node__remove_no_free(void *ctx)
 		return 0;
 	node->one = 42;
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 
 	ret = bpf_rbtree_add(&rbtree, node, less);
 	if (!ret) {
@@ -188,7 +188,7 @@  int rb_node__remove_no_free(void *ctx)
 	 */
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
@@ -202,14 +202,14 @@  int rb_tree__add_wrong_type(void *ctx)
 
 	task = bpf_get_current_task_btf();
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 
 	ret = bpf_rbtree_add(&rbtree, task, less);
 	/* Verifier should fail at bpf_rbtree_add, so don't bother handling
 	 * failure.
 	 */
 
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }
 
@@ -223,7 +223,7 @@  int rb_tree__conditional_release_helper_usage(void *ctx)
 		return 0;
 	node->one = 42;
 
-	bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_lock(&rbtree_lock);
 
 	ret = bpf_rbtree_add(&rbtree, node, less);
 	/* Verifier should fail when trying to use CONDITIONAL_RELEASE
@@ -236,7 +236,7 @@  int rb_tree__conditional_release_helper_usage(void *ctx)
 	}
 
 unlock_ret:
-	bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+	bpf_rbtree_unlock(&rbtree_lock);
 	return 0;
 }