[4/7] kcov: introduce new kcov KCOV_TRACE_UNIQ_CMP mode

Message ID: 20250114-kcov-v1-4-004294b931a2@quicinc.com
State: New
Series: kcov: Introduce New Unique PC|EDGE|CMP Modes

Commit Message

Joey Jiao Jan. 14, 2025, 5:34 a.m. UTC
Similar to the KCOV_TRACE_CMP mode, KCOV_TRACE_UNIQ_CMP stores comparison
types and operands into the shared area, but uses the kcov hashmap to
record each unique (ip, type, arg1, arg2) tuple only once (a userspace
usage sketch follows the diffstat below).

Signed-off-by: Jiao, Joey <quic_jiangenj@quicinc.com>
---
 include/linux/kcov.h      |   2 +
 include/uapi/linux/kcov.h |   2 +
 kernel/kcov.c             | 112 ++++++++++++++++++++++++++++++++--------------
 3 files changed, 83 insertions(+), 33 deletions(-)
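
For context, a minimal userspace sketch (not part of the patch) of how a
harness might consume the new mode. It assumes the usual
/sys/kernel/debug/kcov ioctl flow from Documentation/dev-tools/kcov.rst plus
the KCOV_TRACE_UNIQ_CMP flag added by this series; each record in the shared
area is four 64-bit words (type, arg1, arg2, ip), matching what
kcov_map_add() writes below. COVER_SIZE and the traced read() call are
arbitrary placeholders, and error handling is omitted for brevity.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kcov.h>
#include <unistd.h>

#define COVER_SIZE	(64 << 10)	/* in unsigned longs, as for other kcov modes */

int main(void)
{
	unsigned long *cover, n, i;
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* KCOV_TRACE_UNIQ_CMP (8) is the new uapi flag from this patch. */
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_UNIQ_CMP);
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

	read(-1, NULL, 0);	/* run the code to be traced */

	/* cover[0] counts unique CMP records; each record is 4 words. */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("type=0x%lx arg1=0x%lx arg2=0x%lx ip=0x%lx\n",
		       cover[1 + i * 4], cover[2 + i * 4],
		       cover[3 + i * 4], cover[4 + i * 4]);

	ioctl(fd, KCOV_DISABLE, 0);
	close(fd);
	return 0;
}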

Patch

diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index 56b858205ba16c47fc72bda9938c98f034503c8c..a78d78164bf75368c71a958a5438fc3ee68c95ca 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -27,6 +27,8 @@  enum kcov_mode {
 	KCOV_MODE_TRACE_UNIQ_PC = 16,
 	/* Collecting uniq edge mode. */
 	KCOV_MODE_TRACE_UNIQ_EDGE = 32,
+	/* Collecting uniq cmp mode. */
+	KCOV_MODE_TRACE_UNIQ_CMP = 64,
 };
 
 #define KCOV_IN_CTXSW	(1 << 30)
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index 9b2019f0ab8b8cb5426d2d6b74472fa1a7293817..08abfca273c9624dc54a2c70b12a4a9302700f26 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -39,6 +39,8 @@  enum {
 	KCOV_TRACE_UNIQ_PC = 2,
 	/* Collecting uniq edge mode. */
 	KCOV_TRACE_UNIQ_EDGE = 4,
+	/* Collecting uniq CMP mode. */
+	KCOV_TRACE_UNIQ_CMP = 8,
 };
 
 /*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index c04bbec9ac3186a5145240de8ac609ad8a7ca733..af73c40114d23adedab8318e8657d24bf36ae865 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -36,6 +36,11 @@ 
 
 struct kcov_entry {
 	unsigned long		ent;
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+	unsigned long		type;
+	unsigned long		arg1;
+	unsigned long		arg2;
+#endif
 
 	struct hlist_node	node;
 };
@@ -44,7 +49,7 @@  struct kcov_entry {
 #define MIN_POOL_ALLOC_ORDER ilog2(roundup_pow_of_two(sizeof(struct kcov_entry)))
 
 /*
- * kcov hashmap to store uniq pc, prealloced mem for kcov_entry
+ * kcov hashmap to store uniq pc|edge|cmp, prealloced mem for kcov_entry
  * and area shared between kernel and userspace.
  */
 struct kcov_map {
@@ -87,7 +92,7 @@  struct kcov {
 	unsigned long		prev_pc;
 	/* Coverage buffer shared with user space. */
 	void			*area;
-	/* Coverage hashmap for unique pc. */
+	/* Coverage hashmap for unique pc|cmp. */
 	struct kcov_map		*map;
 	/* Edge hashmap for unique edge. */
 	struct kcov_map		*map_edge;
@@ -289,14 +294,23 @@  static notrace inline void kcov_map_add(struct kcov_map *map, struct kcov_entry
 	struct kcov *kcov;
 	struct kcov_entry *entry;
 	unsigned int key = hash_key(ent);
-	unsigned long pos, *area;
+	unsigned long pos, start_index, end_pos, max_pos, *area;
 
 	kcov = t->kcov;
 
-	hash_for_each_possible_rcu(map->buckets, entry, node, key) {
-		if (entry->ent == ent->ent)
-			return;
-	}
+	if ((mode == KCOV_MODE_TRACE_UNIQ_PC ||
+	     mode == KCOV_MODE_TRACE_UNIQ_EDGE))
+		hash_for_each_possible_rcu(map->buckets, entry, node, key) {
+			if (entry->ent == ent->ent)
+				return;
+		}
+	else
+		hash_for_each_possible_rcu(map->buckets, entry, node, key) {
+			if (entry->ent == ent->ent && entry->type == ent->type &&
+			    entry->arg1 == ent->arg1 && entry->arg2 == ent->arg2) {
+				return;
+			}
+		}
 
 	entry = (struct kcov_entry *)gen_pool_alloc(map->pool, 1 << MIN_POOL_ALLOC_ORDER);
 	if (unlikely(!entry))
@@ -306,16 +320,31 @@  static notrace inline void kcov_map_add(struct kcov_map *map, struct kcov_entry
 	memcpy(entry, ent, sizeof(*entry));
 	hash_add_rcu(map->buckets, &entry->node, key);
 
-	if (mode == KCOV_MODE_TRACE_UNIQ_PC)
+	if (mode == KCOV_MODE_TRACE_UNIQ_PC || mode == KCOV_MODE_TRACE_UNIQ_CMP)
 		area = t->kcov_area;
 	else
 		area = kcov->map_edge->area;
 
 	pos = READ_ONCE(area[0]) + 1;
-	if (likely(pos < t->kcov_size)) {
-		WRITE_ONCE(area[0], pos);
-		barrier();
-		area[pos] = ent->ent;
+	if (mode == KCOV_MODE_TRACE_UNIQ_PC || mode == KCOV_MODE_TRACE_UNIQ_EDGE) {
+		if (likely(pos < t->kcov_size)) {
+			WRITE_ONCE(area[0], pos);
+			barrier();
+			area[pos] = ent->ent;
+		}
+	} else {
+		start_index = 1 + (pos - 1) * KCOV_WORDS_PER_CMP;
+		max_pos = t->kcov_size * sizeof(unsigned long);
+		end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
+		if (likely(end_pos <= max_pos)) {
+			/* See comment in __sanitizer_cov_trace_pc(). */
+			WRITE_ONCE(area[0], pos);
+			barrier();
+			area[start_index] = ent->type;
+			area[start_index + 1] = ent->arg1;
+			area[start_index + 2] = ent->arg2;
+			area[start_index + 3] = ent->ent;
+		}
 	}
 }
 
@@ -384,33 +413,44 @@  static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
 	struct task_struct *t;
 	u64 *area;
 	u64 count, start_index, end_pos, max_pos;
+	struct kcov_entry entry = {0};
+	unsigned int mode;
 
 	t = current;
-	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
+	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP | KCOV_MODE_TRACE_UNIQ_CMP, t))
 		return;
 
+	mode = t->kcov_mode;
 	ip = canonicalize_ip(ip);
 
-	/*
-	 * We write all comparison arguments and types as u64.
-	 * The buffer was allocated for t->kcov_size unsigned longs.
-	 */
-	area = (u64 *)t->kcov_area;
-	max_pos = t->kcov_size * sizeof(unsigned long);
-
-	count = READ_ONCE(area[0]);
-
-	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
-	start_index = 1 + count * KCOV_WORDS_PER_CMP;
-	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
-	if (likely(end_pos <= max_pos)) {
-		/* See comment in __sanitizer_cov_trace_pc(). */
-		WRITE_ONCE(area[0], count + 1);
-		barrier();
-		area[start_index] = type;
-		area[start_index + 1] = arg1;
-		area[start_index + 2] = arg2;
-		area[start_index + 3] = ip;
+	if (mode == KCOV_MODE_TRACE_CMP) {
+		/*
+		 * We write all comparison arguments and types as u64.
+		 * The buffer was allocated for t->kcov_size unsigned longs.
+		 */
+		area = (u64 *)t->kcov_area;
+		max_pos = t->kcov_size * sizeof(unsigned long);
+
+		count = READ_ONCE(area[0]);
+
+		/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
+		start_index = 1 + count * KCOV_WORDS_PER_CMP;
+		end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
+		if (likely(end_pos <= max_pos)) {
+			/* See comment in __sanitizer_cov_trace_pc(). */
+			WRITE_ONCE(area[0], count + 1);
+			barrier();
+			area[start_index] = type;
+			area[start_index + 1] = arg1;
+			area[start_index + 2] = arg2;
+			area[start_index + 3] = ip;
+		}
+	} else {
+		entry.type = type;
+		entry.arg1 = arg1;
+		entry.arg2 = arg2;
+		entry.ent = ip;
+		kcov_map_add(t->kcov->map, &entry, t, KCOV_MODE_TRACE_UNIQ_CMP);
 	}
 }
 
@@ -730,6 +770,12 @@  static int kcov_get_mode(unsigned long arg)
 		mode |= KCOV_MODE_TRACE_UNIQ_PC;
 	if (arg & KCOV_TRACE_UNIQ_EDGE)
 		mode |= KCOV_MODE_TRACE_UNIQ_EDGE;
+	if (arg == KCOV_TRACE_UNIQ_CMP)
+#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
+		mode = KCOV_MODE_TRACE_UNIQ_CMP;
+#else
+		return -EOPNOTSUPP;
+#endif
 	if (!mode)
 		return -EINVAL;