[10/10] coresight: trace-id: Add debug & test macros to trace id allocation

Message ID 20220308205000.27646-11-mike.leach@linaro.org (mailing list archive)
State New, archived
Series coresight: Add new API to allocate trace source ID values

Commit Message

Mike Leach March 8, 2022, 8:50 p.m. UTC
Add a number of pr_debug() macros to allow debugging and testing of the
trace ID allocation system.
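
The debug output is compiled out by default. To use it, define
TRACE_ID_DEBUG at the top of coresight-trace-id.c:

  /* coresight-trace-id.c: uncomment to compile in the debug helpers */
  #define TRACE_ID_DEBUG 1

The pr_debug() sites then also need to be enabled at run time, e.g. via
dynamic debug (assuming CONFIG_DYNAMIC_DEBUG is set) by writing
"file coresight-trace-id.c +p" to /sys/kernel/debug/dynamic_debug/control,
or by additionally defining DEBUG for the file.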

Signed-off-by: Mike Leach <mike.leach@linaro.org>
---
 .../hwtracing/coresight/coresight-trace-id.c  | 33 +++++++++++++++++++
 1 file changed, 33 insertions(+)

Patch

diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index ce6c7d7b55d6..8dcb698b8899 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -71,6 +71,27 @@  static int coresight_trace_id_find_new_id(struct coresight_trace_id_map *id_map)
 	return id;
 }
 
+/* #define TRACE_ID_DEBUG 1 */
+#ifdef TRACE_ID_DEBUG
+static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
+					  const char *func_name)
+{
+	/* currently 2 u64s are sufficient to hold all the ids */
+	pr_debug("%s id_map::\n", func_name);
+	pr_debug("Avial= 0x%016lx%016lx\n", id_map->avail_ids[1], id_map->avail_ids[0]);
+	pr_debug("Pend = 0x%016lx%016lx\n", id_map->pend_rel_ids[1], id_map->pend_rel_ids[0]);
+}
+#define DUMP_ID_MAP(map)   coresight_trace_id_dump_table(map, __func__)
+#define DUMP_ID_CPU(cpu, id) pr_debug("%s called;  cpu=%d, id=%d\n", __func__, cpu, id)
+#define DUMP_ID(id)   pr_debug("%s called; id=%d\n", __func__, id)
+#define PERF_SESSION(n) pr_debug("%s perf count %d\n", __func__, n)
+#else
+#define DUMP_ID_MAP(map)
+#define DUMP_ID(id)
+#define DUMP_ID_CPU(cpu, id)
+#define PERF_SESSION(n)
+#endif
+
 /* release all pending IDs for all current maps & clear CPU associations */
 static void coresight_trace_id_release_all_pending(void)
 {
@@ -82,6 +103,7 @@  static void coresight_trace_id_release_all_pending(void)
 			clear_bit(bit, id_map->avail_ids);
 			clear_bit(bit, id_map->pend_rel_ids);
 		}
+		DUMP_ID_MAP(id_map);
 	}
 
 	for_each_possible_cpu(cpu) {
@@ -112,6 +134,8 @@  int coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map
 
 get_cpu_id_out:
 	spin_unlock_irqrestore(&id_map_lock, flags);
+	DUMP_ID_CPU(cpu, id);
+	DUMP_ID_MAP(id_map);
 	return id;
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);
@@ -138,6 +162,8 @@  void coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_ma
 
  put_cpu_id_out:
 	spin_unlock_irqrestore(&id_map_lock, flags);
+	DUMP_ID_CPU(cpu, id);
+	DUMP_ID_MAP(id_map);
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);
 
@@ -152,6 +178,8 @@  int coresight_trace_id_get_system_id(struct coresight_trace_id_map *id_map)
 		coresight_trace_id_set_inuse(id, id_map);
 	spin_unlock_irqrestore(&id_map_lock, flags);
 
+	DUMP_ID(id);
+	DUMP_ID_MAP(id_map);
 	return id;
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);
@@ -163,6 +191,9 @@  void coresight_trace_id_put_system_id(struct coresight_trace_id_map *id_map, int
 	spin_lock_irqsave(&id_map_lock, flags);
 	coresight_trace_id_clear_inuse(id, id_map);
 	spin_unlock_irqrestore(&id_map_lock, flags);
+
+	DUMP_ID(id);
+	DUMP_ID_MAP(id_map);
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
 
@@ -172,6 +203,7 @@  void coresight_trace_id_perf_start(void)
 
 	spin_lock_irqsave(&id_map_lock, flags);
 	perf_cs_etm_session_active++;
+	PERF_SESSION(perf_cs_etm_session_active);
 	spin_unlock_irqrestore(&id_map_lock, flags);
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
@@ -182,6 +214,7 @@  void coresight_trace_id_perf_stop(void)
 
 	spin_lock_irqsave(&id_map_lock, flags);
 	perf_cs_etm_session_active--;
+	PERF_SESSION(perf_cs_etm_session_active);
 	if (!perf_cs_etm_session_active)
 		coresight_trace_id_release_all_pending();
 	spin_unlock_irqrestore(&id_map_lock, flags);