
[v2,13/13] coresight: trace-id: Add debug & test macros to Trace ID allocation

Message ID 20220704081149.16797-14-mike.leach@linaro.org (mailing list archive)
State New, archived
Series coresight: Add new API to allocate trace source ID values

Commit Message

Mike Leach July 4, 2022, 8:11 a.m. UTC
Add a number of pr_debug macros to allow debugging and testing of the
trace ID allocation system.

Signed-off-by: Mike Leach <mike.leach@linaro.org>
---
 .../hwtracing/coresight/coresight-trace-id.c  | 33 +++++++++++++++++++
 1 file changed, 33 insertions(+)

Comments

Suzuki K Poulose July 20, 2022, 9:41 a.m. UTC | #1
On 04/07/2022 09:11, Mike Leach wrote:
> Add a number of pr_debug macros to allow debugging and testing of the
> trace ID allocation system.
> 
> Signed-off-by: Mike Leach <mike.leach@linaro.org>
> ---
>   .../hwtracing/coresight/coresight-trace-id.c  | 33 +++++++++++++++++++
>   1 file changed, 33 insertions(+)
> 
> diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
> index dac9c89ae00d..841307e0d899 100644
> --- a/drivers/hwtracing/coresight/coresight-trace-id.c
> +++ b/drivers/hwtracing/coresight/coresight-trace-id.c
> @@ -71,6 +71,27 @@ static int coresight_trace_id_find_new_id(struct coresight_trace_id_map *id_map)
>   	return id;
>   }
>   
> +/* #define TRACE_ID_DEBUG 1 */
> +#ifdef TRACE_ID_DEBUG
> +static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
> +					  const char *func_name)
> +{
> +	/* currently 2 u64s are sufficient to hold all the ids */
> +	pr_debug("%s id_map::\n", func_name);
> +	pr_debug("Avail= 0x%016lx%016lx\n", id_map->avail_ids[1], id_map->avail_ids[0]);
> +	pr_debug("Pend = 0x%016lx%016lx\n", id_map->pend_rel_ids[1], id_map->pend_rel_ids[0]);

minor nit: You may use bitmap_print_to_pagebuf() to print the bitmaps.
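e.g., something along these lines (completely untested; CORESIGHT_TRACE_IDS_MAX
is assumed to be the bit width of the avail_ids/pend_rel_ids bitmaps as declared
elsewhere in this series, and bitmap_print_to_pagebuf() expects a page-sized
buffer):

static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
					  const char *func_name)
{
	/*
	 * Debug-only helper; bitmap_print_to_pagebuf() comes from
	 * <linux/bitmap.h> and wants a page-sized buffer, so use a
	 * static one rather than the stack.
	 */
	static char buf[PAGE_SIZE];

	pr_debug("%s id_map::\n", func_name);
	bitmap_print_to_pagebuf(false, buf, id_map->avail_ids,
				CORESIGHT_TRACE_IDS_MAX);
	pr_debug("Avail= %s", buf);
	bitmap_print_to_pagebuf(false, buf, id_map->pend_rel_ids,
				CORESIGHT_TRACE_IDS_MAX);
	pr_debug("Pend = %s", buf);
}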

> +}
> +#define DUMP_ID_MAP(map)   coresight_trace_id_dump_table(map, __func__)
> +#define DUMP_ID_CPU(cpu, id) pr_debug("%s called;  cpu=%d, id=%d\n", __func__, cpu, id)
> +#define DUMP_ID(id)   pr_debug("%s called; id=%d\n", __func__, id)
> +#define PERF_SESSION(n) pr_debug("%s perf count %d\n", __func__, n)
> +#else
> +#define DUMP_ID_MAP(map)
> +#define DUMP_ID(id)
> +#define DUMP_ID_CPU(cpu, id)
> +#define PERF_SESSION(n)
> +#endif
> +
>   /* release all pending IDs for all current maps & clear CPU associations */
>   static void coresight_trace_id_release_all_pending(void)
>   {
> @@ -81,6 +102,7 @@ static void coresight_trace_id_release_all_pending(void)
>   		clear_bit(bit, id_map->avail_ids);
>   		clear_bit(bit, id_map->pend_rel_ids);
>   	}
> +	DUMP_ID_MAP(id_map);
>   
>   	for_each_possible_cpu(cpu) {
>   		if (per_cpu(cpu_ids, cpu).pend_rel) {
> @@ -126,6 +148,8 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_
>   
>   get_cpu_id_out:
>   	spin_unlock_irqrestore(&id_map_lock, flags);
> +	DUMP_ID_CPU(cpu, id);
> +	DUMP_ID_MAP(id_map);
>   	return id;
>   }
>   
> @@ -151,6 +175,8 @@ static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id
>   
>    put_cpu_id_out:
>   	spin_unlock_irqrestore(&id_map_lock, flags);
> +	DUMP_ID_CPU(cpu, id);
> +	DUMP_ID_MAP(id_map);
>   }
>   
>   static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
> @@ -164,6 +190,8 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i
>   		coresight_trace_id_set_inuse(id, id_map);
>   	spin_unlock_irqrestore(&id_map_lock, flags);
>   
> +	DUMP_ID(id);
> +	DUMP_ID_MAP(id_map);
>   	return id;
>   }
>   
> @@ -174,6 +202,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
>   	spin_lock_irqsave(&id_map_lock, flags);
>   	coresight_trace_id_clear_inuse(id, id_map);
>   	spin_unlock_irqrestore(&id_map_lock, flags);
> +
> +	DUMP_ID(id);
> +	DUMP_ID_MAP(id_map);
>   }
>   
>   /* API functions */
> @@ -207,6 +238,7 @@ void coresight_trace_id_perf_start(void)
>   
	int n;

>   	spin_lock_irqsave(&id_map_lock, flags);
>   	perf_cs_etm_session_active++;
	n = perf_cs_etm_session_active++;


>   	spin_unlock_irqrestore(&id_map_lock, flags);

	PERF_SESSION(n);

Not a good idea to print something from within spin_lock.
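i.e., snapshot the count inside the critical section and do the printing once
the lock has been dropped, along the lines of (untested, names as in the
quoted patch):

void coresight_trace_id_perf_start(void)
{
	unsigned long flags;
	int n;

	spin_lock_irqsave(&id_map_lock, flags);
	n = perf_cs_etm_session_active++;	/* n is the pre-increment value */
	spin_unlock_irqrestore(&id_map_lock, flags);
	/* print outside the lock */
	PERF_SESSION(n);
}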

>   }
>   EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
> @@ -217,6 +249,7 @@ void coresight_trace_id_perf_stop(void)
>   
>   	spin_lock_irqsave(&id_map_lock, flags);
>   	perf_cs_etm_session_active--;
> +	PERF_SESSION(perf_cs_etm_session_active);

Same as above.

>   	if (!perf_cs_etm_session_active)
>   		coresight_trace_id_release_all_pending();
>   	spin_unlock_irqrestore(&id_map_lock, flags);

Suzuki

Patch

diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
index dac9c89ae00d..841307e0d899 100644
--- a/drivers/hwtracing/coresight/coresight-trace-id.c
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -71,6 +71,27 @@  static int coresight_trace_id_find_new_id(struct coresight_trace_id_map *id_map)
 	return id;
 }
 
+/* #define TRACE_ID_DEBUG 1 */
+#ifdef TRACE_ID_DEBUG
+static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
+					  const char *func_name)
+{
+	/* currently 2 u64s are sufficient to hold all the ids */
+	pr_debug("%s id_map::\n", func_name);
+	pr_debug("Avail= 0x%016lx%016lx\n", id_map->avail_ids[1], id_map->avail_ids[0]);
+	pr_debug("Pend = 0x%016lx%016lx\n", id_map->pend_rel_ids[1], id_map->pend_rel_ids[0]);
+}
+#define DUMP_ID_MAP(map)   coresight_trace_id_dump_table(map, __func__)
+#define DUMP_ID_CPU(cpu, id) pr_debug("%s called;  cpu=%d, id=%d\n", __func__, cpu, id)
+#define DUMP_ID(id)   pr_debug("%s called; id=%d\n", __func__, id)
+#define PERF_SESSION(n) pr_debug("%s perf count %d\n", __func__, n)
+#else
+#define DUMP_ID_MAP(map)
+#define DUMP_ID(id)
+#define DUMP_ID_CPU(cpu, id)
+#define PERF_SESSION(n)
+#endif
+
 /* release all pending IDs for all current maps & clear CPU associations */
 static void coresight_trace_id_release_all_pending(void)
 {
@@ -81,6 +102,7 @@  static void coresight_trace_id_release_all_pending(void)
 		clear_bit(bit, id_map->avail_ids);
 		clear_bit(bit, id_map->pend_rel_ids);
 	}
+	DUMP_ID_MAP(id_map);
 
 	for_each_possible_cpu(cpu) {
 		if (per_cpu(cpu_ids, cpu).pend_rel) {
@@ -126,6 +148,8 @@  static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_
 
 get_cpu_id_out:
 	spin_unlock_irqrestore(&id_map_lock, flags);
+	DUMP_ID_CPU(cpu, id);
+	DUMP_ID_MAP(id_map);
 	return id;
 }
 
@@ -151,6 +175,8 @@  static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id
 
  put_cpu_id_out:
 	spin_unlock_irqrestore(&id_map_lock, flags);
+	DUMP_ID_CPU(cpu, id);
+	DUMP_ID_MAP(id_map);
 }
 
 static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
@@ -164,6 +190,8 @@  static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i
 		coresight_trace_id_set_inuse(id, id_map);
 	spin_unlock_irqrestore(&id_map_lock, flags);
 
+	DUMP_ID(id);
+	DUMP_ID_MAP(id_map);
 	return id;
 }
 
@@ -174,6 +202,9 @@  static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
 	spin_lock_irqsave(&id_map_lock, flags);
 	coresight_trace_id_clear_inuse(id, id_map);
 	spin_unlock_irqrestore(&id_map_lock, flags);
+
+	DUMP_ID(id);
+	DUMP_ID_MAP(id_map);
 }
 
 /* API functions */
@@ -207,6 +238,7 @@  void coresight_trace_id_perf_start(void)
 
 	spin_lock_irqsave(&id_map_lock, flags);
 	perf_cs_etm_session_active++;
+	PERF_SESSION(perf_cs_etm_session_active);
 	spin_unlock_irqrestore(&id_map_lock, flags);
 }
 EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
@@ -217,6 +249,7 @@  void coresight_trace_id_perf_stop(void)
 
 	spin_lock_irqsave(&id_map_lock, flags);
 	perf_cs_etm_session_active--;
+	PERF_SESSION(perf_cs_etm_session_active);
 	if (!perf_cs_etm_session_active)
 		coresight_trace_id_release_all_pending();
 	spin_unlock_irqrestore(&id_map_lock, flags);