@@ -232,15 +232,21 @@ static void free_event_data(struct work_struct *work)
if (!(IS_ERR_OR_NULL(*ppath))) {
struct coresight_device *sink = coresight_get_sink(*ppath);
- coresight_trace_id_put_cpu_id_map(cpu, &sink->perf_sink_id_map);
+ /*
+ * Mark perf event as done for trace id allocator, but don't call
+ * coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions
+ * never free trace IDs, to ensure that the ID associated with a CPU
+ * cannot change while that session, or any concurrent one, is still
+ * active. Instead, a refcount is used so that the last event to call
+ * coresight_trace_id_perf_stop() frees all IDs.
+ */
+ coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
+
coresight_release_path(*ppath);
}
*ppath = NULL;
}
- /* mark perf event as done for trace id allocator */
- coresight_trace_id_perf_stop();
-
free_percpu(event_data->path);
kfree(event_data);
}
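
The last-stop-frees-all behaviour described in the new comment boils down to a small counting scheme. As a standalone model (C11 atomics standing in for the kernel's atomic_t; atomic_dec_return() corresponds to atomic_fetch_sub() minus one):

#include <stdatomic.h>
#include <stdbool.h>

struct sink_model {
	atomic_int sessions;	/* models perf_cs_etm_session_active */
};

/* models coresight_trace_id_perf_start(): one count per live session */
static void model_perf_start(struct sink_model *s)
{
	atomic_fetch_add(&s->sessions, 1);
}

/*
 * models coresight_trace_id_perf_stop(): returns true only for the
 * caller that drops the count to zero, i.e. the one that should
 * release every trace ID in this sink's map.
 */
static bool model_perf_stop(struct sink_model *s)
{
	return atomic_fetch_sub(&s->sessions, 1) == 1;
}
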
@@ -328,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = user_sink = coresight_get_sink_by_id(id);
}
- /* tell the trace ID allocator that a perf event is starting up */
- coresight_trace_id_perf_start();
-
/* check if user wants a coresight configuration selected */
cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
if (cfg_hash) {
@@ -411,6 +414,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
continue;
}
+ coresight_trace_id_perf_start(&sink->perf_sink_id_map);
*etm_event_cpu_path_ptr(event_data, cpu) = path;
}
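
Note the placement: the session count is now taken once per CPU path that is actually kept, and free_event_data() above drops it once per stored path, so start and stop calls stay balanced even when path setup fails on some CPUs. A sketch of the pairing (etm_create_path() is a hypothetical stand-in for the path-building code):

	/* setup side: one perf_start() per path that is kept */
	for_each_cpu(cpu, mask) {
		path = etm_create_path(event, cpu, sink);	/* hypothetical */
		if (IS_ERR(path))
			continue;	/* no path for this CPU, no count taken */
		coresight_trace_id_perf_start(&sink->perf_sink_id_map);
		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* teardown side: free_event_data() calls perf_stop() once per path */
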
@@ -18,12 +18,6 @@ static struct coresight_trace_id_map id_map_default = {
.cpu_map = &id_map_default_cpu_ids
};
-/* maintain a record of the pending releases per cpu */
-static cpumask_t cpu_id_release_pending;
-
-/* perf session active counter */
-static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);
-
/* lock to protect id_map and cpu data */
static DEFINE_SPINLOCK(id_map_lock);
@@ -35,7 +29,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
{
pr_debug("%s id_map::\n", func_name);
pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
- pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
}
#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
@@ -122,34 +115,18 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma
clear_bit(id, id_map->used_ids);
}
-static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
-{
- if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
- return;
- set_bit(id, id_map->pend_rel_ids);
-}
-
/*
- * release all pending IDs for all current maps & clear CPU associations
- *
- * This currently operates on the default id map, but may be extended to
- * operate on all registered id maps if per sink id maps are used.
+ * Release all IDs and clear CPU associations.
*/
-static void coresight_trace_id_release_all_pending(void)
+static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
{
- struct coresight_trace_id_map *id_map = &id_map_default;
unsigned long flags;
- int cpu, bit;
+ int cpu;
spin_lock_irqsave(&id_map_lock, flags);
- for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
- clear_bit(bit, id_map->used_ids);
- clear_bit(bit, id_map->pend_rel_ids);
- }
- for_each_cpu(cpu, &cpu_id_release_pending) {
- atomic_set(per_cpu_ptr(id_map_default.cpu_map, cpu), 0);
- cpumask_clear_cpu(cpu, &cpu_id_release_pending);
- }
+ bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
+ for_each_possible_cpu(cpu)
+ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
spin_unlock_irqrestore(&id_map_lock, flags);
DUMP_ID_MAP(id_map);
}
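
Because the whole map is dropped in one go, the release path needs no per-ID bookkeeping, only the same lock the allocator takes. A runnable userspace model of the same idea (a pthread mutex standing in for id_map_lock; the sizes are arbitrary):

#include <pthread.h>
#include <string.h>

#define IDS_MAX		128
#define NR_CPUS_MODEL	8

struct map_model {
	unsigned char used_ids[IDS_MAX / 8];	/* models the used_ids bitmap */
	int cpu_map[NR_CPUS_MODEL];		/* models the per-cpu ID slots */
	pthread_mutex_t lock;			/* models id_map_lock */
};

/* models coresight_trace_id_release_all() */
static void model_release_all(struct map_model *m)
{
	pthread_mutex_lock(&m->lock);
	memset(m->used_ids, 0, sizeof(m->used_ids));	/* bitmap_zero() */
	for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++)	/* for_each_possible_cpu() */
		m->cpu_map[cpu] = 0;
	pthread_mutex_unlock(&m->lock);
}
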
@@ -164,7 +141,7 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
/* check for existing allocation for this CPU */
id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (id)
- goto get_cpu_id_clr_pend;
+ goto get_cpu_id_out_unlock;
/*
* Find a new ID.
@@ -185,11 +162,6 @@ static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map
/* allocate the new id to the cpu */
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
-get_cpu_id_clr_pend:
- /* we are (re)using this ID - so ensure it is not marked for release */
- cpumask_clear_cpu(cpu, &cpu_id_release_pending);
- clear_bit(id, id_map->pend_rel_ids);
-
get_cpu_id_out_unlock:
spin_unlock_irqrestore(&id_map_lock, flags);
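
With the pending-release branch gone, the get path is a plain lookup-or-allocate under the lock. Extending the model above (the linear bit search is a simplification of the real allocator, which also skips the reserved ID range):

/* find and claim the first free ID; ID 0 stays reserved, as in the driver */
static int model_find_free_id(struct map_model *m)
{
	for (int id = 1; id < IDS_MAX; id++) {
		if (!(m->used_ids[id / 8] & (1u << (id % 8)))) {
			m->used_ids[id / 8] |= 1u << (id % 8);
			return id;
		}
	}
	return -1;
}

/* models _coresight_trace_id_get_cpu_id(): reuse an existing ID, else allocate */
static int model_get_cpu_id(struct map_model *m, int cpu)
{
	int id;

	pthread_mutex_lock(&m->lock);
	id = m->cpu_map[cpu];			/* existing allocation? */
	if (!id) {
		id = model_find_free_id(m);
		if (id > 0)
			m->cpu_map[cpu] = id;	/* publish cpu -> id */
	}
	pthread_mutex_unlock(&m->lock);
	return id;
}
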
@@ -210,15 +182,8 @@ static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_ma
spin_lock_irqsave(&id_map_lock, flags);
- if (atomic_read(&perf_cs_etm_session_active)) {
- /* set release at pending if perf still active */
- coresight_trace_id_set_pend_rel(id, id_map);
- cpumask_set_cpu(cpu, &cpu_id_release_pending);
- } else {
- /* otherwise clear id */
- coresight_trace_id_free(id, id_map);
- atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
- }
+ coresight_trace_id_free(id, id_map);
+ atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
spin_unlock_irqrestore(&id_map_lock, flags);
DUMP_ID_CPU(cpu, id);
@@ -302,17 +267,17 @@ void coresight_trace_id_put_system_id(int id)
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
-void coresight_trace_id_perf_start(void)
+void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
{
- atomic_inc(&perf_cs_etm_session_active);
- PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+ atomic_inc(&id_map->perf_cs_etm_session_active);
+ PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
-void coresight_trace_id_perf_stop(void)
+void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
{
- if (!atomic_dec_return(&perf_cs_etm_session_active))
- coresight_trace_id_release_all_pending();
- PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+ if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
+ coresight_trace_id_release_all(id_map);
+ PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
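
The effect of the per-map counter is easiest to see with two overlapping sessions on one sink (illustrative sequence only):

	struct coresight_trace_id_map *map = &sink->perf_sink_id_map;

	coresight_trace_id_perf_start(map);	/* event A: count 0 -> 1 */
	coresight_trace_id_perf_start(map);	/* event B: count 1 -> 2 */
	coresight_trace_id_perf_stop(map);	/* A ends:  count 2 -> 1, IDs kept */
	coresight_trace_id_perf_stop(map);	/* B ends:  count 1 -> 0, and
						 * coresight_trace_id_release_all()
						 * frees the whole map */

A second sink's map has its own counter, so sessions on unrelated sinks no longer pin each other's trace IDs.
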
@@ -17,9 +17,10 @@
* released when done.
*
* In order to ensure that a consistent cpu / ID matching is maintained
- * throughout a perf cs_etm event session - a session in progress flag will
- * be maintained, and released IDs not cleared until the perf session is
- * complete. This allows the same CPU to be re-allocated its prior ID.
+ * throughout a perf cs_etm event session, a session-in-progress reference
+ * count is maintained for each sink, and IDs are cleared only when all perf
+ * sessions on that sink complete. This allows the same CPU to be re-allocated
+ * its prior ID when events are scheduled in and out.
*
*
* Trace ID maps will be created and initialised to prevent architecturally
@@ -66,11 +67,7 @@ int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id
/**
* Release an allocated trace ID associated with the CPU.
*
- * This will release the CoreSight trace ID associated with the CPU,
- * unless a perf session is in operation.
- *
- * If a perf session is in operation then the ID will be marked as pending
- * release.
+ * This will release the CoreSight trace ID associated with the CPU.
*
* @cpu: The CPU index to release the associated trace ID.
*/
@@ -133,21 +130,21 @@ void coresight_trace_id_put_system_id(int id);
/**
* Notify the Trace ID allocator that a perf session is starting.
*
- * Increase the perf session reference count - called by perf when setting up
- * a trace event.
+ * Increase the perf session reference count - called by perf when setting up a
+ * trace event.
*
- * This reference count is used by the ID allocator to ensure that trace IDs
- * associated with a CPU cannot change or be released during a perf session.
+ * Perf sessions never free trace IDs, to ensure that the ID associated with a
+ * CPU cannot change while any concurrent session still depends on it. Instead,
+ * this refcount is used so that the last event to finish always frees all IDs.
*/
-void coresight_trace_id_perf_start(void);
+void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map);
/**
* Notify the ID allocator that a perf session is stopping.
*
- * Decrease the perf session reference count.
- * if this causes the count to go to zero, then all Trace IDs marked as pending
- * release, will be released.
+ * Decrease the perf session reference count. If this causes the count to go to
+ * zero, then all Trace IDs will be released.
*/
-void coresight_trace_id_perf_stop(void);
+void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map);
#endif /* _CORESIGHT_TRACE_ID_H */
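
Taken together, a perf-side caller of this API now follows the pattern below (a sketch; sink lookup and error handling are elided, and per-ID puts are not used on the perf path):

	struct coresight_trace_id_map *map = &sink->perf_sink_id_map;

	coresight_trace_id_perf_start(map);		  /* pin IDs for this session */
	id = coresight_trace_id_get_cpu_id_map(cpu, map); /* allocate or reuse */
	/* ... program the source with 'id' and run the trace ... */
	coresight_trace_id_perf_stop(map);		  /* last stop frees all IDs */

Non-perf users keep the explicit coresight_trace_id_put_cpu_id_map() per CPU, which now frees the ID immediately.
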
@@ -227,14 +227,12 @@ struct coresight_sysfs_link {
* @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
* Initialised so that the reserved IDs are permanently marked as
* in use.
- * @pend_rel_ids: CPU IDs that have been released by the trace source but not
- * yet marked as available, to allow re-allocation to the same
- * CPU during a perf session.
+ * @perf_cs_etm_session_active: Number of perf sessions using this ID map.
*/
struct coresight_trace_id_map {
DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
- DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
atomic_t __percpu *cpu_map;
+ atomic_t perf_cs_etm_session_active;
};
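
The counter lives in the map rather than in a file-scope global so that each sink accounts for its own sessions; as the hunks above show, the map is embedded in the sink's coresight_device as perf_sink_id_map (other members elided):

struct coresight_device {
	/* ... */
	struct coresight_trace_id_map perf_sink_id_map;	/* per-sink trace ID state */
};
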
/**