@@ -1124,6 +1124,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
if (!e)
return ERR_PTR(-ENOMEM);
+ mutex_lock(&ctx->i915->uabi_engines_mutex);
for_each_uabi_engine(engine, ctx->i915) {
struct intel_context *ce;
struct intel_sseu sseu = {};
@@ -1155,9 +1156,11 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
}
+ mutex_unlock(&ctx->i915->uabi_engines_mutex);
return e;
free_engines:
+ mutex_unlock(&ctx->i915->uabi_engines_mutex);
free_engines(e);
return err;
}
@@ -210,6 +210,13 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
LIST_HEAD(engines);
sort_engines(i915, &engines);
+ mutex_init(&i915->uabi_engines_mutex);
+
+ /*
+ * i915 is still being initialized here and is guaranteed to run
+ * single-threaded, so there is no need to take the mutex while
+ * accessing the uabi_engines list at this point.
+ */
prev = NULL;
p = &i915->uabi_engines.rb_node;
@@ -508,6 +508,11 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
i915->sysfs_engine = dir;
+ /*
+ * i915 is still being initialized here and is guaranteed to run
+ * single-threaded, so there is no need to take the mutex while
+ * accessing the uabi_engines list at this point.
+ */
for_each_uabi_engine(engine, i915) {
struct kobject *kobj;
@@ -1592,12 +1592,14 @@ int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
bool active = false;
/* If the command parser is not enabled, report 0 - unsupported */
+ mutex_lock(&dev_priv->uabi_engines_mutex);
for_each_uabi_engine(engine, dev_priv) {
if (intel_engine_using_cmd_parser(engine)) {
active = true;
break;
}
}
+ mutex_unlock(&dev_priv->uabi_engines_mutex);
if (!active)
return 0;
@@ -459,8 +459,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
to_gt(i915)->clock_period_ns);
p = drm_seq_file_printer(m);
+ mutex_lock(&i915->uabi_engines_mutex);
for_each_uabi_engine(engine, i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
+ mutex_unlock(&i915->uabi_engines_mutex);
intel_gt_show_timelines(to_gt(i915), &p, i915_request_show_with_schedule);
@@ -474,6 +476,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ mutex_lock(&i915->uabi_engines_mutex);
for_each_uabi_engine(engine, i915) {
const struct i915_wa_list *wal = &engine->ctx_wa_list;
const struct i915_wa *wa;
@@ -493,6 +496,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
seq_printf(m, "\n");
}
+ mutex_unlock(&i915->uabi_engines_mutex);
return 0;
}
@@ -231,6 +231,10 @@ struct drm_i915_private {
struct rb_root uabi_engines;
};
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
+ /*
+ * Protects runtime modifications and walks of the uabi_engines
+ * rb-tree, which can now change after driver registration.
+ */
+ struct mutex uabi_engines_mutex;
/* protects the irq masks */
spinlock_t irq_lock;
@@ -1263,7 +1263,11 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
i915_gem_suspend_late(dev_priv);
for_each_gt(gt, dev_priv, i)
intel_gt_driver_remove(gt);
+
+ /* Let's make sure no one is using the uabi_engines list */
+ mutex_lock(&dev_priv->uabi_engines_mutex);
dev_priv->uabi_engines = RB_ROOT;
+ mutex_unlock(&dev_priv->uabi_engines_mutex);
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
@@ -2732,6 +2732,7 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
* If we don't modify the kernel_context, we do not get events while
* idle.
*/
+ mutex_lock(&i915->uabi_engines_mutex);
for_each_uabi_engine(engine, i915) {
struct intel_context *ce = engine->kernel_context;
@@ -2744,6 +2745,9 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
- if (err)
- return err;
+ if (err) {
+ mutex_unlock(&i915->uabi_engines_mutex);
+ return err;
+ }
}
+ mutex_unlock(&i915->uabi_engines_mutex);
return 0;
}
@@ -1022,6 +1022,7 @@ create_event_attributes(struct i915_pmu *pmu)
}
}
+ mutex_lock(&i915->uabi_engines_mutex);
for_each_uabi_engine(engine, i915) {
for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
if (!engine_event_status(engine,
@@ -1029,6 +1030,7 @@ create_event_attributes(struct i915_pmu *pmu)
count++;
}
}
+ mutex_unlock(&i915->uabi_engines_mutex);
/* Allocate attribute objects and table. */
i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
@@ -1086,6 +1088,7 @@ create_event_attributes(struct i915_pmu *pmu)
}
/* Initialize supported engine counters. */
+ mutex_lock(&i915->uabi_engines_mutex);
for_each_uabi_engine(engine, i915) {
for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
char *str;
@@ -1115,6 +1118,7 @@ create_event_attributes(struct i915_pmu *pmu)
pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
}
}
+ mutex_unlock(&i915->uabi_engines_mutex);
pmu->i915_attr = i915_attr;
pmu->pmu_attr = pmu_attr;
@@ -140,6 +140,7 @@ query_engine_info(struct drm_i915_private *i915,
if (query_item->flags)
return -EINVAL;
+ mutex_lock(&i915->uabi_engines_mutex);
for_each_uabi_engine(engine, i915)
num_uabi_engines++;
@@ -168,6 +169,7 @@ query_engine_info(struct drm_i915_private *i915,
query.num_engines++;
info_ptr++;
}
+ mutex_unlock(&i915->uabi_engines_mutex);
if (copy_to_user(query_ptr, &query, sizeof(query)))
return -EFAULT;
Until now, the UABI engines list has been accessed read-only: it was created once during driver initialization and destroyed on module unload. Upcoming commits will modify this list when the CCS mode is changed, allowing compute engines to be added to and removed from it dynamically at runtime on user request. To prevent races, protect the engine list with a mutex, thereby serializing access to it. Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com> --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 3 +++ drivers/gpu/drm/i915/gt/intel_engine_user.c | 7 +++++++ drivers/gpu/drm/i915/gt/sysfs_engines.c | 5 +++++ drivers/gpu/drm/i915/i915_cmd_parser.c | 2 ++ drivers/gpu/drm/i915/i915_debugfs.c | 4 ++++ drivers/gpu/drm/i915/i915_drv.h | 4 ++++ drivers/gpu/drm/i915/i915_gem.c | 4 ++++ drivers/gpu/drm/i915/i915_perf.c | 2 ++ drivers/gpu/drm/i915/i915_pmu.c | 4 ++++ drivers/gpu/drm/i915/i915_query.c | 2 ++ 10 files changed, 37 insertions(+)