@@ -125,6 +125,10 @@ gt-y += \
gt/gen9_renderstate.o
i915-y += $(gt-y)
+# Memory + DMA management
+i915-y += \
+ mm/i915_acquire_ctx.o
+
# GEM (Graphics Execution Management) code
gem-y += \
gem/i915_gem_busy.o \
@@ -87,6 +87,7 @@ static void __i915_globals_cleanup(void)
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
+ i915_global_acquire_init,
i915_global_buddy_init,
i915_global_context_init,
i915_global_gem_context_init,
@@ -27,6 +27,7 @@ void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
+int i915_global_acquire_init(void);
int i915_global_buddy_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
new file mode 100644
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/dma-resv.h>
+
+#include "i915_globals.h"
+#include "gem/i915_gem_object.h"
+
+#include "i915_acquire_ctx.h"
+
+/* Slab cache for i915_acquire nodes, registered with the i915_globals list. */
+static struct i915_global_acquire {
+ struct i915_global base;
+ struct kmem_cache *slab_acquires;
+} global;
+
+/* Singly-linked node recording one object held (or queued) by a context. */
+struct i915_acquire {
+ struct drm_i915_gem_object *obj;
+ struct i915_acquire *next;
+};
+
+/* Allocate one list node from the global slab; may fail under memory pressure. */
+static struct i915_acquire *i915_acquire_alloc(void)
+{
+ return kmem_cache_alloc(global.slab_acquires, GFP_KERNEL);
+}
+
+/* Return a node to the global slab; does not touch lnk->obj. */
+static void i915_acquire_free(struct i915_acquire *lnk)
+{
+ kmem_cache_free(global.slab_acquires, lnk);
+}
+
+/*
+ * Begin a ww_mutex acquire sequence against reservation_ww_class with an
+ * empty set of held locks. Must be paired with i915_acquire_ctx_fini(),
+ * which drops all locks and object references taken under this context.
+ */
+void i915_acquire_ctx_init(struct i915_acquire_ctx *ctx)
+{
+ ww_acquire_init(&ctx->ctx, &reservation_ww_class);
+ ctx->locked = NULL;
+}
+
+/*
+ * Lock obj's dma_resv under this acquire context, following the wait/wound
+ * protocol of reservation_ww_class: on -EDEADLK every lock already held is
+ * released and pushed back onto a relock queue, and the contended lock is
+ * then taken with the sleeping "slow" variant before the queue is retried.
+ * -EALREADY (the same object locked twice under this context) is treated
+ * as success after dropping the duplicate node and its extra reference.
+ *
+ * Each object left locked also holds a reference; both the lock and the
+ * reference are released by i915_acquire_ctx_fini(). Returns 0 on success
+ * or a negative errno (-ENOMEM, or the error from an interrupted wait).
+ */
+int i915_acquire_ctx_lock(struct i915_acquire_ctx *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_acquire *lock, *lnk;
+ int err;
+
+ lock = i915_acquire_alloc();
+ if (!lock)
+ return -ENOMEM;
+
+ lock->obj = i915_gem_object_get(obj);
+ lock->next = NULL;
+
+ /* "lock" is the queue of objects still waiting to be locked. */
+ while ((lnk = lock)) {
+ obj = lnk->obj;
+ lock = lnk->next;
+
+ err = dma_resv_lock_interruptible(obj->base.resv, &ctx->ctx);
+ if (err == -EDEADLK) {
+ struct i915_acquire *old;
+
+ /*
+ * Wounded: unlock everything we hold, requeue it for
+ * relocking, then sleep until the contended lock can
+ * be taken.
+ */
+ while ((old = ctx->locked)) {
+ i915_gem_object_unlock(old->obj);
+ ctx->locked = old->next;
+ old->next = lock;
+ lock = old;
+ }
+
+ err = dma_resv_lock_slow_interruptible(obj->base.resv,
+ &ctx->ctx);
+ }
+ if (!err) {
+ lnk->next = ctx->locked;
+ ctx->locked = lnk;
+ } else {
+ /* Also covers -EALREADY: drop the duplicate node/ref. */
+ i915_gem_object_put(obj);
+ i915_acquire_free(lnk);
+ }
+ if (err == -EALREADY)
+ err = 0;
+ if (err)
+ break;
+ }
+
+ /* On error, release the objects still queued for locking. */
+ while ((lnk = lock)) {
+ lock = lnk->next;
+ i915_gem_object_put(lnk->obj);
+ i915_acquire_free(lnk);
+ }
+
+ return err;
+}
+
+/*
+ * Presumably a hook for acquiring backing-store/memory-manager locks under
+ * the context; currently a no-op that always succeeds. NOTE(review):
+ * confirm intended semantics with callers once they land.
+ */
+int i915_acquire_mm(struct i915_acquire_ctx *acquire)
+{
+ return 0;
+}
+
+/*
+ * Unwind the context: unlock and unreference every object taken with
+ * i915_acquire_ctx_lock(), free the tracking nodes, then end the
+ * ww_mutex acquire sequence.
+ */
+void i915_acquire_ctx_fini(struct i915_acquire_ctx *ctx)
+{
+ struct i915_acquire *lnk;
+
+ while ((lnk = ctx->locked)) {
+ i915_gem_object_unlock(lnk->obj);
+ i915_gem_object_put(lnk->obj);
+
+ ctx->locked = lnk->next;
+ i915_acquire_free(lnk);
+ }
+
+ ww_acquire_fini(&ctx->ctx);
+}
+
+/* Selftests are compiled into this unit so they can reach static helpers. */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "st_acquire_ctx.c"
+#endif
+
+/* Release unused slab memory back to the system under memory pressure. */
+static void i915_global_acquire_shrink(void)
+{
+ kmem_cache_shrink(global.slab_acquires);
+}
+
+/* Tear down the slab cache on module unload. */
+static void i915_global_acquire_exit(void)
+{
+ kmem_cache_destroy(global.slab_acquires);
+}
+
+/* Completes the tentative definition of "global" with its callbacks. */
+static struct i915_global_acquire global = { {
+ .shrink = i915_global_acquire_shrink,
+ .exit = i915_global_acquire_exit,
+} };
+
+/* Called once at module init from the i915_globals constructor table. */
+int __init i915_global_acquire_init(void)
+{
+ global.slab_acquires = KMEM_CACHE(i915_acquire, 0);
+ if (!global.slab_acquires)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_ACQUIRE_CTX_H__
+#define __I915_ACQUIRE_CTX_H__
+
+#include <linux/list.h>
+#include <linux/ww_mutex.h>
+
+struct drm_i915_gem_object;
+struct i915_acquire;
+
+/*
+ * i915_acquire_ctx - tracks the set of dma-resv locks taken under one
+ * ww_acquire_ctx so they can be backed off and released together.
+ */
+struct i915_acquire_ctx {
+ struct ww_acquire_ctx ctx;
+ struct i915_acquire *locked;
+};
+
+void i915_acquire_ctx_init(struct i915_acquire_ctx *acquire);
+
+/* Mark the end of the locking phase; no further locks may be added. */
+static inline void i915_acquire_ctx_done(struct i915_acquire_ctx *acquire)
+{
+ ww_acquire_done(&acquire->ctx);
+}
+
+/* Unlocks and unreferences everything acquired, then ends the ww sequence. */
+void i915_acquire_ctx_fini(struct i915_acquire_ctx *acquire);
+
+int __must_check i915_acquire_ctx_lock(struct i915_acquire_ctx *acquire,
+ struct drm_i915_gem_object *obj);
+
+/* Currently a no-op placeholder; see the definition for details. */
+int i915_acquire_mm(struct i915_acquire_ctx *acquire);
+
+#endif /* __I915_ACQUIRE_CTX_H__ */
new file mode 100644
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_selftest.h"
+
+#include "selftests/i915_random.h"
+#include "selftests/mock_gem_device.h"
+
+/*
+ * Lock obj under the acquire context and verify that the underlying
+ * ww_mutex is indeed held afterwards; "name" is used only for error
+ * reporting. NOTE(review): the message says "i915_acquire_lock" while
+ * the function called is i915_acquire_ctx_lock() - consider aligning.
+ */
+static int checked_acquire_lock(struct i915_acquire_ctx *acquire,
+ struct drm_i915_gem_object *obj,
+ const char *name)
+{
+ int err;
+
+ err = i915_acquire_ctx_lock(acquire, obj);
+ if (err) {
+ pr_err("i915_acquire_lock(%s) failed, err:%d\n", name, err);
+ return err;
+ }
+
+ /* Only proves the mutex is held by someone, not by this context. */
+ if (!mutex_is_locked(&obj->base.resv->lock.base)) {
+ pr_err("Failed to lock %s!\n", name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Smoke test: lock two objects under one context (twice each, to
+ * exercise the -EALREADY path), check both are held after
+ * i915_acquire_ctx_done(), and released again after fini.
+ */
+static int igt_acquire_lock(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *a, *b;
+ struct i915_acquire_ctx acquire;
+ int err;
+
+ a = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto out_a;
+ }
+
+ i915_acquire_ctx_init(&acquire);
+
+ err = checked_acquire_lock(&acquire, a, "A");
+ if (err)
+ goto out_fini;
+
+ err = checked_acquire_lock(&acquire, b, "B");
+ if (err)
+ goto out_fini;
+
+ /* Again for EALREADY */
+
+ err = checked_acquire_lock(&acquire, a, "A");
+ if (err)
+ goto out_fini;
+
+ err = checked_acquire_lock(&acquire, b, "B");
+ if (err)
+ goto out_fini;
+
+ i915_acquire_ctx_done(&acquire);
+
+ /* Both locks must survive closing the acquire phase... */
+ if (!mutex_is_locked(&a->base.resv->lock.base)) {
+ pr_err("Failed to lock A, after i915_acquire_done\n");
+ err = -EINVAL;
+ }
+ if (!mutex_is_locked(&b->base.resv->lock.base)) {
+ pr_err("Failed to lock B, after i915_acquire_done\n");
+ err = -EINVAL;
+ }
+
+out_fini:
+ i915_acquire_ctx_fini(&acquire);
+
+ /* ...and must be dropped once the context is finished. */
+ if (mutex_is_locked(&a->base.resv->lock.base)) {
+ pr_err("A is still locked!\n");
+ err = -EINVAL;
+ }
+ if (mutex_is_locked(&b->base.resv->lock.base)) {
+ pr_err("B is still locked!\n");
+ err = -EINVAL;
+ }
+
+ i915_gem_object_put(b);
+out_a:
+ i915_gem_object_put(a);
+ return err;
+}
+
+/* Shared fixture: objects contended by all deadlock-test threads. */
+struct deadlock {
+ struct drm_i915_gem_object *obj[64];
+};
+
+/*
+ * Thread body for igt_acquire_deadlock(): repeatedly lock a random-sized,
+ * randomly-ordered subset of the shared objects until told to stop.
+ * Concurrent threads taking overlapping sets in different orders force
+ * -EDEADLK backoff cycles inside i915_acquire_ctx_lock().
+ */
+static int __igt_acquire_deadlock(void *arg)
+{
+ struct deadlock *dl = arg;
+ const unsigned int total = ARRAY_SIZE(dl->obj);
+ I915_RND_STATE(prng);
+ unsigned int *order;
+ int n, count, err = 0;
+
+ order = i915_random_order(total, &prng);
+ if (!order)
+ return -ENOMEM;
+
+ while (!kthread_should_stop()) {
+ struct i915_acquire_ctx acquire;
+
+ i915_random_reorder(order, total, &prng);
+ count = i915_prandom_u32_max_state(total, &prng);
+
+ i915_acquire_ctx_init(&acquire);
+
+ for (n = 0; n < count; n++) {
+ struct drm_i915_gem_object *obj = dl->obj[order[n]];
+
+ err = checked_acquire_lock(&acquire, obj, "dl");
+ if (err) {
+ i915_acquire_ctx_fini(&acquire);
+ goto out;
+ }
+ }
+
+ i915_acquire_ctx_done(&acquire);
+
+/* With lockdep, confirm this task holds every lock in the set... */
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ for (n = 0; n < count; n++) {
+ struct drm_i915_gem_object *obj = dl->obj[order[n]];
+
+ if (!lockdep_is_held(&obj->base.resv->lock.base)) {
+ pr_err("lock not taken!\n");
+ i915_acquire_ctx_fini(&acquire);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+#endif
+
+ i915_acquire_ctx_fini(&acquire);
+
+/* ...and that every lock has been dropped again after fini. */
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ for (n = 0; n < count; n++) {
+ struct drm_i915_gem_object *obj = dl->obj[order[n]];
+
+ if (lockdep_is_held(&obj->base.resv->lock.base)) {
+ pr_err("lock still held after fini!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+#endif
+ }
+
+out:
+ kfree(order);
+ return err;
+}
+
+/*
+ * Spawn one __igt_acquire_deadlock() thread per online CPU against a
+ * shared object array (each object appears twice so threads also hit
+ * -EALREADY), let them contend for the selftest timeout, then reap.
+ * NOTE(review): if kthread_run() fails on the very first thread we
+ * still msleep() the full timeout before cleaning up.
+ */
+static int igt_acquire_deadlock(void *arg)
+{
+ unsigned int ncpus = num_online_cpus();
+ struct drm_i915_private *i915 = arg;
+ struct task_struct **threads;
+ struct deadlock dl;
+ int ret = 0, n;
+
+ threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ for (n = 0; n < ARRAY_SIZE(dl.obj); n += 2) {
+ dl.obj[n] = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(dl.obj[n])) {
+ ret = PTR_ERR(dl.obj[n]);
+ goto out_obj;
+ }
+
+ /* Repeat the objects for -EALREADY */
+ dl.obj[n + 1] = i915_gem_object_get(dl.obj[n]);
+ }
+
+ for (n = 0; n < ncpus; n++) {
+ threads[n] = kthread_run(__igt_acquire_deadlock,
+ &dl, "igt/%d", n);
+ if (IS_ERR(threads[n])) {
+ ret = PTR_ERR(threads[n]);
+ /* Only stop the threads that actually started. */
+ ncpus = n;
+ break;
+ }
+
+ get_task_struct(threads[n]);
+ }
+
+ yield(); /* start all threads before we begin */
+ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+
+ for (n = 0; n < ncpus; n++) {
+ int err;
+
+ err = kthread_stop(threads[n]);
+ if (err < 0 && !ret)
+ ret = err;
+
+ put_task_struct(threads[n]);
+ }
+
+out_obj:
+ /* Entries past the first failure were never written; stop there. */
+ for (n = 0; n < ARRAY_SIZE(dl.obj); n++) {
+ if (IS_ERR(dl.obj[n]))
+ break;
+ i915_gem_object_put(dl.obj[n]);
+ }
+ kfree(threads);
+ return ret;
+}
+
+/*
+ * Entry point for the mock selftest list: run the acquire-ctx subtests
+ * against a mock (no hardware) i915 device.
+ */
+int i915_acquire_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_acquire_lock),
+ SUBTEST(igt_acquire_deadlock),
+ };
+ struct drm_i915_private *i915;
+ int err = 0;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+ drm_dev_put(&i915->drm);
+
+ return err;
+}
@@ -26,6 +26,7 @@ selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
+selftest(acquire, i915_acquire_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)