@@ -1960,6 +1960,10 @@ struct drm_i915_private {
struct mutex timeline_lock;
struct list_head timelines;
+ /* Pack multiple timelines' seqnos into the same page */
+ struct i915_vma *timeline_hwsp;
+ u64 timeline_free;
+
struct list_head active_rings;
struct list_head closed_vma;
u32 active_requests;
@@ -9,10 +9,85 @@
#include "i915_timeline.h"
#include "i915_syncmap.h"
-void i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *timeline,
- const char *name)
+#define NBITS BITS_PER_TYPE(typeof(i915->gt.timeline_free))
+
+/*
+ * Find the lowest free cacheline slot in the shared HWSP page.
+ *
+ * Returns the index of the first set bit in gt.timeline_free, or NBITS
+ * if every cacheline in the page is already claimed. Caller must hold
+ * gt.timeline_lock (see alloc_hwsp()).
+ *
+ * NOTE(review): casting &u64 to unsigned long * for find_first_bit()
+ * is correct on 64-bit and on 32-bit little-endian, but would misnumber
+ * bits on a 32-bit big-endian build — presumably fine as i915 only
+ * targets x86; confirm.
+ */
+static int find_first_cacheline(struct drm_i915_private *i915)
+{
+	return find_first_bit((unsigned long *)&i915->gt.timeline_free, NBITS);
+}
+
+/*
+ * alloc_hwsp - assign a cacheline of GGTT-visible storage to a timeline
+ * @timeline: the timeline requiring a HWSP slot
+ *
+ * Suballocate one cacheline out of a shared, internally allocated page
+ * (tracked by a free bitmap in gt.timeline_free, one bit per cacheline)
+ * and record the backing vma, the byte offset and a CPU pointer to the
+ * seqno slot in @timeline. When the current page is exhausted, the
+ * cached reference is dropped and a fresh page is allocated; each
+ * timeline keeps its own reference so a page stays alive until its
+ * last user is finalised.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int alloc_hwsp(struct i915_timeline *timeline)
+{
+	struct drm_i915_private *i915 = timeline->i915;
+	struct i915_vma *vma;
+	void *vaddr;
+	int offset;
+
+	mutex_lock(&i915->gt.timeline_lock);
+
+restart:
+	offset = find_first_cacheline(i915);
+	if (offset == NBITS && i915->gt.timeline_hwsp) {
+		/* Page exhausted; drop our cached reference and start anew */
+		i915_vma_put(i915->gt.timeline_hwsp);
+		i915->gt.timeline_hwsp = NULL;
+	}
+
+	vma = i915->gt.timeline_hwsp;
+	if (!vma) {
+		struct drm_i915_gem_object *bo;
+
+		/* Drop the lock before allocations */
+		mutex_unlock(&i915->gt.timeline_lock);
+
+		BUILD_BUG_ON(NBITS * CACHELINE_BYTES > PAGE_SIZE);
+		bo = i915_gem_object_create_internal(i915, PAGE_SIZE);
+		if (IS_ERR(bo))
+			return PTR_ERR(bo);
+
+		i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
+
+		vma = i915_vma_instance(bo, &i915->ggtt.vm, NULL);
+		if (IS_ERR(vma)) {
+			/*
+			 * i915_vma_instance() does not consume the object
+			 * reference on failure, so release it here or we
+			 * leak the freshly allocated page.
+			 */
+			i915_gem_object_put(bo);
+			return PTR_ERR(vma);
+		}
+
+		mutex_lock(&i915->gt.timeline_lock);
+		if (i915->gt.timeline_hwsp) {
+			/* Lost the race to another allocator; use their page */
+			i915_gem_object_put(bo);
+			goto restart;
+		}
+
+		i915->gt.timeline_hwsp = vma;
+		i915->gt.timeline_free = ~0ull;
+		offset = 0;
+	}
+
+	/* Claim the cacheline for this timeline */
+	i915->gt.timeline_free &= ~BIT_ULL(offset);
+
+	timeline->hwsp_ggtt = i915_vma_get(vma);
+	timeline->hwsp_offset = offset * CACHELINE_BYTES;
+
+	mutex_unlock(&i915->gt.timeline_lock);
+
+	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) { /* leak the cacheline, but will clean up later */
+		i915_vma_put(vma);
+		return PTR_ERR(vaddr);
+	}
+
+	/* Every new timeline starts with a cleared breadcrumb slot */
+	timeline->hwsp_seqno =
+		memset(vaddr + timeline->hwsp_offset,
+		       0,
+		       sizeof(*timeline->hwsp_seqno));
+
+	return 0;
+}
+
+int i915_timeline_init(struct drm_i915_private *i915,
+ struct i915_timeline *timeline,
+ const char *name)
{
+ int err;
+
/*
* Ideally we want a set of engines on a single leaf as we expect
* to mostly be tracking synchronisation between engines. It is not
@@ -23,10 +98,11 @@ void i915_timeline_init(struct drm_i915_private *i915,
timeline->i915 = i915;
timeline->name = name;
+ timeline->pin_count = 0;
- mutex_lock(&i915->gt.timeline_lock);
- list_add(&timeline->link, &i915->gt.timelines);
- mutex_unlock(&i915->gt.timeline_lock);
+ err = alloc_hwsp(timeline);
+ if (err)
+ return err;
/* Called during early_init before we know how many engines there are */
@@ -38,6 +114,12 @@ void i915_timeline_init(struct drm_i915_private *i915,
INIT_LIST_HEAD(&timeline->requests);
i915_syncmap_init(&timeline->sync);
+
+ mutex_lock(&i915->gt.timeline_lock);
+ list_add(&timeline->link, &i915->gt.timelines);
+ mutex_unlock(&i915->gt.timeline_lock);
+
+ return 0;
}
void i915_timelines_init(struct drm_i915_private *i915)
@@ -78,30 +160,73 @@ void i915_timelines_park(struct drm_i915_private *i915)
void i915_timeline_fini(struct i915_timeline *timeline)
{
+ struct drm_i915_private *i915 = timeline->i915;
+
+ GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(&timeline->requests));
i915_syncmap_free(&timeline->sync);
- mutex_lock(&timeline->i915->gt.timeline_lock);
+ mutex_lock(&i915->gt.timeline_lock);
list_del(&timeline->link);
- mutex_unlock(&timeline->i915->gt.timeline_lock);
+ if (timeline->hwsp_ggtt == i915->gt.timeline_hwsp)
+ i915->gt.timeline_free |=
+ BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES);
+ mutex_unlock(&i915->gt.timeline_lock);
+
+ i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+ i915_vma_put(timeline->hwsp_ggtt);
}
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915, const char *name)
{
struct i915_timeline *timeline;
+ int err;
timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
if (!timeline)
return ERR_PTR(-ENOMEM);
- i915_timeline_init(i915, timeline, name);
+ err = i915_timeline_init(i915, timeline, name);
+ if (err) {
+ kfree(timeline);
+ return ERR_PTR(err);
+ }
+
kref_init(&timeline->kref);
return timeline;
}
+/*
+ * i915_timeline_pin - pin the timeline's HWSP into the GGTT
+ * @tl: the timeline
+ *
+ * The first pin maps the backing vma into the global GTT (placed high,
+ * out of the mappable aperture); subsequent calls merely bump the pin
+ * count.
+ *
+ * Returns 0 on success, or a negative error code if the vma could not
+ * be pinned, in which case the pin count is restored to zero so that
+ * the pin may be retried. (The previous error label was named "unpin"
+ * despite unpinning nothing; the goto added no value either.)
+ */
+int i915_timeline_pin(struct i915_timeline *tl)
+{
+	int err;
+
+	if (tl->pin_count++)
+		return 0;
+	GEM_BUG_ON(!tl->pin_count);
+
+	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
+	if (err)
+		tl->pin_count = 0; /* first pin failed, allow a retry */
+
+	return err;
+}
+
+/*
+ * i915_timeline_unpin - release one pin on the timeline's HWSP
+ * @tl: the timeline
+ *
+ * Drops a single pin; when the final pin is released the backing vma
+ * is unpinned from the GGTT (lazily, via __i915_vma_unpin).
+ */
+void i915_timeline_unpin(struct i915_timeline *tl)
+{
+	GEM_BUG_ON(!tl->pin_count);
+	if (--tl->pin_count == 0)
+		__i915_vma_unpin(tl->hwsp_ggtt);
+}
+
void __i915_timeline_free(struct kref *kref)
{
struct i915_timeline *timeline =
@@ -113,8 +238,14 @@ void __i915_timeline_free(struct kref *kref)
void i915_timelines_fini(struct drm_i915_private *i915)
{
+ struct i915_vma *vma;
+
GEM_BUG_ON(!list_empty(&i915->gt.timelines));
+ vma = fetch_and_zero(&i915->gt.timeline_hwsp);
+ if (vma)
+ i915_vma_put(vma);
+
mutex_destroy(&i915->gt.timeline_lock);
}
@@ -32,6 +32,8 @@
#include "i915_syncmap.h"
#include "i915_utils.h"
+struct i915_vma;
+
struct i915_timeline {
u64 fence_context;
u32 seqno;
@@ -40,6 +42,11 @@ struct i915_timeline {
#define TIMELINE_CLIENT 0 /* default subclass */
#define TIMELINE_ENGINE 1
+ unsigned int pin_count;
+ const u32 *hwsp_seqno;
+ struct i915_vma *hwsp_ggtt;
+ u32 hwsp_offset;
+
/**
* List of breadcrumbs associated with GPU requests currently
* outstanding.
@@ -71,9 +78,9 @@ struct i915_timeline {
struct kref kref;
};
-void i915_timeline_init(struct drm_i915_private *i915,
- struct i915_timeline *tl,
- const char *name);
+int i915_timeline_init(struct drm_i915_private *i915,
+ struct i915_timeline *tl,
+ const char *name);
void i915_timeline_fini(struct i915_timeline *tl);
static inline void
@@ -135,6 +142,9 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
return __i915_timeline_sync_is_later(tl, fence->context, fence->seqno);
}
+int i915_timeline_pin(struct i915_timeline *tl);
+void i915_timeline_unpin(struct i915_timeline *tl);
+
void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
void i915_timelines_fini(struct drm_i915_private *i915);
@@ -493,15 +493,22 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
*
* Typically done early in the submission mode specific engine setup stage.
*/
-void intel_engine_setup_common(struct intel_engine_cs *engine)
+int intel_engine_setup_common(struct intel_engine_cs *engine)
{
- i915_timeline_init(engine->i915, &engine->timeline, engine->name);
+ int err;
+
+ err = i915_timeline_init(engine->i915, &engine->timeline, engine->name);
+ if (err)
+ return err;
+
i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
intel_engine_init_batch_pool(engine);
intel_engine_init_cmd_parser(engine);
+
+ return 0;
}
static void cleanup_status_page(struct intel_engine_cs *engine)
@@ -2183,10 +2183,14 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
-static void
+static int
logical_ring_setup(struct intel_engine_cs *engine)
{
- intel_engine_setup_common(engine);
+ int err;
+
+ err = intel_engine_setup_common(engine);
+ if (err)
+ return err;
/* Intentionally left blank. */
engine->buffer = NULL;
@@ -2196,6 +2200,8 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
+
+ return 0;
}
static int logical_ring_init(struct intel_engine_cs *engine)
@@ -2243,7 +2249,9 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
int ret;
- logical_ring_setup(engine);
+ ret = logical_ring_setup(engine);
+ if (ret)
+ return ret;
if (HAS_L3_DPF(dev_priv))
engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
@@ -2277,7 +2285,11 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
int logical_xcs_ring_init(struct intel_engine_cs *engine)
{
- logical_ring_setup(engine);
+ int err;
+
+ err = logical_ring_setup(engine);
+ if (err)
+ return err;
return logical_ring_init(engine);
}
@@ -1522,7 +1522,9 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
struct intel_ring *ring;
int err;
- intel_engine_setup_common(engine);
+ err = intel_engine_setup_common(engine);
+ if (err)
+ return err;
timeline = i915_timeline_create(engine->i915, engine->name);
if (IS_ERR(timeline)) {
@@ -823,7 +823,7 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
-void intel_engine_setup_common(struct intel_engine_cs *engine);
+int intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -13,6 +13,7 @@ selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
selftest(requests, i915_request_live_selftests)
+selftest(timelines, i915_timeline_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
@@ -16,7 +16,7 @@ selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
-selftest(timelines, i915_gem_timeline_mock_selftests)
+selftest(timelines, i915_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
@@ -256,7 +256,7 @@ static int bench_sync(void *arg)
return 0;
}
-int i915_gem_timeline_mock_selftests(void)
+int i915_timeline_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_sync),
@@ -265,3 +265,374 @@ int i915_gem_timeline_mock_selftests(void)
return i915_subtests(tests, NULL);
}
+
+/*
+ * live_hwsp_engine - write to many timelines' HWSP slots, engine by engine
+ *
+ * For each engine able to store a dword, create NUM_TIMELINES timelines
+ * and emit a single GGTT store of the timeline's creation index into its
+ * HWSP cacheline. After idling the GPU, verify each slot holds its own
+ * index, i.e. that timelines packed into shared pages do not interfere.
+ *
+ * NOTE(review): near-duplicate of live_hwsp_alternate() (only the loop
+ * nesting differs); consider factoring a shared store-emission helper.
+ */
+static int live_hwsp_engine(void *arg)
+{
+#define NUM_TIMELINES 4096
+	struct drm_i915_private *i915 = arg;
+	struct i915_timeline **timelines;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	unsigned long count, n;
+	int err = 0;
+
+	/*
+	 * Create a bunch of timelines and check we can write
+	 * independently to each of their breadcrumb slots.
+	 */
+
+	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+				   sizeof(*timelines),
+				   GFP_KERNEL);
+	if (!timelines)
+		return -ENOMEM;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(i915);
+
+	count = 0;
+	for_each_engine(engine, i915, id) {
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		for (n = 0; n < NUM_TIMELINES; n++) {
+			struct i915_timeline *tl;
+			struct i915_request *rq;
+			u32 addr;
+			u32 *cs;
+
+			tl = i915_timeline_create(i915, "live");
+			if (IS_ERR(tl)) {
+				err = PTR_ERR(tl);
+				goto out;
+			}
+
+			/* A freshly created timeline must read back zero */
+			if (*tl->hwsp_seqno) {
+				pr_err("Timeline %lu created with non-zero breadcrumb, found %x\n",
+				       count, *tl->hwsp_seqno);
+				err = -EINVAL;
+				i915_timeline_put(tl);
+				goto out;
+			}
+
+			/* Pin so the HWSP has a stable GGTT address */
+			err = i915_timeline_pin(tl);
+			if (err) {
+				i915_timeline_put(tl);
+				goto out;
+			}
+
+			rq = i915_request_alloc(engine, i915->kernel_context);
+			if (IS_ERR(rq)) {
+				i915_timeline_unpin(tl);
+				i915_timeline_put(tl);
+				err = PTR_ERR(rq);
+				goto out;
+			}
+
+			cs = intel_ring_begin(rq, 4);
+			if (IS_ERR(cs)) {
+				i915_request_add(rq);
+				i915_timeline_unpin(tl);
+				i915_timeline_put(tl);
+				err = PTR_ERR(cs);
+				goto out;
+			}
+
+			addr = i915_ggtt_offset(tl->hwsp_ggtt) + tl->hwsp_offset;
+
+			/* MI_STORE_DWORD_IMM operand layout varies by gen */
+			if (INTEL_GEN(i915) >= 8) {
+				*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+				*cs++ = addr;
+				*cs++ = 0;
+				*cs++ = count;
+			} else if (INTEL_GEN(i915) >= 4) {
+				*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+				*cs++ = 0;
+				*cs++ = addr;
+				*cs++ = count;
+			} else {
+				*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+				*cs++ = addr;
+				*cs++ = count;
+				*cs++ = MI_NOOP;
+			}
+			intel_ring_advance(rq, cs);
+
+			i915_request_add(rq);
+			i915_timeline_unpin(tl);
+
+			timelines[count++] = tl;
+		}
+	}
+
+	/* Let all the stores land before inspecting the results */
+	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
+
+out:
+	/* Verify (if idle succeeded) and release every timeline created */
+	for (n = 0; n < count; n++) {
+		struct i915_timeline *tl = timelines[n];
+
+		if (!err && *tl->hwsp_seqno != n) {
+			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+			       n, *tl->hwsp_seqno);
+			err = -EINVAL;
+		}
+		i915_timeline_put(tl);
+	}
+
+	intel_runtime_pm_put(i915, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	kvfree(timelines);
+
+	return err;
+#undef NUM_TIMELINES
+}
+
+/*
+ * live_hwsp_alternate - write to many timelines' HWSP slots, alternating
+ * engines
+ *
+ * Same check as live_hwsp_engine(), but with the loop nesting inverted:
+ * consecutive timelines (and hence neighbouring cachelines within a
+ * shared page) are written by different engines, exercising cross-engine
+ * interference between adjacent HWSP slots.
+ *
+ * NOTE(review): near-duplicate of live_hwsp_engine(); consider factoring
+ * a shared store-emission helper.
+ */
+static int live_hwsp_alternate(void *arg)
+{
+#define NUM_TIMELINES 4096
+	struct drm_i915_private *i915 = arg;
+	struct i915_timeline **timelines;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	unsigned long count, n;
+	int err = 0;
+
+	/*
+	 * Create a bunch of timelines and check we can write
+	 * independently to each of their breadcrumb slots with adjacent
+	 * engines.
+	 */
+
+	timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
+				   sizeof(*timelines),
+				   GFP_KERNEL);
+	if (!timelines)
+		return -ENOMEM;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(i915);
+
+	count = 0;
+	for (n = 0; n < NUM_TIMELINES; n++) {
+		for_each_engine(engine, i915, id) {
+			struct i915_timeline *tl;
+			struct i915_request *rq;
+			u32 addr;
+			u32 *cs;
+
+			if (!intel_engine_can_store_dword(engine))
+				continue;
+
+			tl = i915_timeline_create(i915, "live");
+			if (IS_ERR(tl)) {
+				err = PTR_ERR(tl);
+				goto out;
+			}
+
+			/* A freshly created timeline must read back zero */
+			if (*tl->hwsp_seqno) {
+				pr_err("Timeline %lu created with non-zero breadcrumb, found %x\n",
+				       count, *tl->hwsp_seqno);
+				err = -EINVAL;
+				i915_timeline_put(tl);
+				goto out;
+			}
+
+			/* Pin so the HWSP has a stable GGTT address */
+			err = i915_timeline_pin(tl);
+			if (err) {
+				i915_timeline_put(tl);
+				goto out;
+			}
+
+			rq = i915_request_alloc(engine, i915->kernel_context);
+			if (IS_ERR(rq)) {
+				i915_timeline_unpin(tl);
+				i915_timeline_put(tl);
+				err = PTR_ERR(rq);
+				goto out;
+			}
+
+			cs = intel_ring_begin(rq, 4);
+			if (IS_ERR(cs)) {
+				i915_request_add(rq);
+				i915_timeline_unpin(tl);
+				i915_timeline_put(tl);
+				err = PTR_ERR(cs);
+				goto out;
+			}
+
+			addr = i915_ggtt_offset(tl->hwsp_ggtt) + tl->hwsp_offset;
+
+			/* MI_STORE_DWORD_IMM operand layout varies by gen */
+			if (INTEL_GEN(i915) >= 8) {
+				*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+				*cs++ = addr;
+				*cs++ = 0;
+				*cs++ = count;
+			} else if (INTEL_GEN(i915) >= 4) {
+				*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+				*cs++ = 0;
+				*cs++ = addr;
+				*cs++ = count;
+			} else {
+				*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+				*cs++ = addr;
+				*cs++ = count;
+				*cs++ = MI_NOOP;
+			}
+			intel_ring_advance(rq, cs);
+
+			i915_request_add(rq);
+			i915_timeline_unpin(tl);
+
+			timelines[count++] = tl;
+		}
+	}
+
+	/* Let all the stores land before inspecting the results */
+	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
+
+out:
+	/* Verify (if idle succeeded) and release every timeline created */
+	for (n = 0; n < count; n++) {
+		struct i915_timeline *tl = timelines[n];
+
+		if (!err && *tl->hwsp_seqno != n) {
+			pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+			       n, *tl->hwsp_seqno);
+			err = -EINVAL;
+		}
+		i915_timeline_put(tl);
+	}
+
+	intel_runtime_pm_put(i915, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	kvfree(timelines);
+
+	return err;
+#undef NUM_TIMELINES
+}
+
+/*
+ * live_hwsp_recycle - reuse HWSP cachelines one timeline at a time
+ *
+ * Repeatedly create a timeline, emit a single seqno store from the
+ * engine, wait for it to land, and verify the value before releasing
+ * the timeline. Since each timeline is destroyed before the next is
+ * created, the same cacheline slot should be recycled, and every new
+ * timeline must still start from a zeroed breadcrumb.
+ */
+static int live_hwsp_recycle(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	intel_wakeref_t wakeref;
+	unsigned long count;
+	int err = 0;
+
+	/*
+	 * Check seqno writes into one timeline at a time. We expect to
+	 * recycle the breadcrumb slot between iterations and neither
+	 * want to confuse ourselves or the GPU.
+	 */
+
+	mutex_lock(&i915->drm.struct_mutex);
+	wakeref = intel_runtime_pm_get(i915);
+
+	count = 0;
+	for_each_engine(engine, i915, id) {
+		IGT_TIMEOUT(end_time);
+
+		if (!intel_engine_can_store_dword(engine))
+			continue;
+
+		do {
+			struct i915_timeline *tl;
+			struct i915_request *rq;
+			u32 addr;
+			u32 *cs;
+
+			tl = i915_timeline_create(i915, "live");
+			if (IS_ERR(tl)) {
+				err = PTR_ERR(tl);
+				goto out;
+			}
+
+			/* A recycled cacheline must be handed back zeroed */
+			if (*tl->hwsp_seqno) {
+				pr_err("Timeline %lu created with non-zero breadcrumb, found %x\n",
+				       count, *tl->hwsp_seqno);
+				err = -EINVAL;
+				i915_timeline_put(tl);
+				goto out;
+			}
+
+			err = i915_timeline_pin(tl);
+			if (err) {
+				i915_timeline_put(tl);
+				goto out;
+			}
+
+			rq = i915_request_alloc(engine, i915->kernel_context);
+			if (IS_ERR(rq)) {
+				i915_timeline_unpin(tl);
+				i915_timeline_put(tl);
+				err = PTR_ERR(rq);
+				goto out;
+			}
+
+			cs = intel_ring_begin(rq, 4);
+			if (IS_ERR(cs)) {
+				i915_request_add(rq);
+				i915_timeline_unpin(tl);
+				i915_timeline_put(tl);
+				err = PTR_ERR(cs);
+				goto out;
+			}
+
+			addr = i915_ggtt_offset(tl->hwsp_ggtt) + tl->hwsp_offset;
+
+			/* MI_STORE_DWORD_IMM operand layout varies by gen */
+			if (INTEL_GEN(i915) >= 8) {
+				*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+				*cs++ = addr;
+				*cs++ = 0;
+				*cs++ = count;
+			} else if (INTEL_GEN(i915) >= 4) {
+				*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+				*cs++ = 0;
+				*cs++ = addr;
+				*cs++ = count;
+			} else {
+				*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+				*cs++ = addr;
+				*cs++ = count;
+				*cs++ = MI_NOOP;
+			}
+			intel_ring_advance(rq, cs);
+
+			i915_request_add(rq);
+			i915_timeline_unpin(tl);
+
+			/*
+			 * Wait for the store to land before peeking at the
+			 * HWSP: use a short bounded wait and fail with -EIO
+			 * rather than blocking unbounded on a stuck request,
+			 * and never ignore a wait error before inspecting
+			 * the result.
+			 */
+			if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+				pr_err("Wait for timeline write timed out!\n");
+				i915_timeline_put(tl);
+				err = -EIO;
+				goto out;
+			}
+
+			if (*tl->hwsp_seqno != count) {
+				pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
+				       count, *tl->hwsp_seqno);
+				err = -EINVAL;
+			}
+
+			i915_timeline_put(tl);
+			count++;
+
+			if (err)
+				goto out;
+		} while (!__igt_timeout(end_time, NULL));
+	}
+
+out:
+	intel_runtime_pm_put(i915, wakeref);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return err;
+}
+
+/*
+ * Entry point for the timeline live selftests (run against real
+ * hardware); recycle runs first so it sees freshly allocated pages.
+ */
+int i915_timeline_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_hwsp_recycle),
+		SUBTEST(live_hwsp_engine),
+		SUBTEST(live_hwsp_alternate),
+	};
+
+	return i915_subtests(tests, i915);
+}
@@ -34,12 +34,17 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
const unsigned long sz = PAGE_SIZE / 2;
struct mock_ring *ring;
+ int err;
ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
if (!ring)
return NULL;
- i915_timeline_init(engine->i915, &ring->timeline, engine->name);
+ err = i915_timeline_init(engine->i915, &ring->timeline, engine->name);
+ if (err) {
+ kfree(ring);
+ return NULL;
+ }
ring->base.size = sz;
ring->base.effective_size = sz;
@@ -209,7 +214,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.emit_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
- i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
+ if (i915_timeline_init(i915, &engine->base.timeline, engine->base.name))
+ goto err_free;
i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(&engine->base);
@@ -227,6 +233,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
err_breadcrumbs:
intel_engine_fini_breadcrumbs(&engine->base);
i915_timeline_fini(&engine->base.timeline);
+err_free:
kfree(engine);
return NULL;
}
Allocate a page for use as a status page by a group of timelines. As we
only need a dword of storage for each (rounded up to a cacheline for
safety), we can pack multiple timelines into the same page. Each
timeline will then be able to track its own HW seqno.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h               |   4 +
 drivers/gpu/drm/i915/i915_timeline.c          | 149 ++++-
 drivers/gpu/drm/i915/i915_timeline.h          |  16 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        |  11 +-
 drivers/gpu/drm/i915/intel_lrc.c              |  20 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c       |   4 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h       |   2 +-
 .../drm/i915/selftests/i915_live_selftests.h  |   1 +
 .../drm/i915/selftests/i915_mock_selftests.h  |   2 +-
 .../gpu/drm/i915/selftests/i915_timeline.c    | 373 +++++++++++++++++-
 drivers/gpu/drm/i915/selftests/mock_engine.c  |  11 +-
 11 files changed, 569 insertions(+), 24 deletions(-)