diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -296,6 +296,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
return ERR_CAST(ce);
}
+ ce->ring = __intel_context_ring_size(SZ_16K);
+
e->engines[id] = ce;
}
e->num_engines = id;
@@ -434,8 +436,6 @@ __create_context(struct drm_i915_private *i915)
i915_gem_context_set_bannable(ctx);
i915_gem_context_set_recoverable(ctx);
- ctx->ring_size = 4 * PAGE_SIZE;
-
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -565,8 +565,15 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
- if (!USES_GUC_SUBMISSION(to_i915(dev)))
- ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+ if (!USES_GUC_SUBMISSION(to_i915(dev))) {
+ const unsigned long ring_size = 512 * SZ_4K; /* max */
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ ce->ring = __intel_context_ring_size(ring_size);
+ i915_gem_context_unlock_engines(ctx);
+ }
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
@@ -605,7 +612,6 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
i915_gem_context_clear_bannable(ctx);
ctx->sched.priority = I915_USER_PRIORITY(prio);
- ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -1589,6 +1595,7 @@ set_engines(struct i915_gem_context *ctx,
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
+ struct intel_context *ce;
if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
__free_engines(set.engines, n);
@@ -1611,11 +1618,15 @@ set_engines(struct i915_gem_context *ctx,
return -ENOENT;
}
- set.engines->engines[n] = intel_context_create(ctx, engine);
- if (!set.engines->engines[n]) {
+ ce = intel_context_create(ctx, engine);
+ if (!ce) {
__free_engines(set.engines, n);
return -ENOMEM;
}
+
+ ce->ring = __intel_context_ring_size(SZ_16K);
+
+ set.engines->engines[n] = ce;
}
set.engines->num_engines = num_engines;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -169,9 +169,6 @@ struct i915_gem_context {
struct i915_sched_attr sched;
- /** ring_size: size for allocating the per-engine ring buffer */
- u32 ring_size;
-
/** guilty_count: How many times this context has caused a GPU hang. */
atomic_t guilty_count;
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -214,6 +214,7 @@ intel_context_init(struct intel_context *ce,
ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
+ ce->ring = __intel_context_ring_size(SZ_4K);
INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -136,4 +136,9 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *intel_context_create_request(struct intel_context *ce);
+static inline struct intel_ring *__intel_context_ring_size(u64 sz)
+{
+ return u64_to_ptr(struct intel_ring, sz);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3196,9 +3196,8 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
goto error_deref_obj;
}
- ring = intel_engine_create_ring(engine,
- timeline,
- ce->gem_context->ring_size);
+ ring = intel_engine_create_ring(engine, timeline,
+ (unsigned long)ce->ring);
intel_timeline_put(timeline);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -131,6 +131,7 @@ static void hw_delay_complete(struct timer_list *t)
static void mock_context_unpin(struct intel_context *ce)
{
mock_timeline_unpin(ce->ring->timeline);
+ mock_ring_free(ce->ring);
}
static void mock_context_destroy(struct kref *ref)
@@ -139,9 +140,6 @@ static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
- if (ce->ring)
- mock_ring_free(ce->ring);
-
intel_context_fini(ce);
intel_context_free(ce);
}
@@ -150,11 +148,9 @@ static int mock_context_pin(struct intel_context *ce)
{
int ret;
- if (!ce->ring) {
- ce->ring = mock_ring(ce->engine);
- if (!ce->ring)
- return -ENOMEM;
- }
+ ce->ring = mock_ring(ce->engine);
+ if (!ce->ring)
+ return -ENOMEM;
ret = intel_context_active_acquire(ce);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -328,10 +328,14 @@ static void print_context_stats(struct seq_file *m,
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- if (ce->state)
- per_file_stats(0, ce->state->obj, &kstats);
- if (ce->ring)
+ intel_context_lock_pinned(ce);
+ if (intel_context_is_pinned(ce)) {
+ if (ce->state)
+ per_file_stats(0,
+ ce->state->obj, &kstats);
per_file_stats(0, ce->ring->vma->obj, &kstats);
+ }
+ intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
@@ -1592,12 +1596,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
for_each_gem_engine(ce,
i915_gem_context_lock_engines(ctx), it) {
- seq_printf(m, "%s: ", ce->engine->name);
- if (ce->state)
- describe_obj(m, ce->state->obj);
- if (ce->ring)
+ intel_context_lock_pinned(ce);
+ if (intel_context_is_pinned(ce)) {
+ seq_printf(m, "%s: ", ce->engine->name);
+ if (ce->state)
+ describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
- seq_putc(m, '\n');
+ seq_putc(m, '\n');
+ }
+ intel_context_unlock_pinned(ce);
}
i915_gem_context_unlock_engines(ctx);
Push the ring creation flags from the outer GEM context to the inner intel_cotnext to avoid an unsightly back-reference from inside the backend. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- drivers/gpu/drm/i915/gem/i915_gem_context.c | 25 +++++++++++++------ .../gpu/drm/i915/gem/i915_gem_context_types.h | 3 --- drivers/gpu/drm/i915/gt/intel_context.c | 1 + drivers/gpu/drm/i915/gt/intel_context.h | 5 ++++ drivers/gpu/drm/i915/gt/intel_lrc.c | 5 ++-- drivers/gpu/drm/i915/gt/mock_engine.c | 12 +++------ drivers/gpu/drm/i915/i915_debugfs.c | 23 +++++++++++------ 7 files changed, 45 insertions(+), 29 deletions(-)