[14/50] drm/i915: Introduce one context backing object per engine

Message ID 1399637360-4277-15-git-send-email-oscar.mateo@intel.com (mailing list archive)
State New, archived

Commit Message

oscar.mateo@intel.com May 9, 2014, 12:08 p.m. UTC
From: Oscar Mateo <oscar.mateo@intel.com>

When we start using Execlists, a context backing object only makes
sense for a given engine (because it will hold state data specific to
that engine), so multiplex the context struct to contain <no-of-engines>
objects.

In legacy ringbuffer submission mode, the only MI_SET_CONTEXT we really
perform is for the render engine, so the RCS backing object is the one
to use throughout the HW context code.

Originally, I implemented this by instantiating one new context for
every engine I wanted to use, but this change, suggested by Brad, makes
it more elegant.

No functional changes.

Cc: Brad Volkin <bradley.d.volkin@intel.com>
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |  2 +-
 drivers/gpu/drm/i915/i915_drv.h         |  4 +-
 drivers/gpu/drm/i915/i915_gem_context.c | 75 ++++++++++++++++++---------------
 3 files changed, 46 insertions(+), 35 deletions(-)
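
At a glance, the shape of the change: the single ctx->obj pointer
becomes an array of per-engine slots, and every ctx->obj user in the
legacy code paths switches to the RCS slot. Here is a minimal
compilable sketch of the before/after layout; the stub type, the
abridged engine enumeration, and the legacy_ctx_obj() helper are
illustrative stand-ins, not part of the patch or the driver:

/* Sketch only: stand-ins for the real i915 definitions. */
enum intel_ring_id { RCS = 0, VCS, BCS, VECS, I915_NUM_RINGS /* abridged */ };

struct drm_i915_gem_object { int stub; };	/* stand-in */

/* Before this patch: a single backing object per context. */
struct i915_hw_context_before {
	struct drm_i915_gem_object *obj;
};

/* After: one backing-object slot per engine. Execlists will populate
 * a slot for each engine it uses; legacy ringbuffer submission only
 * ever populates and reads the render engine's slot. */
struct i915_hw_context_after {
	struct {
		struct drm_i915_gem_object *obj;
	} engine[I915_NUM_RINGS];
};

/* Hypothetical helper: every former ctx->obj access in the legacy
 * paths becomes ctx->engine[RCS].obj, as the diff below shows. */
static inline struct drm_i915_gem_object *
legacy_ctx_obj(struct i915_hw_context_after *ctx)
{
	return ctx->engine[RCS].obj;
}

Wrapping the pointer in an anonymous per-engine struct, rather than
using a bare array of object pointers, leaves room to grow additional
per-engine state in the same slot as the series progresses.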

Patch

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0052460..65a740e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1707,7 +1707,7 @@  static int i915_context_status(struct seq_file *m, void *unused)
 			if (ring->default_context == ctx)
 				seq_printf(m, "(default context %s) ", ring->name);
 
-		describe_obj(m, ctx->obj);
+		describe_obj(m, ctx->engine[RCS].obj);
 		seq_putc(m, '\n');
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 35b2ae4..5be09a0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -595,7 +595,9 @@  struct i915_hw_context {
 	uint8_t remap_slice;
 	struct drm_i915_file_private *file_priv;
 	struct intel_engine *last_ring;
-	struct drm_i915_gem_object *obj;
+	struct {
+		struct drm_i915_gem_object *obj;
+	} engine[I915_NUM_RINGS];
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_address_space *vm;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 50337ae..f92cba9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -181,15 +181,16 @@  void i915_gem_context_free(struct kref *ctx_ref)
 	struct i915_hw_context *ctx = container_of(ctx_ref,
 						   typeof(*ctx), ref);
 	struct i915_hw_ppgtt *ppgtt = NULL;
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[RCS].obj;
 
-	if (ctx->obj) {
+	if (ctx_obj) {
 		/* We refcount even the aliasing PPGTT to keep the code symmetric */
-		if (USES_PPGTT(ctx->obj->base.dev))
+		if (USES_PPGTT(ctx_obj->base.dev))
 			ppgtt = ctx_to_ppgtt(ctx);
 
 		/* XXX: Free up the object before tearing down the address space, in
 		 * case we're bound in the PPGTT */
-		drm_gem_object_unreference(&ctx->obj->base);
+		drm_gem_object_unreference(&ctx_obj->base);
 	}
 
 	if (ppgtt)
@@ -224,6 +225,7 @@  __create_hw_context(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_context *ctx;
+	struct drm_i915_gem_object *ctx_obj;
 	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -234,8 +236,9 @@  __create_hw_context(struct drm_device *dev,
 	list_add_tail(&ctx->link, &dev_priv->context_list);
 
 	if (dev_priv->hw_context_size) {
-		ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
-		if (ctx->obj == NULL) {
+		ctx->engine[RCS].obj = ctx_obj = i915_gem_alloc_object(dev,
+							dev_priv->hw_context_size);
+		if (ctx_obj == NULL) {
 			ret = -ENOMEM;
 			goto err_out;
 		}
@@ -249,7 +252,7 @@  __create_hw_context(struct drm_device *dev,
 		 * negative performance impact.
 		 */
 		if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) {
-			ret = i915_gem_object_set_cache_level(ctx->obj,
+			ret = i915_gem_object_set_cache_level(ctx_obj,
 							      I915_CACHE_L3_LLC);
 			/* Failure shouldn't ever happen this early */
 			if (WARN_ON(ret))
@@ -293,6 +296,7 @@  i915_gem_create_context(struct drm_device *dev,
 	const bool is_global_default_ctx = file_priv == NULL;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_context *ctx;
+	struct drm_i915_gem_object *ctx_obj;
 	int ret = 0;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -301,7 +305,8 @@  i915_gem_create_context(struct drm_device *dev,
 	if (IS_ERR(ctx))
 		return ctx;
 
-	if (is_global_default_ctx && ctx->obj) {
+	ctx_obj = ctx->engine[RCS].obj;
+	if (is_global_default_ctx && ctx_obj) {
 		/* We may need to do things with the shrinker which
 		 * require us to immediately switch back to the default
 		 * context. This can cause a problem as pinning the
@@ -309,7 +314,7 @@  i915_gem_create_context(struct drm_device *dev,
 		 * be available. To avoid this we always pin the default
 		 * context.
 		 */
-		ret = i915_gem_obj_ggtt_pin(ctx->obj,
+		ret = i915_gem_obj_ggtt_pin(ctx_obj,
 					    get_context_alignment(dev), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -349,8 +354,8 @@  i915_gem_create_context(struct drm_device *dev,
 	return ctx;
 
 err_unpin:
-	if (is_global_default_ctx && ctx->obj)
-		i915_gem_object_ggtt_unpin(ctx->obj);
+	if (is_global_default_ctx && ctx_obj)
+		i915_gem_object_ggtt_unpin(ctx_obj);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ERR_PTR(ret);
@@ -366,6 +371,7 @@  void i915_gem_context_reset(struct drm_device *dev)
 	 * the next switch */
 	for_each_ring(ring, dev_priv, i) {
 		struct i915_hw_context *dctx = ring->default_context;
+		struct drm_i915_gem_object *dctx_obj = dctx->engine[RCS].obj;
 
 		/* Do a fake switch to the default context */
 		if (ring->last_context == dctx)
@@ -374,12 +380,12 @@  void i915_gem_context_reset(struct drm_device *dev)
 		if (!ring->last_context)
 			continue;
 
-		if (dctx->obj && i == RCS) {
-			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
+		if (dctx_obj && i == RCS) {
+			WARN_ON(i915_gem_obj_ggtt_pin(dctx_obj,
 						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
-			dctx->obj->base.write_domain = 0;
-			dctx->obj->active = 0;
+			dctx_obj->base.write_domain = 0;
+			dctx_obj->active = 0;
 		}
 
 		i915_gem_context_unreference(ring->last_context);
@@ -428,10 +434,11 @@  void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct i915_hw_context *dctx = dev_priv->ring[RCS].default_context;
+	struct drm_i915_gem_object *dctx_obj = dctx->engine[RCS].obj;
 	struct intel_engine *ring;
 	int unused;
 
-	if (dctx->obj) {
+	if (dctx_obj) {
 		/* The only known way to stop the gpu from accessing the hw context is
 		 * to reset it. Do this as the very last operation to avoid confusing
 		 * other code, leading to spurious errors. */
@@ -446,8 +453,8 @@  void i915_gem_context_fini(struct drm_device *dev)
 		WARN_ON(!dev_priv->ring[RCS].last_context);
 		if (dev_priv->ring[RCS].last_context == dctx) {
 			/* Fake switch to NULL context */
-			WARN_ON(dctx->obj->active);
-			i915_gem_object_ggtt_unpin(dctx->obj);
+			WARN_ON(dctx->engine[RCS].obj->active);
+			i915_gem_object_ggtt_unpin(dctx_obj);
 			i915_gem_context_unreference(dctx);
 			dev_priv->ring[RCS].last_context = NULL;
 		}
@@ -461,7 +468,7 @@  void i915_gem_context_fini(struct drm_device *dev)
 		ring->last_context = NULL;
 	}
 
-	i915_gem_object_ggtt_unpin(dctx->obj);
+	i915_gem_object_ggtt_unpin(dctx_obj);
 	i915_gem_context_unreference(dctx);
 }
 
@@ -575,7 +582,7 @@  mi_set_context(struct intel_engine *ring,
 
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
+	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->engine[RCS].obj) |
 			MI_MM_SPACE_GTT |
 			MI_SAVE_EXT_STATE_EN |
 			MI_RESTORE_EXT_STATE_EN |
@@ -602,12 +609,14 @@  static int do_switch(struct intel_engine *ring,
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct i915_hw_context *from = ring->last_context;
 	struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
+	struct drm_i915_gem_object *to_obj = to->engine[RCS].obj;
+	struct drm_i915_gem_object *from_obj = from ? from->engine[RCS].obj : NULL;
 	u32 hw_flags = 0;
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
-		BUG_ON(from->obj == NULL);
-		BUG_ON(!i915_gem_obj_is_pinned(from->obj));
+		BUG_ON(from_obj == NULL);
+		BUG_ON(!i915_gem_obj_is_pinned(from_obj));
 	}
 
 	if (from == to && from->last_ring == ring && !to->remap_slice)
@@ -615,7 +624,7 @@  static int do_switch(struct intel_engine *ring,
 
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->obj,
+		ret = i915_gem_obj_ggtt_pin(to_obj,
 					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
@@ -648,14 +657,14 @@  static int do_switch(struct intel_engine *ring,
 	 *
 	 * XXX: We need a real interface to do this instead of trickery.
 	 */
-	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
+	ret = i915_gem_object_set_to_gtt_domain(to_obj, false);
 	if (ret)
 		goto unpin_out;
 
-	if (!to->obj->has_global_gtt_mapping) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(to->obj,
+	if (!to_obj->has_global_gtt_mapping) {
+		struct i915_vma *vma = i915_gem_obj_to_vma(to_obj,
 							   &dev_priv->gtt.base);
-		vma->bind_vma(vma, to->obj->cache_level, GLOBAL_BIND);
+		vma->bind_vma(vma, to_obj->cache_level, GLOBAL_BIND);
 	}
 
 	if (!to->is_initialized || i915_gem_context_is_default(to))
@@ -684,8 +693,8 @@  static int do_switch(struct intel_engine *ring,
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
+		from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from_obj), ring);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -693,11 +702,11 @@  static int do_switch(struct intel_engine *ring,
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->obj->dirty = 1;
-		BUG_ON(from->obj->ring != ring);
+		from_obj->dirty = 1;
+		BUG_ON(from_obj->ring != ring);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->obj);
+		i915_gem_object_ggtt_unpin(from_obj);
 		i915_gem_context_unreference(from);
 	}
 
@@ -712,7 +721,7 @@  done:
 
 unpin_out:
 	if (ring->id == RCS)
-		i915_gem_object_ggtt_unpin(to->obj);
+		i915_gem_object_ggtt_unpin(to_obj);
 	return ret;
 }
 
@@ -733,7 +742,7 @@  int i915_switch_context(struct intel_engine *ring,
 
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (to->obj == NULL) { /* We have the fake context */
+	if (to->engine[RCS].obj == NULL) { /* We have the fake context */
 		if (to != ring->last_context) {
 			i915_gem_context_reference(to);
 			if (ring->last_context)