[30/38] drm/i915: Make context pinning part of intel_context_ops

Message ID 20190301140404.26690-30-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Series [01/38] drm/i915/execlists: Suppress redundant preemption

Commit Message

Chris Wilson March 1, 2019, 2:03 p.m. UTC
Push the intel_context pin callback down from intel_engine_cs onto the
context itself, now that there is a central caller, intel_context_pin(),
which can look up the intel_context itself.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_context.c         |  34 ++++++
 drivers/gpu/drm/i915/intel_context.h         |   7 +-
 drivers/gpu/drm/i915/intel_context_types.h   |   1 +
 drivers/gpu/drm/i915/intel_engine_types.h    |   2 -
 drivers/gpu/drm/i915/intel_lrc.c             | 114 ++++++++-----------
 drivers/gpu/drm/i915/intel_ringbuffer.c      |  45 ++------
 drivers/gpu/drm/i915/selftests/mock_engine.c |  32 +-----
 7 files changed, 98 insertions(+), 137 deletions(-)
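
In short, the patch replaces the per-engine context_pin() vfunc with a
pin() hook on intel_context_ops and hoists the common bookkeeping into a
single intel_context_pin(). A condensed sketch of the resulting flow,
trimmed from the diff below (lockdep asserts and the ctx->mutex around the
list_add are elided, so this is illustrative rather than the literal patch
text):

/* Each backend now only fills in the ops table... */
static const struct intel_context_ops execlists_context_ops = {
	.pin	 = execlists_context_pin,	/* int (*)(struct intel_context *ce) */
	.unpin	 = execlists_context_unpin,
	.destroy = execlists_context_destroy,
};

/* ...while refcounting and GEM context bookkeeping live in one place. */
struct intel_context *
intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
	struct intel_context *ce = intel_context_instance(ctx, engine);

	if (IS_ERR(ce))
		return ce;

	if (unlikely(!ce->pin_count++)) {
		int err = ce->ops->pin(ce); /* was engine->context_pin(engine, ctx) */

		if (err) {
			ce->pin_count = 0;
			return ERR_PTR(err);
		}

		list_add(&ce->active_link, &ctx->active_engines);
		i915_gem_context_get(ctx);
	}

	return ce;
}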

Comments

Tvrtko Ursulin March 5, 2019, 5:31 p.m. UTC | #1
On 01/03/2019 14:03, Chris Wilson wrote:
> Push the intel_context pin callback down from intel_engine_cs onto the
> context itself, now that there is a central caller, intel_context_pin(),
> which can look up the intel_context itself.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/intel_context.c         |  34 ++++++
>   drivers/gpu/drm/i915/intel_context.h         |   7 +-
>   drivers/gpu/drm/i915/intel_context_types.h   |   1 +
>   drivers/gpu/drm/i915/intel_engine_types.h    |   2 -
>   drivers/gpu/drm/i915/intel_lrc.c             | 114 ++++++++-----------
>   drivers/gpu/drm/i915/intel_ringbuffer.c      |  45 ++------
>   drivers/gpu/drm/i915/selftests/mock_engine.c |  32 +-----
>   7 files changed, 98 insertions(+), 137 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
> index 242b1b6ad253..7de02be0b552 100644
> --- a/drivers/gpu/drm/i915/intel_context.c
> +++ b/drivers/gpu/drm/i915/intel_context.c
> @@ -110,6 +110,40 @@ intel_context_instance(struct i915_gem_context *ctx,
>   	return pos;
>   }
>   
> +struct intel_context *
> +intel_context_pin(struct i915_gem_context *ctx,
> +		  struct intel_engine_cs *engine)
> +{
> +	struct intel_context *ce;
> +	int err;
> +
> +	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> +
> +	ce = intel_context_instance(ctx, engine);
> +	if (IS_ERR(ce))
> +		return ce;
> +
> +	if (unlikely(!ce->pin_count++)) {
> +		err = ce->ops->pin(ce);
> +		if (err)
> +			goto err_unpin;
> +
> +		mutex_lock(&ctx->mutex);
> +		list_add(&ce->active_link, &ctx->active_engines);
> +		mutex_unlock(&ctx->mutex);
> +
> +		i915_gem_context_get(ctx);
> +		GEM_BUG_ON(ce->gem_context != ctx);
> +	}
> +	GEM_BUG_ON(!ce->pin_count); /* no overflow! */
> +
> +	return ce;
> +
> +err_unpin:
> +	ce->pin_count = 0;
> +	return ERR_PTR(err);
> +}
> +
>   static void intel_context_retire(struct i915_active_request *active,
>   				 struct i915_request *rq)
>   {
> diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
> index c3fffd9b8ae4..aa34311a472e 100644
> --- a/drivers/gpu/drm/i915/intel_context.h
> +++ b/drivers/gpu/drm/i915/intel_context.h
> @@ -45,11 +45,8 @@ __intel_context_insert(struct i915_gem_context *ctx,
>   void
>   __intel_context_remove(struct intel_context *ce);
>   
> -static inline struct intel_context *
> -intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
> -{
> -	return engine->context_pin(engine, ctx);
> -}
> +struct intel_context *
> +intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
>   
>   static inline void __intel_context_pin(struct intel_context *ce)
>   {
> diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
> index 857f5c335324..804fa93de853 100644
> --- a/drivers/gpu/drm/i915/intel_context_types.h
> +++ b/drivers/gpu/drm/i915/intel_context_types.h
> @@ -19,6 +19,7 @@ struct intel_context;
>   struct intel_ring;
>   
>   struct intel_context_ops {
> +	int (*pin)(struct intel_context *ce);
>   	void (*unpin)(struct intel_context *ce);
>   	void (*destroy)(struct intel_context *ce);
>   };
> diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
> index 5dac6b439f95..aefc66605729 100644
> --- a/drivers/gpu/drm/i915/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/intel_engine_types.h
> @@ -357,8 +357,6 @@ struct intel_engine_cs {
>   	void		(*set_default_submission)(struct intel_engine_cs *engine);
>   
>   	const struct intel_context_ops *context;
> -	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
> -					     struct i915_gem_context *ctx);
>   
>   	int		(*request_alloc)(struct i915_request *rq);
>   	int		(*init_context)(struct i915_request *rq);
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 0800f8edffeb..8dddea8e1c97 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -166,9 +166,8 @@
>   
>   #define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
>   
> -static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
> -					    struct intel_engine_cs *engine,
> -					    struct intel_context *ce);
> +static int execlists_context_deferred_alloc(struct intel_context *ce,
> +					    struct intel_engine_cs *engine);
>   static void execlists_init_reg_state(u32 *reg_state,
>   				     struct intel_context *ce,
>   				     struct intel_engine_cs *engine,
> @@ -330,11 +329,10 @@ assert_priority_queue(const struct i915_request *prev,
>    * engine info, SW context ID and SW counter need to form a unique number
>    * (Context ID) per lrc.
>    */
> -static void
> -intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
> -				   struct intel_engine_cs *engine,
> -				   struct intel_context *ce)
> +static u64
> +lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
>   {
> +	struct i915_gem_context *ctx = ce->gem_context;
>   	u64 desc;
>   
>   	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
> @@ -352,7 +350,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
>   	 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
>   	 * anything below.
>   	 */
> -	if (INTEL_GEN(ctx->i915) >= 11) {
> +	if (INTEL_GEN(engine->i915) >= 11) {
>   		GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
>   		desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
>   								/* bits 37-47 */
> @@ -369,7 +367,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
>   		desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
>   	}
>   
> -	ce->lrc_desc = desc;
> +	return desc;
>   }
>   
>   static void unwind_wa_tail(struct i915_request *rq)
> @@ -1284,7 +1282,7 @@ static void execlists_context_unpin(struct intel_context *ce)
>   	i915_gem_context_put(ce->gem_context);
>   }
>   
> -static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
> +static int __context_pin(struct i915_vma *vma)
>   {
>   	unsigned int flags;
>   	int err;
> @@ -1307,11 +1305,14 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
>   }
>   
>   static void
> -__execlists_update_reg_state(struct intel_engine_cs *engine,
> -			     struct intel_context *ce)
> +__execlists_update_reg_state(struct intel_context *ce,
> +			     struct intel_engine_cs *engine)
>   {
> -	u32 *regs = ce->lrc_reg_state;
>   	struct intel_ring *ring = ce->ring;
> +	u32 *regs = ce->lrc_reg_state;
> +
> +	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
> +	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
>   
>   	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
>   	regs[CTX_RING_HEAD + 1] = ring->head;
> @@ -1324,25 +1325,26 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
>   	}
>   }
>   
> -static struct intel_context *
> -__execlists_context_pin(struct intel_engine_cs *engine,
> -			struct i915_gem_context *ctx,
> -			struct intel_context *ce)
> +static int
> +__execlists_context_pin(struct intel_context *ce,
> +			struct intel_engine_cs *engine)
>   {
>   	void *vaddr;
>   	int ret;
>   
> -	ret = execlists_context_deferred_alloc(ctx, engine, ce);
> +	GEM_BUG_ON(!ce->gem_context->ppgtt);
> +
> +	ret = execlists_context_deferred_alloc(ce, engine);
>   	if (ret)
>   		goto err;
>   	GEM_BUG_ON(!ce->state);
>   
> -	ret = __context_pin(ctx, ce->state);
> +	ret = __context_pin(ce->state);
>   	if (ret)
>   		goto err;
>   
>   	vaddr = i915_gem_object_pin_map(ce->state->obj,
> -					i915_coherent_map_type(ctx->i915) |
> +					i915_coherent_map_type(engine->i915) |
>   					I915_MAP_OVERRIDE);
>   	if (IS_ERR(vaddr)) {
>   		ret = PTR_ERR(vaddr);
> @@ -1353,26 +1355,16 @@ __execlists_context_pin(struct intel_engine_cs *engine,
>   	if (ret)
>   		goto unpin_map;
>   
> -	ret = i915_gem_context_pin_hw_id(ctx);
> +	ret = i915_gem_context_pin_hw_id(ce->gem_context);
>   	if (ret)
>   		goto unpin_ring;
>   
> -	intel_lr_context_descriptor_update(ctx, engine, ce);
> -
> -	GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
> -
> +	ce->lrc_desc = lrc_descriptor(ce, engine);
>   	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
> -
> -	__execlists_update_reg_state(engine, ce);
> +	__execlists_update_reg_state(ce, engine);
>   
>   	ce->state->obj->pin_global++;
> -
> -	mutex_lock(&ctx->mutex);
> -	list_add(&ce->active_link, &ctx->active_engines);
> -	mutex_unlock(&ctx->mutex);
> -
> -	i915_gem_context_get(ctx);
> -	return ce;
> +	return 0;
>   
>   unpin_ring:
>   	intel_ring_unpin(ce->ring);
> @@ -1381,31 +1373,16 @@ __execlists_context_pin(struct intel_engine_cs *engine,
>   unpin_vma:
>   	__i915_vma_unpin(ce->state);
>   err:
> -	ce->pin_count = 0;
> -	return ERR_PTR(ret);
> +	return ret;
>   }
>   
> -static struct intel_context *
> -execlists_context_pin(struct intel_engine_cs *engine,
> -		      struct i915_gem_context *ctx)
> +static int execlists_context_pin(struct intel_context *ce)
>   {
> -	struct intel_context *ce;
> -
> -	ce = intel_context_instance(ctx, engine);
> -	if (IS_ERR(ce))
> -		return ce;
> -
> -	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> -	GEM_BUG_ON(!ctx->ppgtt);
> -
> -	if (likely(ce->pin_count++))
> -		return ce;
> -	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
> -
> -	return __execlists_context_pin(engine, ctx, ce);
> +	return __execlists_context_pin(ce, ce->engine);
>   }

Do you need this trivial wrapper for later?

>   
>   static const struct intel_context_ops execlists_context_ops = {
> +	.pin = execlists_context_pin,
>   	.unpin = execlists_context_unpin,
>   	.destroy = execlists_context_destroy,
>   };
> @@ -2029,7 +2006,7 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
>   	intel_ring_update_space(rq->ring);
>   
>   	execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
> -	__execlists_update_reg_state(engine, rq->hw_context);
> +	__execlists_update_reg_state(rq->hw_context, engine);
>   
>   out_unlock:
>   	spin_unlock_irqrestore(&engine->timeline.lock, flags);
> @@ -2354,7 +2331,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>   	engine->reset.finish = execlists_reset_finish;
>   
>   	engine->context = &execlists_context_ops;
> -	engine->context_pin = execlists_context_pin;
>   	engine->request_alloc = execlists_request_alloc;
>   
>   	engine->emit_flush = gen8_emit_flush;
> @@ -2831,9 +2807,16 @@ populate_lr_context(struct intel_context *ce,
>   	return ret;
>   }
>   
> -static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
> -					    struct intel_engine_cs *engine,
> -					    struct intel_context *ce)
> +static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
> +{
> +	if (ctx->timeline)
> +		return i915_timeline_get(ctx->timeline);
> +	else
> +		return i915_timeline_create(ctx->i915, ctx->name, NULL);
> +}
> +
> +static int execlists_context_deferred_alloc(struct intel_context *ce,
> +					    struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_gem_object *ctx_obj;
>   	struct i915_vma *vma;
> @@ -2853,26 +2836,25 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
>   	 */
>   	context_size += LRC_HEADER_PAGES * PAGE_SIZE;
>   
> -	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
> +	ctx_obj = i915_gem_object_create(engine->i915, context_size);
>   	if (IS_ERR(ctx_obj))
>   		return PTR_ERR(ctx_obj);
>   
> -	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
> +	vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
>   	if (IS_ERR(vma)) {
>   		ret = PTR_ERR(vma);
>   		goto error_deref_obj;
>   	}
>   
> -	if (ctx->timeline)
> -		timeline = i915_timeline_get(ctx->timeline);
> -	else
> -		timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
> +	timeline = get_timeline(ce->gem_context);
>   	if (IS_ERR(timeline)) {
>   		ret = PTR_ERR(timeline);
>   		goto error_deref_obj;
>   	}
>   
> -	ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
> +	ring = intel_engine_create_ring(engine,
> +					timeline,
> +					ce->gem_context->ring_size);
>   	i915_timeline_put(timeline);
>   	if (IS_ERR(ring)) {
>   		ret = PTR_ERR(ring);
> @@ -2917,7 +2899,7 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
>   		list_for_each_entry(ce, &ctx->active_engines, active_link) {
>   			GEM_BUG_ON(!ce->ring);
>   			intel_ring_reset(ce->ring, 0);
> -			__execlists_update_reg_state(ce->engine, ce);
> +			__execlists_update_reg_state(ce, ce->engine);
>   		}
>   	}
>   }
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 9002f7f6d32e..6af2d393b7ff 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1508,11 +1508,9 @@ alloc_context_vma(struct intel_engine_cs *engine)
>   	return ERR_PTR(err);
>   }
>   
> -static struct intel_context *
> -__ring_context_pin(struct intel_engine_cs *engine,
> -		   struct i915_gem_context *ctx,
> -		   struct intel_context *ce)
> +static int ring_context_pin(struct intel_context *ce)
>   {
> +	struct intel_engine_cs *engine = ce->engine;
>   	int err;
>   
>   	/* One ringbuffer to rule them all */
> @@ -1523,55 +1521,29 @@ __ring_context_pin(struct intel_engine_cs *engine,
>   		struct i915_vma *vma;
>   
>   		vma = alloc_context_vma(engine);
> -		if (IS_ERR(vma)) {
> -			err = PTR_ERR(vma);
> -			goto err;
> -		}
> +		if (IS_ERR(vma))
> +			return PTR_ERR(vma);
>   
>   		ce->state = vma;
>   	}
>   
>   	err = __context_pin(ce);
>   	if (err)
> -		goto err;
> +		return err;
>   
>   	err = __context_pin_ppgtt(ce->gem_context);
>   	if (err)
>   		goto err_unpin;
>   
> -	mutex_lock(&ctx->mutex);
> -	list_add(&ce->active_link, &ctx->active_engines);
> -	mutex_unlock(&ctx->mutex);
> -
> -	i915_gem_context_get(ctx);
> -	return ce;
> +	return 0;
>   
>   err_unpin:
>   	__context_unpin(ce);
> -err:
> -	ce->pin_count = 0;
> -	return ERR_PTR(err);
> -}
> -
> -static struct intel_context *
> -ring_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
> -{
> -	struct intel_context *ce;
> -
> -	ce = intel_context_instance(ctx, engine);
> -	if (IS_ERR(ce))
> -		return ce;
> -
> -	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> -
> -	if (likely(ce->pin_count++))
> -		return ce;
> -	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
> -
> -	return __ring_context_pin(engine, ctx, ce);
> +	return err;
>   }
>   
>   static const struct intel_context_ops ring_context_ops = {
> +	.pin = ring_context_pin,
>   	.unpin = ring_context_unpin,
>   	.destroy = ring_context_destroy,
>   };
> @@ -2282,7 +2254,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
>   	engine->reset.finish = reset_finish;
>   
>   	engine->context = &ring_context_ops;
> -	engine->context_pin = ring_context_pin;
>   	engine->request_alloc = ring_request_alloc;
>   
>   	/*
> diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
> index f1a80006289d..5a5c3ba22061 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_engine.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
> @@ -137,41 +137,20 @@ static void mock_context_destroy(struct intel_context *ce)
>   		mock_ring_free(ce->ring);
>   }
>   
> -static struct intel_context *
> -mock_context_pin(struct intel_engine_cs *engine,
> -		 struct i915_gem_context *ctx)
> +static int mock_context_pin(struct intel_context *ce)
>   {
> -	struct intel_context *ce;
> -	int err = -ENOMEM;
> -
> -	ce = intel_context_instance(ctx, engine);
> -	if (IS_ERR(ce))
> -		return ce;
> -
> -	if (ce->pin_count++)
> -		return ce;
> -
>   	if (!ce->ring) {
> -		ce->ring = mock_ring(engine);
> +		ce->ring = mock_ring(ce->engine);
>   		if (!ce->ring)
> -			goto err;
> +			return -ENOMEM;
>   	}
>   
>   	mock_timeline_pin(ce->ring->timeline);
> -
> -	mutex_lock(&ctx->mutex);
> -	list_add(&ce->active_link, &ctx->active_engines);
> -	mutex_unlock(&ctx->mutex);
> -
> -	i915_gem_context_get(ctx);
> -	return ce;
> -
> -err:
> -	ce->pin_count = 0;
> -	return ERR_PTR(err);
> +	return 0;
>   }
>   
>   static const struct intel_context_ops mock_context_ops = {
> +	.pin = mock_context_pin,
>   	.unpin = mock_context_unpin,
>   	.destroy = mock_context_destroy,
>   };
> @@ -235,7 +214,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
>   	engine->base.status_page.addr = (void *)(engine + 1);
>   
>   	engine->base.context = &mock_context_ops;
> -	engine->base.context_pin = mock_context_pin;
>   	engine->base.request_alloc = mock_request_alloc;
>   	engine->base.emit_flush = mock_emit_flush;
>   	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
Chris Wilson March 5, 2019, 6 p.m. UTC | #2
Quoting Tvrtko Ursulin (2019-03-05 17:31:23)
> 
> 
> On 01/03/2019 14:03, Chris Wilson wrote:
> > Push the intel_context pin callback down from intel_engine_cs onto the
> > context itself, now that there is a central caller, intel_context_pin(),
> > which can look up the intel_context itself.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> > -static struct intel_context *
> > -execlists_context_pin(struct intel_engine_cs *engine,
> > -                   struct i915_gem_context *ctx)
> > +static int execlists_context_pin(struct intel_context *ce)
> >   {
> > -     struct intel_context *ce;
> > -
> > -     ce = intel_context_instance(ctx, engine);
> > -     if (IS_ERR(ce))
> > -             return ce;
> > -
> > -     lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> > -     GEM_BUG_ON(!ctx->ppgtt);
> > -
> > -     if (likely(ce->pin_count++))
> > -             return ce;
> > -     GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
> > -
> > -     return __execlists_context_pin(engine, ctx, ce);
> > +     return __execlists_context_pin(ce, ce->engine);
> >   }
> 
> Do you need this trivial wrapper for later?

Yes, it's being kept because the virtual engine wants to feed in a
different engine.
-Chris
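
To illustrate what "feed in a different engine" means here (a hypothetical
sketch, not part of this series; the virtual-engine code does not exist yet
and the names below are invented): keeping the engine argument lets a
future pin hook pass an engine other than ce->engine into the helper.

/*
 * Hypothetical: a virtual engine's pin hook could pin the context state
 * against a chosen physical sibling rather than the virtual ce->engine,
 * reusing the same two-argument helper. All names here are assumptions.
 */
static int virtual_context_pin(struct intel_context *ce)
{
	struct virtual_engine *ve = to_virtual_engine(ce->engine);

	return __execlists_context_pin(ce, ve->siblings[0]);
}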

Patch

diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
index 242b1b6ad253..7de02be0b552 100644
--- a/drivers/gpu/drm/i915/intel_context.c
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -110,6 +110,40 @@  intel_context_instance(struct i915_gem_context *ctx,
 	return pos;
 }
 
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx,
+		  struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	int err;
+
+	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+
+	ce = intel_context_instance(ctx, engine);
+	if (IS_ERR(ce))
+		return ce;
+
+	if (unlikely(!ce->pin_count++)) {
+		err = ce->ops->pin(ce);
+		if (err)
+			goto err_unpin;
+
+		mutex_lock(&ctx->mutex);
+		list_add(&ce->active_link, &ctx->active_engines);
+		mutex_unlock(&ctx->mutex);
+
+		i915_gem_context_get(ctx);
+		GEM_BUG_ON(ce->gem_context != ctx);
+	}
+	GEM_BUG_ON(!ce->pin_count); /* no overflow! */
+
+	return ce;
+
+err_unpin:
+	ce->pin_count = 0;
+	return ERR_PTR(err);
+}
+
 static void intel_context_retire(struct i915_active_request *active,
 				 struct i915_request *rq)
 {
diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
index c3fffd9b8ae4..aa34311a472e 100644
--- a/drivers/gpu/drm/i915/intel_context.h
+++ b/drivers/gpu/drm/i915/intel_context.h
@@ -45,11 +45,8 @@  __intel_context_insert(struct i915_gem_context *ctx,
 void
 __intel_context_remove(struct intel_context *ce);
 
-static inline struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
-{
-	return engine->context_pin(engine, ctx);
-}
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
 
 static inline void __intel_context_pin(struct intel_context *ce)
 {
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
index 857f5c335324..804fa93de853 100644
--- a/drivers/gpu/drm/i915/intel_context_types.h
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -19,6 +19,7 @@  struct intel_context;
 struct intel_ring;
 
 struct intel_context_ops {
+	int (*pin)(struct intel_context *ce);
 	void (*unpin)(struct intel_context *ce);
 	void (*destroy)(struct intel_context *ce);
 };
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
index 5dac6b439f95..aefc66605729 100644
--- a/drivers/gpu/drm/i915/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -357,8 +357,6 @@  struct intel_engine_cs {
 	void		(*set_default_submission)(struct intel_engine_cs *engine);
 
 	const struct intel_context_ops *context;
-	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
-					     struct i915_gem_context *ctx);
 
 	int		(*request_alloc)(struct i915_request *rq);
 	int		(*init_context)(struct i915_request *rq);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0800f8edffeb..8dddea8e1c97 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -166,9 +166,8 @@ 
 
 #define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
 
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-					    struct intel_engine_cs *engine,
-					    struct intel_context *ce);
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+					    struct intel_engine_cs *engine);
 static void execlists_init_reg_state(u32 *reg_state,
 				     struct intel_context *ce,
 				     struct intel_engine_cs *engine,
@@ -330,11 +329,10 @@  assert_priority_queue(const struct i915_request *prev,
  * engine info, SW context ID and SW counter need to form a unique number
  * (Context ID) per lrc.
  */
-static void
-intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
-				   struct intel_engine_cs *engine,
-				   struct intel_context *ce)
+static u64
+lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
 {
+	struct i915_gem_context *ctx = ce->gem_context;
 	u64 desc;
 
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -352,7 +350,7 @@  intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 	 * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
 	 * anything below.
 	 */
-	if (INTEL_GEN(ctx->i915) >= 11) {
+	if (INTEL_GEN(engine->i915) >= 11) {
 		GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
 		desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
 								/* bits 37-47 */
@@ -369,7 +367,7 @@  intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 		desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;	/* bits 32-52 */
 	}
 
-	ce->lrc_desc = desc;
+	return desc;
 }
 
 static void unwind_wa_tail(struct i915_request *rq)
@@ -1284,7 +1282,7 @@  static void execlists_context_unpin(struct intel_context *ce)
 	i915_gem_context_put(ce->gem_context);
 }
 
-static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
+static int __context_pin(struct i915_vma *vma)
 {
 	unsigned int flags;
 	int err;
@@ -1307,11 +1305,14 @@  static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 }
 
 static void
-__execlists_update_reg_state(struct intel_engine_cs *engine,
-			     struct intel_context *ce)
+__execlists_update_reg_state(struct intel_context *ce,
+			     struct intel_engine_cs *engine)
 {
-	u32 *regs = ce->lrc_reg_state;
 	struct intel_ring *ring = ce->ring;
+	u32 *regs = ce->lrc_reg_state;
+
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
 	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
 	regs[CTX_RING_HEAD + 1] = ring->head;
@@ -1324,25 +1325,26 @@  __execlists_update_reg_state(struct intel_engine_cs *engine,
 	}
 }
 
-static struct intel_context *
-__execlists_context_pin(struct intel_engine_cs *engine,
-			struct i915_gem_context *ctx,
-			struct intel_context *ce)
+static int
+__execlists_context_pin(struct intel_context *ce,
+			struct intel_engine_cs *engine)
 {
 	void *vaddr;
 	int ret;
 
-	ret = execlists_context_deferred_alloc(ctx, engine, ce);
+	GEM_BUG_ON(!ce->gem_context->ppgtt);
+
+	ret = execlists_context_deferred_alloc(ce, engine);
 	if (ret)
 		goto err;
 	GEM_BUG_ON(!ce->state);
 
-	ret = __context_pin(ctx, ce->state);
+	ret = __context_pin(ce->state);
 	if (ret)
 		goto err;
 
 	vaddr = i915_gem_object_pin_map(ce->state->obj,
-					i915_coherent_map_type(ctx->i915) |
+					i915_coherent_map_type(engine->i915) |
 					I915_MAP_OVERRIDE);
 	if (IS_ERR(vaddr)) {
 		ret = PTR_ERR(vaddr);
@@ -1353,26 +1355,16 @@  __execlists_context_pin(struct intel_engine_cs *engine,
 	if (ret)
 		goto unpin_map;
 
-	ret = i915_gem_context_pin_hw_id(ctx);
+	ret = i915_gem_context_pin_hw_id(ce->gem_context);
 	if (ret)
 		goto unpin_ring;
 
-	intel_lr_context_descriptor_update(ctx, engine, ce);
-
-	GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-
+	ce->lrc_desc = lrc_descriptor(ce, engine);
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
-	__execlists_update_reg_state(engine, ce);
+	__execlists_update_reg_state(ce, engine);
 
 	ce->state->obj->pin_global++;
-
-	mutex_lock(&ctx->mutex);
-	list_add(&ce->active_link, &ctx->active_engines);
-	mutex_unlock(&ctx->mutex);
-
-	i915_gem_context_get(ctx);
-	return ce;
+	return 0;
 
 unpin_ring:
 	intel_ring_unpin(ce->ring);
@@ -1381,31 +1373,16 @@  __execlists_context_pin(struct intel_engine_cs *engine,
 unpin_vma:
 	__i915_vma_unpin(ce->state);
 err:
-	ce->pin_count = 0;
-	return ERR_PTR(ret);
+	return ret;
 }
 
-static struct intel_context *
-execlists_context_pin(struct intel_engine_cs *engine,
-		      struct i915_gem_context *ctx)
+static int execlists_context_pin(struct intel_context *ce)
 {
-	struct intel_context *ce;
-
-	ce = intel_context_instance(ctx, engine);
-	if (IS_ERR(ce))
-		return ce;
-
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-	GEM_BUG_ON(!ctx->ppgtt);
-
-	if (likely(ce->pin_count++))
-		return ce;
-	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-	return __execlists_context_pin(engine, ctx, ce);
+	return __execlists_context_pin(ce, ce->engine);
 }
 
 static const struct intel_context_ops execlists_context_ops = {
+	.pin = execlists_context_pin,
 	.unpin = execlists_context_unpin,
 	.destroy = execlists_context_destroy,
 };
@@ -2029,7 +2006,7 @@  static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	intel_ring_update_space(rq->ring);
 
 	execlists_init_reg_state(regs, rq->hw_context, engine, rq->ring);
-	__execlists_update_reg_state(engine, rq->hw_context);
+	__execlists_update_reg_state(rq->hw_context, engine);
 
 out_unlock:
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -2354,7 +2331,6 @@  logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->reset.finish = execlists_reset_finish;
 
 	engine->context = &execlists_context_ops;
-	engine->context_pin = execlists_context_pin;
 	engine->request_alloc = execlists_request_alloc;
 
 	engine->emit_flush = gen8_emit_flush;
@@ -2831,9 +2807,16 @@  populate_lr_context(struct intel_context *ce,
 	return ret;
 }
 
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-					    struct intel_engine_cs *engine,
-					    struct intel_context *ce)
+static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+{
+	if (ctx->timeline)
+		return i915_timeline_get(ctx->timeline);
+	else
+		return i915_timeline_create(ctx->i915, ctx->name, NULL);
+}
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+					    struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *ctx_obj;
 	struct i915_vma *vma;
@@ -2853,26 +2836,25 @@  static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	 */
 	context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
-	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
+	ctx_obj = i915_gem_object_create(engine->i915, context_size);
 	if (IS_ERR(ctx_obj))
 		return PTR_ERR(ctx_obj);
 
-	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
+	vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		goto error_deref_obj;
 	}
 
-	if (ctx->timeline)
-		timeline = i915_timeline_get(ctx->timeline);
-	else
-		timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
+	timeline = get_timeline(ce->gem_context);
 	if (IS_ERR(timeline)) {
 		ret = PTR_ERR(timeline);
 		goto error_deref_obj;
 	}
 
-	ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+	ring = intel_engine_create_ring(engine,
+					timeline,
+					ce->gem_context->ring_size);
 	i915_timeline_put(timeline);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
@@ -2917,7 +2899,7 @@  void intel_lr_context_resume(struct drm_i915_private *i915)
 		list_for_each_entry(ce, &ctx->active_engines, active_link) {
 			GEM_BUG_ON(!ce->ring);
 			intel_ring_reset(ce->ring, 0);
-			__execlists_update_reg_state(ce->engine, ce);
+			__execlists_update_reg_state(ce, ce->engine);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9002f7f6d32e..6af2d393b7ff 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1508,11 +1508,9 @@  alloc_context_vma(struct intel_engine_cs *engine)
 	return ERR_PTR(err);
 }
 
-static struct intel_context *
-__ring_context_pin(struct intel_engine_cs *engine,
-		   struct i915_gem_context *ctx,
-		   struct intel_context *ce)
+static int ring_context_pin(struct intel_context *ce)
 {
+	struct intel_engine_cs *engine = ce->engine;
 	int err;
 
 	/* One ringbuffer to rule them all */
@@ -1523,55 +1521,29 @@  __ring_context_pin(struct intel_engine_cs *engine,
 		struct i915_vma *vma;
 
 		vma = alloc_context_vma(engine);
-		if (IS_ERR(vma)) {
-			err = PTR_ERR(vma);
-			goto err;
-		}
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
 		ce->state = vma;
 	}
 
 	err = __context_pin(ce);
 	if (err)
-		goto err;
+		return err;
 
 	err = __context_pin_ppgtt(ce->gem_context);
 	if (err)
 		goto err_unpin;
 
-	mutex_lock(&ctx->mutex);
-	list_add(&ce->active_link, &ctx->active_engines);
-	mutex_unlock(&ctx->mutex);
-
-	i915_gem_context_get(ctx);
-	return ce;
+	return 0;
 
 err_unpin:
 	__context_unpin(ce);
-err:
-	ce->pin_count = 0;
-	return ERR_PTR(err);
-}
-
-static struct intel_context *
-ring_context_pin(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
-{
-	struct intel_context *ce;
-
-	ce = intel_context_instance(ctx, engine);
-	if (IS_ERR(ce))
-		return ce;
-
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-	if (likely(ce->pin_count++))
-		return ce;
-	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-	return __ring_context_pin(engine, ctx, ce);
+	return err;
 }
 
 static const struct intel_context_ops ring_context_ops = {
+	.pin = ring_context_pin,
 	.unpin = ring_context_unpin,
 	.destroy = ring_context_destroy,
 };
@@ -2282,7 +2254,6 @@  static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	engine->reset.finish = reset_finish;
 
 	engine->context = &ring_context_ops;
-	engine->context_pin = ring_context_pin;
 	engine->request_alloc = ring_request_alloc;
 
 	/*
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index f1a80006289d..5a5c3ba22061 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -137,41 +137,20 @@  static void mock_context_destroy(struct intel_context *ce)
 		mock_ring_free(ce->ring);
 }
 
-static struct intel_context *
-mock_context_pin(struct intel_engine_cs *engine,
-		 struct i915_gem_context *ctx)
+static int mock_context_pin(struct intel_context *ce)
 {
-	struct intel_context *ce;
-	int err = -ENOMEM;
-
-	ce = intel_context_instance(ctx, engine);
-	if (IS_ERR(ce))
-		return ce;
-
-	if (ce->pin_count++)
-		return ce;
-
 	if (!ce->ring) {
-		ce->ring = mock_ring(engine);
+		ce->ring = mock_ring(ce->engine);
 		if (!ce->ring)
-			goto err;
+			return -ENOMEM;
 	}
 
 	mock_timeline_pin(ce->ring->timeline);
-
-	mutex_lock(&ctx->mutex);
-	list_add(&ce->active_link, &ctx->active_engines);
-	mutex_unlock(&ctx->mutex);
-
-	i915_gem_context_get(ctx);
-	return ce;
-
-err:
-	ce->pin_count = 0;
-	return ERR_PTR(err);
+	return 0;
 }
 
 static const struct intel_context_ops mock_context_ops = {
+	.pin = mock_context_pin,
 	.unpin = mock_context_unpin,
 	.destroy = mock_context_destroy,
 };
@@ -235,7 +214,6 @@  struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.status_page.addr = (void *)(engine + 1);
 
 	engine->base.context = &mock_context_ops;
-	engine->base.context_pin = mock_context_pin;
 	engine->base.request_alloc = mock_request_alloc;
 	engine->base.emit_flush = mock_emit_flush;
 	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;