
[02/12] drm/i915: Use the new rq->i915 field where appropriate

Message ID 1448023432-10726-2-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Nov. 20, 2015, 12:43 p.m. UTC
In a few frequent cases, having a direct pointer to the drm_i915_private
from the request is very useful.
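
For context, req->i915 is the back-pointer introduced earlier in this series (patch 01/12, not shown here). A minimal sketch of the idea follows; the initialisation site is assumed for illustration rather than taken from that patch:

	/* Sketch only: the field itself is added in patch 01/12 of the series. */
	struct drm_i915_gem_request {
		/* ... existing members ... */
		struct intel_engine_cs *ring;
		struct drm_i915_private *i915;	/* cached once at allocation */
		/* ... */
	};

	/* Presumed initialisation when the request is created, so later code
	 * can use req->i915 instead of chasing ring->dev->dev_private.
	 */
	static void example_init_request(struct drm_i915_gem_request *req,
					 struct intel_engine_cs *ring)
	{
		req->ring = ring;
		req->i915 = ring->dev->dev_private;
	}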

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c            | 22 +++++++-----------
 drivers/gpu/drm/i915/i915_gem_context.c    | 23 +++++++++----------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +--
 drivers/gpu/drm/i915/intel_lrc.c           |  8 +++----
 drivers/gpu/drm/i915/intel_pm.c            |  3 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 36 +++++++++++++-----------------
 6 files changed, 39 insertions(+), 56 deletions(-)

Comments

Daniel Vetter Nov. 24, 2015, 1:57 p.m. UTC | #1
On Fri, Nov 20, 2015 at 12:43:42PM +0000, Chris Wilson wrote:
> In a few frequent cases, having a direct pointer to the drm_i915_private
> from the request is very useful.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

req->i915 is a bit inconsistent imo, i915_dev would be better. Anyway, not
something you started, just makes the code a bit harder to speed-read ;-)

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Chris Wilson Nov. 24, 2015, 2:23 p.m. UTC | #2
On Tue, Nov 24, 2015 at 02:57:04PM +0100, Daniel Vetter wrote:
> On Fri, Nov 20, 2015 at 12:43:42PM +0000, Chris Wilson wrote:
> > In a few frequent cases, having a direct pointer to the drm_i915_private
> > from the request is very useful.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> 
> req->i915 is a bit inconsistent imo, i915_dev would be better. Anyway, not
> something you started, just makes the code a bit harder to speed-read ;-)

How does it strike you as inconsistent? rq->i915 was nicer :-p

The idea I have been trying to sell is i915->drm->dev for the struct
drm_i915_private / struct drm_device / struct device triplet.
-Chris
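
A hedged illustration of the triplet Chris describes, written with the proposed member names rather than the code as it stands in this patch (which still spells the first hop dev_priv->dev):

	/* Illustration only: the naming Chris proposes, not current i915 code. */
	static struct device *example_to_device(struct drm_i915_gem_request *rq)
	{
		struct drm_i915_private *i915 = rq->i915;	/* driver-private state     */
		struct drm_device *drm = i915->drm;		/* proposed drm_device name */
		struct device *dev = drm->dev;			/* underlying struct device */

		return dev;
	}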
Daniel Vetter Nov. 24, 2015, 2:41 p.m. UTC | #3
On Tue, Nov 24, 2015 at 02:23:03PM +0000, Chris Wilson wrote:
> On Tue, Nov 24, 2015 at 02:57:04PM +0100, Daniel Vetter wrote:
> > On Fri, Nov 20, 2015 at 12:43:42PM +0000, Chris Wilson wrote:
> > > In a few frequent cases, having a direct pointer to the drm_i915_private
> > > from the request is very useful.
> > > 
> > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > 
> > req->i915 is a bit inconsistent imo, i915_dev would be better. Anyway, not
> > something you started, just makes the code a bit harder to speed-read ;-)
> 
> How does it strike you as inconsistent? rq->i915 was nicer :-p
> 
> The idea I have been trying to sell is i915->drm->dev for the struct
> drm_i915_private / struct drm_device / struct device triplet.

Hm, we should do an overview section in i915 docs about best practices in
variable naming. I'm serious about this, since we have similar fun with
modeset stuff:

- Preferred: crtc for struct intel_crtc, drm_crtc for struct drm_crtc.
- Transitional: crtc for struct drm_crtc, intel_crtc for struct
  intel_crtc. But if you only have one, then don't use the prefix.

Maybe we should mention intel_ vs. i915_ vs. platform prefixes too?
-Daniel
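
As a concrete, made-up example of the preferred style above — crtc for the intel type, an explicit drm_ prefix for the core type:

	/* Hypothetical helper, for illustration of the naming convention only. */
	static void example_naming(struct intel_crtc *crtc)
	{
		struct drm_crtc *drm_crtc = &crtc->base;
		struct drm_i915_private *dev_priv = to_i915(drm_crtc->dev);

		/* work with crtc (intel type) and drm_crtc (core type) side by side */
		(void)dev_priv;
	}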

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e6ac049a4698..5c3d11d020f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1221,8 +1221,7 @@  int __i915_wait_request(struct drm_i915_gem_request *req,
 			struct intel_rps_client *rps)
 {
 	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	const bool irq_test_in_progress =
 		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1336,7 +1335,6 @@  out:
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 				   struct drm_file *file)
 {
-	struct drm_i915_private *dev_private;
 	struct drm_i915_file_private *file_priv;
 
 	WARN_ON(!req || !file || req->file_priv);
@@ -1347,7 +1345,6 @@  int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
 	if (req->file_priv)
 		return -EINVAL;
 
-	dev_private = req->ring->dev->dev_private;
 	file_priv = file->driver_priv;
 
 	spin_lock(&file_priv->mm.lock);
@@ -1425,18 +1422,16 @@  __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
 int
 i915_wait_request(struct drm_i915_gem_request *req)
 {
-	struct drm_device *dev;
 	struct drm_i915_private *dev_priv;
 	bool interruptible;
 	int ret;
 
 	BUG_ON(req == NULL);
 
-	dev = req->ring->dev;
-	dev_priv = dev->dev_private;
+	dev_priv = req->i915;
 	interruptible = dev_priv->mm.interruptible;
 
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 	if (ret)
@@ -2568,7 +2563,7 @@  void __i915_add_request(struct drm_i915_gem_request *request,
 		return;
 
 	ring = request->ring;
-	dev_priv = ring->dev->dev_private;
+	dev_priv = request->i915;
 	ringbuf = request->ringbuf;
 
 	/*
@@ -3150,8 +3145,8 @@  __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (i915_gem_request_completed(from_req, true))
 		return 0;
 
-	if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	if (!i915_semaphore_is_enabled(from_req->i915)) {
+		struct drm_i915_private *i915 = from_req->i915;
 		ret = __i915_wait_request(from_req,
 					  atomic_read(&i915->gpu_error.reset_counter),
 					  i915->mm.interruptible,
@@ -4648,13 +4643,12 @@  err:
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
 {
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
 	u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
 	int i, ret;
 
-	if (!HAS_L3_DPF(dev) || !remap_info)
+	if (!HAS_L3_DPF(dev_priv) || !remap_info)
 		return 0;
 
 	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 55d2985c1dbb..82a9f7197d32 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -483,8 +483,8 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
-		i915_semaphore_is_enabled(to_i915(ring->dev)) ?
-		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+		i915_semaphore_is_enabled(req->i915) ?
+		hweight32(INTEL_INFO(req->i915)->ring_mask) - 1 :
 		0;
 	int len, i, ret;
 
@@ -493,21 +493,21 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * explicitly, so we rely on the value at ring init, stored in
 	 * itlb_before_ctx_switch.
 	 */
-	if (IS_GEN6(ring->dev)) {
+	if (IS_GEN6(req->i915)) {
 		ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
 
 	/* These flags are for resource streamer on HSW+ */
-	if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8)
+	if (IS_HASWELL(req->i915) || INTEL_INFO(req->i915)->gen >= 8)
 		flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
-	else if (INTEL_INFO(ring->dev)->gen < 8)
+	else if (INTEL_INFO(req->i915)->gen < 8)
 		flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
 
 	len = 4;
-	if (INTEL_INFO(ring->dev)->gen >= 7)
+	if (INTEL_INFO(req->i915)->gen >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
 
 	ret = intel_ring_begin(req, len);
@@ -515,13 +515,13 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		return ret;
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
+	if (INTEL_INFO(req->i915)->gen >= 7) {
 		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
+			for_each_ring(signaller, req->i915, i) {
 				if (signaller == ring)
 					continue;
 
@@ -541,12 +541,12 @@  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 */
 	intel_ring_emit(ring, MI_NOOP);
 
-	if (INTEL_INFO(ring->dev)->gen >= 7) {
+	if (INTEL_INFO(req->i915)->gen >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
 			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-			for_each_ring(signaller, to_i915(ring->dev), i) {
+			for_each_ring(signaller, req->i915, i) {
 				if (signaller == ring)
 					continue;
 
@@ -785,10 +785,9 @@  unpin_out:
 int i915_switch_context(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
 	WARN_ON(i915.enable_execlists);
-	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&req->i915->dev->struct_mutex));
 
 	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
 		if (req->ctx != ring->last_context) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7ac4685e0d3e..e8164b644e0e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1066,7 +1066,6 @@  void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1093,7 +1092,7 @@  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				struct drm_i915_private *dev_priv = req->i915;
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 10020505be75..cfa77a59b352 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -307,8 +307,7 @@  static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
 {
 
 	struct intel_engine_cs *ring = rq0->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = rq0->i915;
 	uint64_t desc[2];
 
 	if (rq1) {
@@ -787,7 +786,7 @@  int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int ret;
 
 	WARN_ON(req == NULL);
-	dev_priv = req->ring->dev->dev_private;
+	dev_priv = req->i915;
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
@@ -1027,8 +1026,7 @@  static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	int ret, i;
 	struct intel_engine_cs *ring = req->ring;
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (WARN_ON_ONCE(w->count == 0))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aef9eb1c6c33..3d54f76ad176 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7410,8 +7410,7 @@  static void __intel_rps_boost_work(struct work_struct *work)
 	struct drm_i915_gem_request *req = boost->req;
 
 	if (!i915_gem_request_completed(req, true))
-		intel_rps_boost(to_i915(req->ring->dev), NULL,
-			       	req->emitted_jiffies);
+		intel_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
 	i915_gem_request_unreference__unlocked(req);
 	kfree(boost);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8d8ba717a022..9fbe730aae47 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -123,7 +123,6 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	flush_domains)
 {
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
 	u32 cmd;
 	int ret;
 
@@ -162,7 +161,7 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		cmd |= MI_EXE_FLUSH;
 
 	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-	    (IS_G4X(dev) || IS_GEN5(dev)))
+	    (IS_G4X(req->i915) || IS_GEN5(req->i915)))
 		cmd |= MI_INVALIDATE_ISP;
 
 	ret = intel_ring_begin(req, 2);
@@ -715,8 +714,7 @@  static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct i915_workarounds *w = &dev_priv->workarounds;
 
 	if (WARN_ON_ONCE(w->count == 0))
@@ -1187,12 +1185,11 @@  static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 8
 	struct intel_engine_cs *signaller = signaller_req->ring;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1228,12 +1225,11 @@  static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 {
 #define MBOX_UPDATE_DWORDS 6
 	struct intel_engine_cs *signaller = signaller_req->ring;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	int i, ret, num_rings;
 
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
 #undef MBOX_UPDATE_DWORDS
 
@@ -1266,13 +1262,12 @@  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
 	struct intel_engine_cs *signaller = signaller_req->ring;
-	struct drm_device *dev = signaller->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	int i, ret, num_rings;
 
 #define MBOX_UPDATE_DWORDS 3
-	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
+	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
 	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
 #undef MBOX_UPDATE_DWORDS
 
@@ -1349,7 +1344,7 @@  gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       u32 seqno)
 {
 	struct intel_engine_cs *waiter = waiter_req->ring;
-	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
+	struct drm_i915_private *dev_priv = waiter_req->i915;
 	int ret;
 
 	ret = intel_ring_begin(waiter_req, 4);
@@ -2201,8 +2196,8 @@  int intel_ring_idle(struct intel_engine_cs *ring)
 
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
-				   atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
-				   to_i915(ring->dev)->mm.interruptible,
+				   atomic_read(&req->i915->gpu_error.reset_counter),
+				   req->i915->mm.interruptible,
 				   NULL, NULL);
 }
 
@@ -2330,7 +2325,7 @@  int intel_ring_begin(struct drm_i915_gem_request *req,
 
 	WARN_ON(req == NULL);
 	ring = req->ring;
-	dev_priv = ring->dev->dev_private;
+	dev_priv = req->i915;
 
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
 				   dev_priv->mm.interruptible);
@@ -2467,7 +2462,7 @@  gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      unsigned dispatch_flags)
 {
 	struct intel_engine_cs *ring = req->ring;
-	bool ppgtt = USES_PPGTT(ring->dev) &&
+	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2541,7 +2536,6 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
 	struct intel_engine_cs *ring = req->ring;
-	struct drm_device *dev = ring->dev;
 	uint32_t cmd;
 	int ret;
 
@@ -2550,7 +2544,7 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (INTEL_INFO(dev)->gen >= 8)
+	if (INTEL_INFO(req->i915)->gen >= 8)
 		cmd += 1;
 
 	/* We always require a command barrier so that subsequent
@@ -2570,7 +2564,7 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 		cmd |= MI_INVALIDATE_TLB;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
-	if (INTEL_INFO(dev)->gen >= 8) {
+	if (INTEL_INFO(req->i915)->gen >= 8) {
 		intel_ring_emit(ring, 0); /* upper addr */
 		intel_ring_emit(ring, 0); /* value */
 	} else  {