
[38/53] drm/i915: Update some flush helpers to take request structures

Message ID 1424366285-29232-39-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived

Commit Message

John Harrison Feb. 19, 2015, 5:17 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

Updated intel_emit_post_sync_nonzero_flush(), gen7_render_ring_cs_stall_wa(),
gen7_ring_fbc_flush() and gen8_emit_pipe_control() to take requests instead of
rings.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c |   29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)
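
Every helper converted here follows the same pattern: instead of taking the
engine (ring) directly, it takes the request and derives the engine from
req->ring internally, so callers only need the request in hand. Below is a
minimal, self-contained sketch of the before/after calling convention; the
struct definitions and helper names are simplified stand-ins for illustration
only, not the actual driver code.

/*
 * Illustrative sketch (hypothetical stub types): shows the calling-convention
 * change applied to each helper in this patch -- the engine is no longer
 * passed in, it is derived from the request.
 */
#include <stdio.h>

struct intel_engine_cs { const char *name; };
struct drm_i915_gem_request { struct intel_engine_cs *ring; };

/* Old shape: every caller had to pass the engine explicitly. */
static int flush_helper_old(struct intel_engine_cs *ring)
{
	printf("flush on %s\n", ring->name);
	return 0;
}

/* New shape: the helper takes the request and looks up the engine itself. */
static int flush_helper_new(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;

	printf("flush on %s\n", ring->name);
	return 0;
}

int main(void)
{
	struct intel_engine_cs rcs = { .name = "render ring" };
	struct drm_i915_gem_request req = { .ring = &rcs };

	flush_helper_old(&rcs);	/* old convention */
	flush_helper_new(&req);	/* new convention */
	return 0;
}

In the real driver the converted helpers are called from the gen6/gen7/gen8
flush paths, which already receive the request, as the diff below shows.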

Comments

Tomas Elf March 9, 2015, 8:46 p.m. UTC | #1
On 19/02/2015 17:17, John.C.Harrison@Intel.com wrote:
> From: John Harrison <John.C.Harrison@Intel.com>
>
> Updated intel_emit_post_sync_nonzero_flush(), gen7_render_ring_cs_stall_wa(),
> gen7_ring_fbc_flush() and gen8_emit_pipe_control() to take requests instead of
> rings.
>
> For: VIZ-5115
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> [snip]

Reviewed-by: Tomas Elf <tomas.elf@intel.com>

Thanks,
Tomas

Patch

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ca8f039..470fa93 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -214,8 +214,9 @@  gen4_render_ring_flush(struct drm_i915_gem_request *req,
  * really our business.  That leaves only stall at scoreboard.
  */
 static int
-intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)
+intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
@@ -258,7 +259,7 @@  gen6_render_ring_flush(struct drm_i915_gem_request *req,
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
-	ret = intel_emit_post_sync_nonzero_flush(ring);
+	ret = intel_emit_post_sync_nonzero_flush(req);
 	if (ret)
 		return ret;
 
@@ -302,8 +303,9 @@  gen6_render_ring_flush(struct drm_i915_gem_request *req,
 }
 
 static int
-gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
+gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
@@ -320,8 +322,9 @@  gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
 	return 0;
 }
 
-static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
+static int gen7_ring_fbc_flush(struct drm_i915_gem_request *req, u32 value)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (!ring->fbc_dirty)
@@ -389,7 +392,7 @@  gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		/* Workaround: we must issue a pipe_control with CS-stall bit
 		 * set before a pipe_control command that has the state cache
 		 * invalidate bit set. */
-		gen7_render_ring_cs_stall_wa(ring);
+		gen7_render_ring_cs_stall_wa(req);
 	}
 
 	ret = intel_ring_begin(ring, 4);
@@ -403,15 +406,16 @@  gen7_render_ring_flush(struct drm_i915_gem_request *req,
 	intel_ring_advance(ring);
 
 	if (!invalidate_domains && flush_domains)
-		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
+		return gen7_ring_fbc_flush(req, FBC_REND_NUKE);
 
 	return 0;
 }
 
 static int
-gen8_emit_pipe_control(struct intel_engine_cs *ring,
+gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(ring, 6);
@@ -433,9 +437,8 @@  static int
 gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *ring = req->ring;
 	u32 flags = 0;
-	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -455,7 +458,7 @@  gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
 		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
-		ret = gen8_emit_pipe_control(ring,
+		ret = gen8_emit_pipe_control(req,
 					     PIPE_CONTROL_CS_STALL |
 					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
 					     0);
@@ -463,12 +466,12 @@  gen8_render_ring_flush(struct drm_i915_gem_request *req,
 			return ret;
 	}
 
-	ret = gen8_emit_pipe_control(ring, flags, scratch_addr);
+	ret = gen8_emit_pipe_control(req, flags, scratch_addr);
 	if (ret)
 		return ret;
 
 	if (!invalidate_domains && flush_domains)
-		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
+		return gen7_ring_fbc_flush(req, FBC_REND_NUKE);
 
 	return 0;
 }
@@ -2482,7 +2485,7 @@  static int gen6_ring_flush(struct drm_i915_gem_request *req,
 
 	if (!invalidate && flush) {
 		if (IS_GEN7(dev))
-			return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
+			return gen7_ring_fbc_flush(req, FBC_REND_CACHE_CLEAN);
 		else if (IS_BROADWELL(dev))
 			dev_priv->fbc.need_sw_cache_clean = true;
 	}