[11/53] drm/i915: Update move_to_gpu() to take a request structure

Message ID: 1424366285-29232-12-git-send-email-John.C.Harrison@Intel.com
State: New, archived

Commit Message

John Harrison Feb. 19, 2015, 5:17 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The plan is to pass requests around as the basic submission tracking structure
rather than rings and contexts. This patch updates the move_to_gpu() code paths.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   10 +++++-----
 drivers/gpu/drm/i915/intel_lrc.c           |   10 ++++------
 2 files changed, 9 insertions(+), 11 deletions(-)
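
As background for the direction of this series, here is a minimal sketch of the idea using hypothetical, trimmed-down C types rather than the real i915 definitions. Only the request field names ring, ringbuf and ctx are taken from the diff below; the inner structures and the move_to_gpu_old()/move_to_gpu_new() names are illustrative:

struct drm_device;

/* Hypothetical stand-ins; the real structures carry far more state. */
struct intel_engine_cs  { struct drm_device *dev; };
struct intel_ringbuffer { struct intel_engine_cs *ring; };
struct intel_context    { int id; };

/* A request bundles everything a submission path needs to know. */
struct drm_i915_gem_request {
	struct intel_engine_cs  *ring;    /* engine the request runs on */
	struct intel_ringbuffer *ringbuf; /* ringbuffer commands are emitted to */
	struct intel_context    *ctx;     /* context the request executes in */
};

/* Before: cooperating objects threaded through each helper by hand. */
static int move_to_gpu_old(struct intel_ringbuffer *ringbuf,
			   struct intel_context *ctx)
{
	struct intel_engine_cs *ring = ringbuf->ring; /* derived locally */
	(void)ring; (void)ctx;
	return 0;
}

/* After: the request is the single submission-tracking handle, and the
 * ring, ringbuffer and context are all reachable through it. */
static int move_to_gpu_new(struct drm_i915_gem_request *req)
{
	(void)req->ring; (void)req->ringbuf; (void)req->ctx;
	return 0;
}

int main(void)
{
	struct intel_engine_cs ring = { 0 };
	struct intel_ringbuffer rb = { &ring };
	struct intel_context ctx = { 1 };
	struct drm_i915_gem_request req = { &ring, &rb, &ctx };

	return move_to_gpu_old(&rb, &ctx) | move_to_gpu_new(&req);
}

Note that the patch applies the same transformation twice: once to the legacy ringbuffer path (i915_gem_execbuffer_move_to_gpu) and once to the execlists path (execlists_move_to_gpu), where the explicit intel_context parameter disappears entirely because req->ctx supersedes it.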

Comments

Tomas Elf March 5, 2015, 3:54 p.m. UTC | #1
On 19/02/2015 17:17, John.C.Harrison@Intel.com wrote:
> [snip commit message and patch]

Reviewed-by: Tomas Elf <tomas.elf@intel.com>

Thanks,
Tomas

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 883cabd..da1e232 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -828,7 +828,7 @@ err:
 }
 
 static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
 	struct i915_vma *vma;
@@ -838,7 +838,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
-		ret = i915_gem_object_sync(obj, ring);
+		ret = i915_gem_object_sync(obj, req->ring);
 		if (ret)
 			return ret;
 
@@ -849,7 +849,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(ring->dev);
+		i915_gem_chipset_flush(req->ring->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -857,7 +857,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(ring);
+	return intel_ring_invalidate_all_caches(req->ring);
 }
 
 static bool
@@ -1186,7 +1186,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		}
 	}
 
-	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
 	if (ret)
 		goto error;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c42af08..efe970f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -578,11 +578,9 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
-				 struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -591,7 +589,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		ret = i915_gem_object_sync(obj, ring);
+		ret = i915_gem_object_sync(obj, req->ring);
 		if (ret)
 			return ret;
 
@@ -607,7 +605,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(ringbuf, ctx);
+	return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
 }
 
 /**
@@ -686,7 +684,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	ret = execlists_move_to_gpu(ringbuf, params->ctx, vmas);
+	ret = execlists_move_to_gpu(params->request, vmas);
 	if (ret)
 		return ret;