[30/53] drm/i915: Update [vma|object]_move_to_active() to take request structures

Message ID 1424366285-29232-31-git-send-email-John.C.Harrison@Intel.com
State New, archived

Commit Message

John Harrison Feb. 19, 2015, 5:17 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

Now that everything above has been converted to use request structures, it is
possible to update the lower-level move_to_active() functions to be
request-based as well.
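
As a rough sketch of the call-site pattern (illustrative only, not code from
this patch), callers that previously extracted the engine now simply hand the
request down, and the engine is derived from it internally where still needed:

	/* before: caller digs the ring out of the request */
	i915_vma_move_to_active(vma, req->ring);

	/* after: pass the request itself; the ring is looked up
	 * internally via i915_gem_request_get_ring(req) when required
	 */
	i915_vma_move_to_active(vma, req);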

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h              |    2 +-
 drivers/gpu/drm/i915/i915_gem.c              |   17 ++++++++---------
 drivers/gpu/drm/i915/i915_gem_context.c      |    2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |    2 +-
 drivers/gpu/drm/i915/i915_gem_render_state.c |    2 +-
 drivers/gpu/drm/i915/intel_lrc.c             |    2 +-
 6 files changed, 13 insertions(+), 14 deletions(-)

Comments

Tomas Elf March 5, 2015, 7:39 p.m. UTC | #1
On 19/02/2015 17:17, John.C.Harrison@Intel.com wrote:
> From: John Harrison <John.C.Harrison@Intel.com>
>
> Now that everything above has been converted to use request structures, it is
> possible to update the lower-level move_to_active() functions to be
> request-based as well.
>
> For: VIZ-5115
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>

Reviewed-by: Tomas Elf <tomas.elf@intel.com>

Thanks,
Tomas

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9206328..e9fef4c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2747,7 +2747,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_request *to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring);
+			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 24fb7b9..0ae9be2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2265,17 +2265,16 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-			       struct intel_engine_cs *ring)
+			       struct drm_i915_gem_request *req)
 {
-	struct drm_i915_gem_request *req;
-	struct intel_engine_cs *old_ring;
+	struct intel_engine_cs *new_ring, *old_ring;
 
-	BUG_ON(ring == NULL);
+	BUG_ON(req == NULL);
 
-	req = intel_ring_get_request(ring);
+	new_ring = i915_gem_request_get_ring(req);
 	old_ring = i915_gem_request_get_ring(obj->last_read_req);
 
-	if (old_ring != ring && obj->last_write_req) {
+	if (old_ring != new_ring && obj->last_write_req) {
 		/* Keep the request relative to the current ring */
 		i915_gem_request_assign(&obj->last_write_req, req);
 	}
@@ -2286,16 +2285,16 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 		obj->active = 1;
 	}
 
-	list_move_tail(&obj->ring_list, &ring->active_list);
+	list_move_tail(&obj->ring_list, &new_ring->active_list);
 
 	i915_gem_request_assign(&obj->last_read_req, req);
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring)
+			     struct drm_i915_gem_request *req)
 {
 	list_move_tail(&vma->mm_list, &vma->vm->active_list);
-	return i915_gem_object_move_to_active(vma->obj, ring);
+	return i915_gem_object_move_to_active(vma->obj, req);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 938cd26..e4d75be 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -652,7 +652,7 @@ static int do_switch(struct drm_i915_gem_request *req)
 	 */
 	if (from != NULL) {
 		from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req->ring);
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req);
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 15e33a9..dc13751 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -966,7 +966,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, ring);
+		i915_vma_move_to_active(vma, req);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			i915_gem_request_assign(&obj->last_write_req, req);
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 85cc746..866274c 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -171,7 +171,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
 	WARN_ON(req->batch_obj);
 	req->batch_obj = so.obj;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3b4393c..479365e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1364,7 +1364,7 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 	if (ret)
 		goto out;
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req->ring);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
 
 	WARN_ON(req->batch_obj);
 	req->batch_obj = so.obj;