[57/59] drm/i915: Update a bunch of LRC functions to take requests

Message ID 1426768264-16996-58-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived

Commit Message

John Harrison March 19, 2015, 12:31 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

A bunch of the low level LRC functions were passing around ringbuf and ctx
pairs. In a few cases, they took the r/c pair and a request as well. This is
all quite messy and unnecessary. The context_queue() call is especially bad
since the fake request code got removed - it takes a request plus three extra
parameters that the caller must first extract from the request, only for the
function to check them against what it finds in the request anyway. Removing
all of this derivable data makes the code much simpler all round.

This patch updates those functions to just take the request structure.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c |   66 +++++++++++++++++---------------------
 1 file changed, 29 insertions(+), 37 deletions(-)
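
As an illustrative aside (not part of the patch itself), here is a minimal
sketch of the pattern with simplified placeholder types - the names mirror
the i915 structures of this era, but the definitions, and the helpers
context_queue_old()/context_queue_new(), are invented for the illustration.
The point is that the engine, context and ringbuffer are all reachable from
the request, so passing them separately only creates data that has to be
cross-checked:

/* Sketch only: placeholder types, not the real i915 definitions. */
#include <stdio.h>

struct intel_engine_cs { const char *name; };
struct intel_context { int id; };
struct intel_ringbuffer { unsigned int tail; };

struct drm_i915_gem_request {
	struct intel_engine_cs *ring;      /* engine the request runs on */
	struct intel_context *ctx;         /* context it belongs to */
	struct intel_ringbuffer *ringbuf;  /* ringbuffer it is emitted into */
	unsigned int tail;
};

/* Old style: redundant parameters, all derivable from the request, which
 * the function then has to sanity-check against the request itself. */
static int context_queue_old(struct intel_engine_cs *ring,
			     struct intel_context *to, unsigned int tail,
			     struct drm_i915_gem_request *request)
{
	if (ring != request->ring || to != request->ctx)
		return -1;                 /* caller and request disagree */
	request->tail = tail;
	return 0;
}

/* New style: everything comes from the request, nothing to cross-check. */
static int context_queue_new(struct drm_i915_gem_request *request)
{
	request->tail = request->ringbuf->tail;
	return 0;
}

int main(void)
{
	struct intel_engine_cs rcs = { "rcs" };
	struct intel_context ctx = { 1 };
	struct intel_ringbuffer rb = { 128 };
	struct drm_i915_gem_request req = { &rcs, &ctx, &rb, 0 };

	context_queue_old(&rcs, &ctx, rb.tail, &req);
	context_queue_new(&req);
	printf("tail now %u on %s (ctx %d)\n",
	       req.tail, req.ring->name, req.ctx->id);
	return 0;
}

The patch below applies this same transformation to execlists_context_queue()
and the other logical ring helpers.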

Comments

Tomas Elf March 31, 2015, 6:18 p.m. UTC | #1
On 19/03/2015 12:31, John.C.Harrison@Intel.com wrote:
> From: John Harrison <John.C.Harrison@Intel.com>
>
> A bunch of the low level LRC functions were passing around ringbuf and ctx
> pairs. In a few cases, they took the r/c pair and a request as well. This is
> all quite messy and unnecessary. The context_queue() call is especially bad
> since the fake request code got removed - it takes a request plus three extra
> parameters that the caller must first extract from the request, only for the
> function to check them against what it finds in the request anyway. Removing
> all of this derivable data makes the code much simpler all round.
>
> This patch updates those functions to just take the request structure.
>
> For: VIZ-5115
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> ---
>   drivers/gpu/drm/i915/intel_lrc.c |   66 +++++++++++++++++---------------------
>   1 file changed, 29 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 82190ad..ae00054 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -494,25 +494,20 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
>   		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
>   }
>
> -static int execlists_context_queue(struct intel_engine_cs *ring,
> -				   struct intel_context *to,
> -				   u32 tail,
> -				   struct drm_i915_gem_request *request)
> +static int execlists_context_queue(struct drm_i915_gem_request *request)
>   {
> +	struct intel_engine_cs *ring = request->ring;
>   	struct drm_i915_gem_request *cursor;
>   	struct drm_i915_private *dev_priv = ring->dev->dev_private;
>   	unsigned long flags;
>   	int num_elements = 0;
>
> -	if (to != ring->default_context)
> -		intel_lr_context_pin(ring, to);
> -
> -	WARN_ON(!request);
> -	WARN_ON(to != request->ctx);
> +	if (request->ctx != ring->default_context)
> +		intel_lr_context_pin(ring, request->ctx);
>
>   	i915_gem_request_reference(request);
>
> -	request->tail = tail;
> +	request->tail = request->ringbuf->tail;
>
>   	intel_runtime_pm_get(dev_priv);
>
> @@ -529,7 +524,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
>   					   struct drm_i915_gem_request,
>   					   execlist_link);
>
> -		if (to == tail_req->ctx) {
> +		if (request->ctx == tail_req->ctx) {
>   			WARN(tail_req->elsp_submitted != 0,
>   				"More than 2 already-submitted reqs queued\n");
>   			list_del(&tail_req->execlist_link);
> @@ -610,12 +605,12 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
>   	return 0;
>   }
>
> -static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
> -				       struct intel_context *ctx,
> +static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
>   				       int bytes)
>   {
> -	struct intel_engine_cs *ring = ringbuf->ring;
> -	struct drm_i915_gem_request *request;
> +	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_engine_cs *ring = req->ring;
> +	struct drm_i915_gem_request *target;

My only gripe here is that had you not renamed the request variable to
target, you wouldn't have had to change anything in this function below
this point. However, I do also agree that having one variable called req
and another one called request in the same function is less than optimal
(a minimal illustrative sketch of this naming point follows the quoted
diff below).

With that noted...

Reviewed-by: Tomas Elf <tomas.elf@intel.com>

Thanks,
Tomas

>   	int ret, new_space;
>
>   	/* The whole point of reserving space is to not wait! */
> @@ -624,30 +619,30 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
>   	if (intel_ring_space(ringbuf) >= bytes)
>   		return 0;
>
> -	list_for_each_entry(request, &ring->request_list, list) {
> +	list_for_each_entry(target, &ring->request_list, list) {
>   		/*
>   		 * The request queue is per-engine, so can contain requests
>   		 * from multiple ringbuffers. Here, we must ignore any that
>   		 * aren't from the ringbuffer we're considering.
>   		 */
> -		struct intel_context *ctx = request->ctx;
> +		struct intel_context *ctx = target->ctx;
>   		if (ctx->engine[ring->id].ringbuf != ringbuf)
>   			continue;
>
>   		/* Would completion of this request free enough space? */
> -		new_space = __intel_ring_space(request->postfix, ringbuf->tail,
> +		new_space = __intel_ring_space(target->postfix, ringbuf->tail,
>   				       ringbuf->size);
>   		if (new_space >= bytes)
>   			break;
>   	}
>
>   	/* It should always be possible to find a suitable request! */
> -	if (&request->list == &ring->request_list) {
> +	if (&target->list == &ring->request_list) {
>   		WARN_ON(true);
>   		return -ENOSPC;
>   	}
>
> -	ret = i915_wait_request(request);
> +	ret = i915_wait_request(target);
>   	if (ret)
>   		return ret;
>
> @@ -660,7 +655,7 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
>
>   /*
>    * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
> - * @ringbuf: Logical Ringbuffer to advance.
> + * @request: Request to advance the logical ringbuffer of.
>    *
>    * The tail is updated in our logical ringbuffer struct, not in the actual context. What
>    * really happens during submission is that the context and current tail will be placed
> @@ -668,23 +663,21 @@ static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
>    * point, the tail *inside* the context is updated and the ELSP written to.
>    */
>   static void
> -intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
> -				      struct intel_context *ctx,
> -				      struct drm_i915_gem_request *request)
> +intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>   {
> -	struct intel_engine_cs *ring = ringbuf->ring;
> +	struct intel_engine_cs *ring = request->ring;
>
> -	intel_logical_ring_advance(ringbuf);
> +	intel_logical_ring_advance(request->ringbuf);
>
>   	if (intel_ring_stopped(ring))
>   		return;
>
> -	execlists_context_queue(ring, ctx, ringbuf->tail, request);
> +	execlists_context_queue(request);
>   }
>
> -static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
> -				    struct intel_context *ctx)
> +static int logical_ring_wrap_buffer(struct drm_i915_gem_request *req)
>   {
> +	struct intel_ringbuffer *ringbuf = req->ringbuf;
>   	uint32_t __iomem *virt;
>   	int rem = ringbuf->size - ringbuf->tail;
>
> @@ -692,7 +685,7 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
>   	WARN_ON(ringbuf->reserved_in_use);
>
>   	if (ringbuf->space < rem) {
> -		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
> +		int ret = logical_ring_wait_for_space(req, rem);
>
>   		if (ret)
>   			return ret;
> @@ -709,22 +702,22 @@ static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
>   	return 0;
>   }
>
> -static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
> -				struct intel_context *ctx, int bytes)
> +static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
>   {
> +	struct intel_ringbuffer *ringbuf = req->ringbuf;
>   	int ret;
>
>   	if (!ringbuf->reserved_in_use)
>   		bytes += ringbuf->reserved_size;
>
>   	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
> -		ret = logical_ring_wrap_buffer(ringbuf, ctx);
> +		ret = logical_ring_wrap_buffer(req);
>   		if (unlikely(ret))
>   			return ret;
>   	}
>
>   	if (unlikely(ringbuf->space < bytes)) {
> -		ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
> +		ret = logical_ring_wait_for_space(req, bytes);
>   		if (unlikely(ret))
>   			return ret;
>   	}
> @@ -759,8 +752,7 @@ static int intel_logical_ring_begin(struct drm_i915_gem_request *req,
>   	if (ret)
>   		return ret;
>
> -	ret = logical_ring_prepare(req->ringbuf, req->ctx,
> -				   num_dwords * sizeof(uint32_t));
> +	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
>   	if (ret)
>   		return ret;
>
> @@ -1266,7 +1258,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
>   	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
>   	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
>   	intel_logical_ring_emit(ringbuf, MI_NOOP);
> -	intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
> +	intel_logical_ring_advance_and_submit(request);
>
>   	return 0;
>   }
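
(Illustrative aside, not from the thread: a minimal sketch of the naming
point discussed above, again with placeholder types - sketch_ctx,
sketch_request and first_with_same_ctx() are invented for the illustration.
Once the incoming parameter is named req, reusing request for the list
cursor is easy to misread, so a distinct name such as target keeps the two
roles apparent.)

/* Sketch only: placeholder types, not the real i915 definitions. */
#include <stddef.h>

struct sketch_ctx { int id; };

struct sketch_request {
	struct sketch_ctx *ctx;
	struct sketch_request *next;       /* simplified per-engine list */
};

/* The parameter is already a request, so the list cursor gets its own name. */
static struct sketch_request *
first_with_same_ctx(struct sketch_request *req, struct sketch_request *list)
{
	struct sketch_request *target;     /* deliberately not "request" */

	for (target = list; target; target = target->next) {
		if (target->ctx == req->ctx)   /* clear which request is which */
			return target;
	}
	return NULL;
}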
>
Patch

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 82190ad..ae00054 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -494,25 +494,20 @@  void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
 }
 
-static int execlists_context_queue(struct intel_engine_cs *ring,
-				   struct intel_context *to,
-				   u32 tail,
-				   struct drm_i915_gem_request *request)
+static int execlists_context_queue(struct drm_i915_gem_request *request)
 {
+	struct intel_engine_cs *ring = request->ring;
 	struct drm_i915_gem_request *cursor;
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	unsigned long flags;
 	int num_elements = 0;
 
-	if (to != ring->default_context)
-		intel_lr_context_pin(ring, to);
-
-	WARN_ON(!request);
-	WARN_ON(to != request->ctx);
+	if (request->ctx != ring->default_context)
+		intel_lr_context_pin(ring, request->ctx);
 
 	i915_gem_request_reference(request);
 
-	request->tail = tail;
+	request->tail = request->ringbuf->tail;
 
 	intel_runtime_pm_get(dev_priv);
 
@@ -529,7 +524,7 @@  static int execlists_context_queue(struct intel_engine_cs *ring,
 					   struct drm_i915_gem_request,
 					   execlist_link);
 
-		if (to == tail_req->ctx) {
+		if (request->ctx == tail_req->ctx) {
 			WARN(tail_req->elsp_submitted != 0,
 				"More than 2 already-submitted reqs queued\n");
 			list_del(&tail_req->execlist_link);
@@ -610,12 +605,12 @@  int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 	return 0;
 }
 
-static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
-				       struct intel_context *ctx,
+static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 				       int bytes)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
-	struct drm_i915_gem_request *request;
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_engine_cs *ring = req->ring;
+	struct drm_i915_gem_request *target;
 	int ret, new_space;
 
 	/* The whole point of reserving space is to not wait! */
@@ -624,30 +619,30 @@  static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 	if (intel_ring_space(ringbuf) >= bytes)
 		return 0;
 
-	list_for_each_entry(request, &ring->request_list, list) {
+	list_for_each_entry(target, &ring->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
 		 * from multiple ringbuffers. Here, we must ignore any that
 		 * aren't from the ringbuffer we're considering.
 		 */
-		struct intel_context *ctx = request->ctx;
+		struct intel_context *ctx = target->ctx;
 		if (ctx->engine[ring->id].ringbuf != ringbuf)
 			continue;
 
 		/* Would completion of this request free enough space? */
-		new_space = __intel_ring_space(request->postfix, ringbuf->tail,
+		new_space = __intel_ring_space(target->postfix, ringbuf->tail,
 				       ringbuf->size);
 		if (new_space >= bytes)
 			break;
 	}
 
 	/* It should always be possible to find a suitable request! */
-	if (&request->list == &ring->request_list) {
+	if (&target->list == &ring->request_list) {
 		WARN_ON(true);
 		return -ENOSPC;
 	}
 
-	ret = i915_wait_request(request);
+	ret = i915_wait_request(target);
 	if (ret)
 		return ret;
 
@@ -660,7 +655,7 @@  static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
 
 /*
  * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
- * @ringbuf: Logical Ringbuffer to advance.
+ * @request: Request to advance the logical ringbuffer of.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
  * really happens during submission is that the context and current tail will be placed
@@ -668,23 +663,21 @@  static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static void
-intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
-				      struct intel_context *ctx,
-				      struct drm_i915_gem_request *request)
+intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *ring = ringbuf->ring;
+	struct intel_engine_cs *ring = request->ring;
 
-	intel_logical_ring_advance(ringbuf);
+	intel_logical_ring_advance(request->ringbuf);
 
 	if (intel_ring_stopped(ring))
 		return;
 
-	execlists_context_queue(ring, ctx, ringbuf->tail, request);
+	execlists_context_queue(request);
 }
 
-static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
-				    struct intel_context *ctx)
+static int logical_ring_wrap_buffer(struct drm_i915_gem_request *req)
 {
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
@@ -692,7 +685,7 @@  static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
 	WARN_ON(ringbuf->reserved_in_use);
 
 	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
+		int ret = logical_ring_wait_for_space(req, rem);
 
 		if (ret)
 			return ret;
@@ -709,22 +702,22 @@  static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
-				struct intel_context *ctx, int bytes)
+static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
+	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	int ret;
 
 	if (!ringbuf->reserved_in_use)
 		bytes += ringbuf->reserved_size;
 
 	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(ringbuf, ctx);
+		ret = logical_ring_wrap_buffer(req);
 		if (unlikely(ret))
 			return ret;
 	}
 
 	if (unlikely(ringbuf->space < bytes)) {
-		ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
+		ret = logical_ring_wait_for_space(req, bytes);
 		if (unlikely(ret))
 			return ret;
 	}
@@ -759,8 +752,7 @@  static int intel_logical_ring_begin(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	ret = logical_ring_prepare(req->ringbuf, req->ctx,
-				   num_dwords * sizeof(uint32_t));
+	ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
 	if (ret)
 		return ret;
 
@@ -1266,7 +1258,7 @@  static int gen8_emit_request(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance_and_submit(ringbuf, request->ctx, request);
+	intel_logical_ring_advance_and_submit(request);
 
 	return 0;
 }