[v4,7/7] drm/i915: Tidy up execbuffer command parsing code

Message ID 1415398927-16572-8-git-send-email-bradley.d.volkin@intel.com (mailing list archive)
State New, archived

Commit Message

bradley.d.volkin@intel.com Nov. 7, 2014, 10:22 p.m. UTC
From: Brad Volkin <bradley.d.volkin@intel.com>

Move it to a separate function since the main do_execbuffer function
already has so much going on.

Signed-off-by: Brad Volkin <bradley.d.volkin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 136 +++++++++++++++++------------
 1 file changed, 79 insertions(+), 57 deletions(-)

Comments

Shuang He Nov. 7, 2014, 10:25 p.m. UTC | #1
Tested-By: PRC QA PRTS (Patch Regression Test System Contact: shuang.he@intel.com)
-------------------------------------Summary-------------------------------------
Platform: baseline_drm_intel_nightly_pass_rate->patch_applied_pass_rate
BYT: pass/total=348/348->348/348
PNV: pass/total=325/328->324/328
ILK: pass/total=329/330->330/330
IVB: pass/total=538/546->534/546
SNB: pass/total=531/541->532/541
HSW: pass/total=476/487->471/487
BDW: pass/total=430/435->430/435
-------------------------------------Detailed-------------------------------------
test_platform: test_suite, test_case, result_with_drm_intel_nightly(count, machine_id...)...->result_with_patch_applied(count, machine_id)...
PNV: Intel_gpu_tools, igt_gem_concurrent_blit_cpu-rcs-early-read-interruptible, DMESG_WARN(1, M25)PASS(6, M23M25) -> DMESG_WARN(1, M23)PASS(3, M23)
ILK: Intel_gpu_tools, igt_kms_flip_flip-vs-panning-vs-hang, DMESG_WARN(1, M26)PASS(6, M6) -> PASS(4, M6)
IVB: Intel_gpu_tools, igt_gem_exec_parse_cmd-crossing-page, FAIL(3, M34)PASS(1, M4) -> FAIL(1, M34)PASS(3, M34)
IVB: Intel_gpu_tools, igt_gem_madvise_dontneed-before-exec, FAIL(3, M34)PASS(1, M4) -> FAIL(1, M34)PASS(3, M34)
IVB: Intel_gpu_tools, igt_kms_pipe_crc_basic_hang-read-crc-pipe-B, PASS(4, M4M34) -> TIMEOUT(1, M34)PASS(3, M34)
IVB: Intel_gpu_tools, igt_kms_pipe_crc_basic_hang-read-crc-pipe-C, PASS(4, M4M34) -> TIMEOUT(1, M34)PASS(3, M34)
SNB: Intel_gpu_tools, igt_kms_cursor_crc_cursor-256x256-random, DMESG_WARN(1, M22)PASS(6, M35) -> PASS(4, M35)
HSW: Intel_gpu_tools, igt_gem_bad_reloc_negative-reloc, NSPT(1, M39)PASS(3, M39) -> NSPT(2, M39)PASS(2, M39)
HSW: Intel_gpu_tools, igt_gem_bad_reloc_negative-reloc-lut, PASS(4, M39) -> NSPT(1, M39)PASS(3, M39)
HSW: Intel_gpu_tools, igt_gem_exec_parse_cmd-crossing-page, FAIL(3, M39)PASS(1, M39) -> FAIL(1, M39)PASS(3, M39)
HSW: Intel_gpu_tools, igt_gem_madvise_dontneed-before-exec, FAIL(3, M39)PASS(1, M39) -> FAIL(1, M39)PASS(3, M39)
HSW: Intel_gpu_tools, igt_gem_reset_stats_close-pending-fork-reverse-render, PASS(4, M39) -> NO_RESULT(1, M39)PASS(3, M39)
HSW: Intel_gpu_tools, igt_kms_cursor_crc_cursor-128x128-onscreen, DMESG_WARN(2, M39)PASS(5, M19M39) -> PASS(4, M39)
HSW: Intel_gpu_tools, igt_kms_cursor_crc_cursor-256x256-offscreen, DMESG_WARN(1, M39)PASS(6, M19M39) -> DMESG_WARN(1, M39)PASS(3, M39)
HSW: Intel_gpu_tools, igt_kms_cursor_crc_cursor-256x256-onscreen, PASS(4, M39) -> DMESG_WARN(3, M39)PASS(1, M39)
HSW: Intel_gpu_tools, igt_kms_cursor_crc_cursor-64x64-offscreen, DMESG_WARN(2, M39)PASS(2, M39) -> DMESG_WARN(4, M39)
HSW: Intel_gpu_tools, igt_kms_cursor_crc_cursor-64x64-sliding, DMESG_WARN(1, M39)PASS(3, M39) -> DMESG_WARN(1, M39)PASS(3, M39)
HSW: Intel_gpu_tools, igt_kms_flip_dpms-vs-vblank-race, DMESG_WARN(2, M39)PASS(5, M19M39) -> PASS(4, M39)
Chris Wilson Nov. 12, 2014, 9:37 a.m. UTC | #2
On Fri, Nov 07, 2014 at 02:22:07PM -0800, bradley.d.volkin@intel.com wrote:
> From: Brad Volkin <bradley.d.volkin@intel.com>
> 
> Move it to a separate function since the main do_execbuffer function
> already has so much going on.
> 
> Signed-off-by: Brad Volkin <bradley.d.volkin@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 136 +++++++++++++++++------------
>  1 file changed, 79 insertions(+), 57 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index a271bc0..58f0a6c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1026,6 +1026,75 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
>  	return 0;
>  }
>  
> +static struct drm_i915_gem_object*
> +i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
> +			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
> +			  struct eb_vmas *eb,
> +			  struct drm_i915_gem_object *batch_obj,
> +			  u32 batch_start_offset,
> +			  u32 batch_len,
> +			  bool is_master,
> +			  u32 *flags)
> +{
> +	struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
> +	struct drm_i915_gem_object *shadow_batch_obj;
> +	int ret;
> +
> +	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
> +						   batch_obj->base.size);
> +	if (IS_ERR(shadow_batch_obj))
> +		return shadow_batch_obj;
> +
> +	shadow_batch_obj->madv = I915_MADV_WILLNEED;
> +
> +	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
> +	if (ret)
> +		goto err;

Pardon? This feels like an implementation issue of i915_parse_cmds() and should
be resolved there. Presumably you are not actually reading back through
the GTT? That would be insane...

> +	ret = i915_parse_cmds(ring,
> +			      batch_obj,
> +			      shadow_batch_obj,
> +			      batch_start_offset,
> +			      batch_len,
> +			      is_master);
> +	i915_gem_object_ggtt_unpin(shadow_batch_obj);

Yet pin+unpin around the parser seems to serve no other purpose.
-Chris
Michael H. Nguyen Nov. 22, 2014, 1:17 a.m. UTC | #3
Hi Chris,

>>
>> +static struct drm_i915_gem_object*
>> +i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
>> +			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
>> +			  struct eb_vmas *eb,
>> +			  struct drm_i915_gem_object *batch_obj,
>> +			  u32 batch_start_offset,
>> +			  u32 batch_len,
>> +			  bool is_master,
>> +			  u32 *flags)
>> +{
>> +	struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
>> +	struct drm_i915_gem_object *shadow_batch_obj;
>> +	int ret;
>> +
>> +	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
>> +						   batch_obj->base.size);
>> +	if (IS_ERR(shadow_batch_obj))
>> +		return shadow_batch_obj;
>> +
>> +	shadow_batch_obj->madv = I915_MADV_WILLNEED;
>> +
>> +	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
>> +	if (ret)
>> +		goto err;
>
> >Pardon? This feels like an implementation issue of i915_parse_cmds() and should
> be resolved there. Presumably you are not actually reading back through
> the GTT? That would be insane...
>
>> +	ret = i915_parse_cmds(ring,
>> +			      batch_obj,
>> +			      shadow_batch_obj,
>> +			      batch_start_offset,
>> +			      batch_len,
>> +			      is_master);
>> +	i915_gem_object_ggtt_unpin(shadow_batch_obj);
>
> Yet pin+unpin around the parser seems to serve no other purpose.
Are you suggesting to remove the pin/unpin calls? If so, isn't pinning 
needed to ensure the backing store pages are available in vmap_batch()? 
i.e. obj->pages->sgl is populated w/ physical pages.

Or, are you suggesting to move the pin/unpin calls inside 
i915_parse_cmds() ?

Thx,
Mike
> -Chris
>
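
The distinction behind this question: for a CPU mapping, vmap_batch() only needs the object's backing pages (obj->pages) to be populated and held; a GGTT binding is not required for that. Below is a minimal sketch of that page-pinning pattern using the i915 page helpers of this era; the function name and the elided copy step are made up for illustration.

/*
 * Illustrative sketch only, not driver code: keep an object's backing
 * pages resident for CPU access without binding it into the GGTT.
 */
static int hold_pages_for_cpu_copy(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Populate obj->pages, the sg table a CPU vmap would walk. */
	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	/* Keep the pages from being reaped while they are mapped. */
	i915_gem_object_pin_pages(obj);

	/* ... vmap obj->pages->sgl and do the CPU copy/parse here ... */

	i915_gem_object_unpin_pages(obj);
	return 0;
}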
Daniel Vetter Nov. 24, 2014, 9:20 a.m. UTC | #4
On Fri, Nov 21, 2014 at 05:17:50PM -0800, Michael H. Nguyen wrote:
> Hi Chris,
> 
> >>
> >>+static struct drm_i915_gem_object*
> >>+i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
> >>+			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
> >>+			  struct eb_vmas *eb,
> >>+			  struct drm_i915_gem_object *batch_obj,
> >>+			  u32 batch_start_offset,
> >>+			  u32 batch_len,
> >>+			  bool is_master,
> >>+			  u32 *flags)
> >>+{
> >>+	struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
> >>+	struct drm_i915_gem_object *shadow_batch_obj;
> >>+	int ret;
> >>+
> >>+	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
> >>+						   batch_obj->base.size);
> >>+	if (IS_ERR(shadow_batch_obj))
> >>+		return shadow_batch_obj;
> >>+
> >>+	shadow_batch_obj->madv = I915_MADV_WILLNEED;
> >>+
> >>+	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
> >>+	if (ret)
> >>+		goto err;
> >
> >Pardon? This feels like an implementation issue of i915_parse_cmds() and should
> >be resolved there. Presumably you are not actually reading back through
> >the GTT? That would be insane...
> >
> >>+	ret = i915_parse_cmds(ring,
> >>+			      batch_obj,
> >>+			      shadow_batch_obj,
> >>+			      batch_start_offset,
> >>+			      batch_len,
> >>+			      is_master);
> >>+	i915_gem_object_ggtt_unpin(shadow_batch_obj);
> >
> >Yet pin+unpin around the parser seems to serve no other purpose.
> Are you suggesting to remove the pin/unpin calls? If so, isn't pinning
> needed to ensure the backing store pages are available in vmap_batch()? i.e.
> obj->pages->sgl is populated w/ physical pages.
> 
> Or, are you suggesting to move the pin/unpin calls inside i915_parse_cmds()
> ?

Yeah please push them down into the cmd parser around the part that
copies/checks the shadow batch. If we ever change the way we do that
copy/parsing (likely due to performance issues on vlv) then we also might
need to change the type of pinning. So better to keep things together.
-Daniel
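
A rough sketch of what pushing the pin down into the command parser could look like, keeping the i915_parse_cmds() signature used in this patch; this is only an illustration of the suggestion above, not the eventual implementation.

/*
 * Sketch only: i915_parse_cmds() takes ownership of pinning the shadow
 * batch around its copy/check, so the execbuffer path no longer does it.
 */
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
		    u32 batch_len,
		    bool is_master)
{
	int ret;

	/*
	 * Pin in whatever way the copy actually needs: if the copy and
	 * check go through a CPU vmap, pinning only the backing pages
	 * would suffice; the GGTT pin used in this patch also works.
	 */
	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
	if (ret)
		return ret;

	ret = 0; /* ... copy the user batch into the shadow and validate commands ... */

	i915_gem_object_ggtt_unpin(shadow_batch_obj);
	return ret;
}

With the pinning handled here, i915_gem_execbuffer_parse() in the patch below would drop its i915_gem_obj_ggtt_pin()/i915_gem_object_ggtt_unpin() pair around the call.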

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a271bc0..58f0a6c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1026,6 +1026,75 @@  i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
+static struct drm_i915_gem_object*
+i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+			  struct eb_vmas *eb,
+			  struct drm_i915_gem_object *batch_obj,
+			  u32 batch_start_offset,
+			  u32 batch_len,
+			  bool is_master,
+			  u32 *flags)
+{
+	struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
+	struct drm_i915_gem_object *shadow_batch_obj;
+	int ret;
+
+	shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
+						   batch_obj->base.size);
+	if (IS_ERR(shadow_batch_obj))
+		return shadow_batch_obj;
+
+	shadow_batch_obj->madv = I915_MADV_WILLNEED;
+
+	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
+	if (ret)
+		goto err;
+
+	ret = i915_parse_cmds(ring,
+			      batch_obj,
+			      shadow_batch_obj,
+			      batch_start_offset,
+			      batch_len,
+			      is_master);
+	i915_gem_object_ggtt_unpin(shadow_batch_obj);
+
+	if (ret) {
+		if (ret == -EACCES)
+			return batch_obj;
+	} else {
+		struct i915_vma *vma;
+
+		memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+
+		vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+		vma->exec_entry = shadow_exec_entry;
+		vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
+		drm_gem_object_reference(&shadow_batch_obj->base);
+		list_add_tail(&vma->exec_list, &eb->vmas);
+
+		shadow_batch_obj->base.pending_read_domains =
+			batch_obj->base.pending_read_domains;
+
+		/*
+		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+		 * bit from MI_BATCH_BUFFER_START commands issued in the
+		 * dispatch_execbuffer implementations. We specifically
+		 * don't want that set when the command parser is
+		 * enabled.
+		 *
+		 * FIXME: with aliasing ppgtt, buffers that should only
+		 * be in ggtt still end up in the aliasing ppgtt. remove
+		 * this check when that is fixed.
+		 */
+		if (USES_FULL_PPGTT(dev))
+			*flags |= I915_DISPATCH_SECURE;
+	}
+
+err:
+	return ret ? ERR_PTR(ret) : shadow_batch_obj;
+}
+
 int
 i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			       struct intel_engine_cs *ring,
@@ -1242,7 +1311,6 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
-	struct drm_i915_gem_object *shadow_batch_obj = NULL;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
 	struct intel_engine_cs *ring;
 	struct intel_context *ctx;
@@ -1369,63 +1437,17 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
 	if (i915_needs_cmd_parser(ring)) {
-		shadow_batch_obj =
-			i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
-						batch_obj->base.size);
-		if (IS_ERR(shadow_batch_obj)) {
-			ret = PTR_ERR(shadow_batch_obj);
-			/* Don't try to clean up the obj in the error path */
-			shadow_batch_obj = NULL;
-			goto err;
-		}
-
-		shadow_batch_obj->madv = I915_MADV_WILLNEED;
-
-		ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
-		if (ret)
+		batch_obj = i915_gem_execbuffer_parse(ring,
+						      &shadow_exec_entry,
+						      eb,
+						      batch_obj,
+						      args->batch_start_offset,
+						      args->batch_len,
+						      file->is_master,
+						      &flags);
+		if (IS_ERR(batch_obj)) {
+			ret = PTR_ERR(batch_obj);
 			goto err;
-
-		ret = i915_parse_cmds(ring,
-				      batch_obj,
-				      shadow_batch_obj,
-				      args->batch_start_offset,
-				      args->batch_len,
-				      file->is_master);
-		i915_gem_object_ggtt_unpin(shadow_batch_obj);
-
-		if (ret) {
-			if (ret != -EACCES)
-				goto err;
-		} else {
-			struct i915_vma *vma;
-
-			memset(&shadow_exec_entry, 0,
-			       sizeof(shadow_exec_entry));
-
-			vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
-			vma->exec_entry = &shadow_exec_entry;
-			vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
-			drm_gem_object_reference(&shadow_batch_obj->base);
-			list_add_tail(&vma->exec_list, &eb->vmas);
-
-			shadow_batch_obj->base.pending_read_domains =
-				batch_obj->base.pending_read_domains;
-
-			batch_obj = shadow_batch_obj;
-
-			/*
-			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-			 * bit from MI_BATCH_BUFFER_START commands issued in the
-			 * dispatch_execbuffer implementations. We specifically
-			 * don't want that set when the command parser is
-			 * enabled.
-			 *
-			 * FIXME: with aliasing ppgtt, buffers that should only
-			 * be in ggtt still end up in the aliasing ppgtt. remove
-			 * this check when that is fixed.
-			 */
-			if (USES_FULL_PPGTT(dev))
-				flags |= I915_DISPATCH_SECURE;
 		}
 	}