[i-g-t,4/4] tests/gem_media_vme: Shut down half of subslices to avoid gpu hang on ICL

Message ID 20190108112450.29108-5-tvrtko.ursulin@linux.intel.com (mailing list archive)
State New, archived
Series: Per context dynamic (sub)slice power-gating

Commit Message

Tvrtko Ursulin Jan. 8, 2019, 11:24 a.m. UTC
From: Tony Ye <tony.ye@intel.com>

On Icelake we need to turn off subslices not containing the VME block or
the VME kernel will hang.

v2: (Tvrtko Ursulin)
 * Remove libdrm usage for setting context param.
 * Clean up bitmask operation.
 * Only apply the workaround for ICL.

v3: (Tvrtko Ursulin)
 * Added hang detector. (Chris Wilson)

v4: (Tvrtko Ursulin)
 * Rebase for hang detector moved to previous patch.
 * Tidy curly braces.

Signed-off-by: Tony Ye <tony.ye@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Tony Ye <tony.ye@intel.com>
---
 lib/gpu_cmds.c             | 12 ++++++++
 lib/gpu_cmds.h             |  3 ++
 lib/media_fill.c           |  2 +-
 tests/i915/gem_media_vme.c | 60 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 76 insertions(+), 1 deletion(-)
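
For reference, the workaround below boils down to three steps: query the context's current SSEU configuration, clear half of the bits in subslice_mask, and write the result back. A minimal sketch of that flow at the raw ioctl level (illustrative only; it assumes a kernel and headers exposing the I915_CONTEXT_PARAM_SSEU uAPI, which was still in flight when this patch was posted, and relies only on the subslice_mask field used by the patch):

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Illustrative sketch, not the test code: halve the context's subslice mask. */
static int halve_subslices(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param_sseu sseu = { };
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_SSEU,
		.ctx_id = ctx_id,
		.size = sizeof(sseu),
		.value = (uintptr_t)&sseu,
	};
	unsigned int n;

	/* Read the current configuration; failure means no SSEU support. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg))
		return -1;

	/* Clear the lowest half of the enabled subslice bits. */
	for (n = __builtin_popcountll(sseu.subslice_mask) / 2; n; n--)
		sseu.subslice_mask &= sseu.subslice_mask - 1;

	/* Write the reduced mask back for this context only. */
	return drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
}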

Comments

Joonas Lahtinen Jan. 8, 2019, 2:53 p.m. UTC | #1
Quoting Tvrtko Ursulin (2019-01-08 13:24:50)
> From: Tony Ye <tony.ye@intel.com>
> 
> On Icelake we need to turn off subslices not containing the VME block or
> the VME kernel will hang.
> 
> v2: (Tvrtko Ursulin)
>  * Remove libdrm usage for setting context param.
>  * Clean up bitmask operation.
>  * Only apply the workaround for ICL.
> 
> v3: (Tvrtko Ursulin)
>  * Added hang detector. (Chris Wilson)
> 
> v4: (Tvrtko Ursulin)
>  * Rebase for hang detector moved to previous patch.
>  * Tidy curly braces.
> 
> Signed-off-by: Tony Ye <tony.ye@intel.com>
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Tony Ye <tony.ye@intel.com>
> ---
>  lib/gpu_cmds.c             | 12 ++++++++
>  lib/gpu_cmds.h             |  3 ++
>  lib/media_fill.c           |  2 +-
>  tests/i915/gem_media_vme.c | 60 ++++++++++++++++++++++++++++++++++++++
>  4 files changed, 76 insertions(+), 1 deletion(-)
> 
> diff --git a/lib/gpu_cmds.c b/lib/gpu_cmds.c
> index b490a63bdfef..8d270ee86229 100644
> --- a/lib/gpu_cmds.c
> +++ b/lib/gpu_cmds.c
> @@ -36,6 +36,18 @@ gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
>         igt_assert(ret == 0);
>  }
>  
> +void
> +gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
> +{
> +       int ret;
> +
> +       ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
> +       if (ret == 0)
> +               ret = drm_intel_gem_bo_context_exec(batch->bo, batch->ctx,
> +                               batch_end, 0);
> +       igt_assert(ret == 0);
> +}
> +
>  uint32_t
>  gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch,
>                             uint8_t color)
> diff --git a/lib/gpu_cmds.h b/lib/gpu_cmds.h
> index ca671fb52daf..1321af446161 100644
> --- a/lib/gpu_cmds.h
> +++ b/lib/gpu_cmds.h
> @@ -40,6 +40,9 @@
>  void
>  gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
>  
> +void
> +gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
> +
>  uint32_t
>  gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch,
>                         uint8_t color);
> diff --git a/lib/media_fill.c b/lib/media_fill.c
> index b1e84727394a..03b5e71e101b 100644
> --- a/lib/media_fill.c
> +++ b/lib/media_fill.c
> @@ -338,7 +338,7 @@ __gen11_media_vme_func(struct intel_batchbuffer *batch,
>         batch_end = intel_batchbuffer_align(batch, 8);
>         assert(batch_end < BATCH_STATE_SPLIT);
>  
> -       gen7_render_flush(batch, batch_end);
> +       gen7_render_context_flush(batch, batch_end);
>         intel_batchbuffer_reset(batch);
>  }

Above hunks could be in the previous patch, too?

> +static void shut_non_vme_subslices(int drm_fd, uint32_t ctx)
> +{
> +       struct drm_i915_gem_context_param_sseu sseu = { };
> +       struct drm_i915_gem_context_param arg = {
> +               .param = I915_CONTEXT_PARAM_SSEU,
> +               .ctx_id = ctx,
> +               .size = sizeof(sseu),
> +               .value = to_user_pointer(&sseu),
> +       };
> +       int ret;
> +
> +       if (__gem_context_get_param(drm_fd, &arg))
> +               return; /* no sseu support */

I guess we could fail at this point already, but maybe there will be a
surprise ICL stepping to prove me wrong :)

> +
> +       ret = __gem_context_set_param(drm_fd, &arg);
> +       igt_assert(ret == 0 || ret == -ENODEV || ret == -EINVAL);
> +       if (ret)
> +               return; /* no sseu support */
> +
> +       /* shutdown half subslices*/

space before */

Either way, with the hunk moved or not:

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas

> +       sseu.subslice_mask =
> +               switch_off_n_bits(sseu.subslice_mask,
> +                                 __builtin_popcount(sseu.subslice_mask) / 2);
> +
> +       gem_context_set_param(drm_fd, &arg);
> +}
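
Regarding the note above about failing at that point already: one alternative, not part of this patch, would be to skip the test outright when the SSEU parameter is unavailable instead of silently continuing without the workaround. A rough sketch of that variant, assuming IGT's igt_require() together with the helpers the patch already uses:

/* Sketch only: skip instead of silently returning when SSEU is unsupported. */
static void shut_non_vme_subslices(int drm_fd, uint32_t ctx)
{
	struct drm_i915_gem_context_param_sseu sseu = { };
	struct drm_i915_gem_context_param arg = {
		.param = I915_CONTEXT_PARAM_SSEU,
		.ctx_id = ctx,
		.size = sizeof(sseu),
		.value = to_user_pointer(&sseu),
	};

	/* No SSEU support: skip the test rather than run without the w/a. */
	igt_require(__gem_context_get_param(drm_fd, &arg) == 0);
	igt_require(__gem_context_set_param(drm_fd, &arg) == 0);

	/* Shut down half of the subslices. */
	sseu.subslice_mask =
		switch_off_n_bits(sseu.subslice_mask,
				  __builtin_popcount(sseu.subslice_mask) / 2);

	gem_context_set_param(drm_fd, &arg);
}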

Patch

diff --git a/lib/gpu_cmds.c b/lib/gpu_cmds.c
index b490a63bdfef..8d270ee86229 100644
--- a/lib/gpu_cmds.c
+++ b/lib/gpu_cmds.c
@@ -36,6 +36,18 @@  gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
 	igt_assert(ret == 0);
 }
 
+void
+gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end)
+{
+	int ret;
+
+	ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
+	if (ret == 0)
+		ret = drm_intel_gem_bo_context_exec(batch->bo, batch->ctx,
+				batch_end, 0);
+	igt_assert(ret == 0);
+}
+
 uint32_t
 gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch,
 			    uint8_t color)
diff --git a/lib/gpu_cmds.h b/lib/gpu_cmds.h
index ca671fb52daf..1321af446161 100644
--- a/lib/gpu_cmds.h
+++ b/lib/gpu_cmds.h
@@ -40,6 +40,9 @@ 
 void
 gen7_render_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
 
+void
+gen7_render_context_flush(struct intel_batchbuffer *batch, uint32_t batch_end);
+
 uint32_t
 gen7_fill_curbe_buffer_data(struct intel_batchbuffer *batch,
 			uint8_t color);
diff --git a/lib/media_fill.c b/lib/media_fill.c
index b1e84727394a..03b5e71e101b 100644
--- a/lib/media_fill.c
+++ b/lib/media_fill.c
@@ -338,7 +338,7 @@  __gen11_media_vme_func(struct intel_batchbuffer *batch,
 	batch_end = intel_batchbuffer_align(batch, 8);
 	assert(batch_end < BATCH_STATE_SPLIT);
 
-	gen7_render_flush(batch, batch_end);
+	gen7_render_context_flush(batch, batch_end);
 	intel_batchbuffer_reset(batch);
 }
 
diff --git a/tests/i915/gem_media_vme.c b/tests/i915/gem_media_vme.c
index 47e949c781f2..34383cb123d4 100644
--- a/tests/i915/gem_media_vme.c
+++ b/tests/i915/gem_media_vme.c
@@ -81,6 +81,52 @@  static void scratch_buf_init_dst(drm_intel_bufmgr *bufmgr, struct igt_buf *buf)
 	buf->stride = 1;
 }
 
+static uint64_t switch_off_n_bits(uint64_t mask, unsigned int n)
+{
+	unsigned int i;
+
+	igt_assert(n > 0 && n <= (sizeof(mask) * 8));
+	igt_assert(n <= __builtin_popcount(mask));
+
+	for (i = 0; n && i < (sizeof(mask) * 8); i++) {
+		uint64_t bit = 1ULL << i;
+
+		if (bit & mask) {
+			mask &= ~bit;
+			n--;
+		}
+	}
+
+	return mask;
+}
+
+static void shut_non_vme_subslices(int drm_fd, uint32_t ctx)
+{
+	struct drm_i915_gem_context_param_sseu sseu = { };
+	struct drm_i915_gem_context_param arg = {
+		.param = I915_CONTEXT_PARAM_SSEU,
+		.ctx_id = ctx,
+		.size = sizeof(sseu),
+		.value = to_user_pointer(&sseu),
+	};
+	int ret;
+
+	if (__gem_context_get_param(drm_fd, &arg))
+		return; /* no sseu support */
+
+	ret = __gem_context_set_param(drm_fd, &arg);
+	igt_assert(ret == 0 || ret == -ENODEV || ret == -EINVAL);
+	if (ret)
+		return; /* no sseu support */
+
+	/* shutdown half subslices*/
+	sseu.subslice_mask =
+		switch_off_n_bits(sseu.subslice_mask,
+				  __builtin_popcount(sseu.subslice_mask) / 2);
+
+	gem_context_set_param(drm_fd, &arg);
+}
+
 igt_simple_main
 {
 	int drm_fd;
@@ -108,6 +154,20 @@  igt_simple_main
 	scratch_buf_init_src(bufmgr, &src);
 	scratch_buf_init_dst(bufmgr, &dst);
 
+	batch->ctx = drm_intel_gem_context_create(bufmgr);
+	igt_assert(batch->ctx);
+
+	/* ICL hangs if non-VME enabled slices are enabled with a VME kernel. */
+	if (intel_gen(devid) == 11) {
+		uint32_t ctx_id;
+		int ret;
+
+		ret = drm_intel_gem_context_get_id(batch->ctx, &ctx_id);
+		igt_assert_eq(ret, 0);
+
+		shut_non_vme_subslices(drm_fd, ctx_id);
+	}
+
 	igt_fork_hang_detector(drm_fd);
 
 	media_vme(batch, &src, WIDTH, HEIGHT, &dst);
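
As a closing illustration of the switch_off_n_bits() helper added above: it clears the lowest set bits first, so halving a fully populated eight-subslice mask leaves the upper four subslices enabled. A small standalone demo with made-up mask values (equivalent logic only, not the IGT build):

#include <inttypes.h>
#include <stdio.h>

/* Equivalent logic to the patch's helper: clear the n lowest set bits. */
static uint64_t switch_off_n_bits(uint64_t mask, unsigned int n)
{
	while (n--)
		mask &= mask - 1;	/* drop the lowest set bit */

	return mask;
}

int main(void)
{
	uint64_t mask = 0xff;	/* hypothetical: all eight subslices enabled */

	mask = switch_off_n_bits(mask, __builtin_popcountll(mask) / 2);
	printf("halved mask: 0x%" PRIx64 "\n", mask);	/* prints 0xf0 */

	return 0;
}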