Message ID | 1435144388.15203.7.camel@jlahtine-mobl1 (mailing list archive)
State      | New, archived
This patch is for I-G-T; sorry for the mishap, it was generated on a remote machine.

On Wed, 2015-06-24 at 14:13 +0300, Joonas Lahtinen wrote:
> Add a new test which executes dummy workloads on the GPU. Use the same
> method as with gem_concurrent_{blit,all} so as not to interfere with
> standard command line parsing.
>
> Also adds {render,blt}-forked subtests to gem_ringfill.
>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> ---
>  tests/Makefile.sources    |  1 +
>  tests/gem_ringfill.c      | 78 +++++++++++++++++++++++++++++++++++++++++------
>  tests/gem_ringfill_noop.c |  2 ++
>  3 files changed, 72 insertions(+), 9 deletions(-)
>  create mode 100644 tests/gem_ringfill_noop.c
On Wed, Jun 24, 2015 at 02:13:08PM +0300, Joonas Lahtinen wrote:
> Add a new test which executes dummy workloads on the GPU. Use the same
> method as with gem_concurrent_{blit,all} so as not to interfere with
> standard command line parsing.

Planning to add a combinatorial explosion? If not, just add the few extra
subtests to gem_ringfill.c; I only split gem_concurrent because people
didn't like the idea of spending days to find kernel bugs.

Please can you make this a loop over rings + test params, for the benefit
of the reader.
-Chris
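A minimal sketch of the kind of loop Chris is asking for, assuming IGT's igt_subtest_f() helper and keeping the existing check_ring()/fill-function plumbing; the table layout, field names and mode list below are illustrative assumptions, not the code that eventually landed:

/* Illustrative sketch only: enumerate ring x mode combinations instead of
 * open-coding each subtest.  Table and variable names are assumptions. */
static const struct {
	const char *subtest;	/* subtest name prefix */
	const char *ring;	/* name passed to check_ring() */
} rings[] = {
	{ "blitter", "blt" },
	{ "render",  "render" },
};

static const char *modes[] = { "", "-interruptible", "-forked" };

igt_main
{
	/* ... existing fixture setup ... */

	for (unsigned r = 0; r < ARRAY_SIZE(rings); r++) {
		for (unsigned m = 0; m < ARRAY_SIZE(modes); m++) {
			igt_subtest_f("%s%s", rings[r].subtest, modes[m]) {
				/* pick the copy vs. noop fill for this ring,
				 * wrap in the signal helper or igt_fork()
				 * for this mode, then call
				 * check_ring(bufmgr, batch, rings[r].ring,
				 * fill) as before */
			}
		}
	}
}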
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 15d8382..7c2eec7 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -53,6 +53,7 @@ TESTS_progs_M = \
 	gem_render_copy_redux \
 	gem_reset_stats \
 	gem_ringfill \
+	gem_ringfill_noop \
 	gem_set_tiling_vs_blt \
 	gem_storedw_batches_loop \
 	gem_streaming_writes \
diff --git a/tests/gem_ringfill.c b/tests/gem_ringfill.c
index 85b01ea..ac823ba 100644
--- a/tests/gem_ringfill.c
+++ b/tests/gem_ringfill.c
@@ -54,6 +54,8 @@ struct bo {
 	drm_intel_bo *src, *dst, *tmp;
 };
 
+static bool opt_noop = false;
+
 static const int width = 512, height = 512;
 
 static void create_bo(drm_intel_bufmgr *bufmgr,
@@ -75,11 +77,11 @@ static void create_bo(drm_intel_bufmgr *bufmgr,
 		map[i] = i;
 	drm_intel_bo_unmap(b->src);
 
-	/* Fill the dst with garbage. */
+	/* Fill the dst with another pattern. */
 	drm_intel_bo_map(b->dst, true);
 	map = b->dst->virtual;
 	for (i = 0; i < width * height; i++)
-		map[i] = 0xd0d0d0d0;
+		map[i] = opt_noop ? 0xd0d0d0d0 ^ i : 0xd0d0d0d0;
 	drm_intel_bo_unmap(b->dst);
 }
 
@@ -91,12 +93,13 @@ static int check_bo(struct bo *b)
 	drm_intel_bo_map(b->dst, false);
 	map = b->dst->virtual;
 	for (i = 0; i < width*height; i++) {
-		if (map[i] != i && ++fails <= 9) {
+		uint32_t pattern = opt_noop ? 0xd0d0d0d0 ^ i : i;
+		if (map[i] != pattern && ++fails <= 9) {
 			int x = i % width;
 			int y = i / width;
 
-			igt_info("%s: copy #%d at %d,%d failed: read 0x%08x\n",
-				 b->ring, i, x, y, map[i]);
+			igt_info("%s: copy #%d at %d,%d failed: read 0x%08x (expected 0x%08x)\n",
+				 b->ring, i, x, y, map[i], pattern);
 		}
 	}
 	drm_intel_bo_unmap(b->dst);
@@ -193,6 +196,38 @@ static void blt_copy(struct intel_batchbuffer *batch,
 	intel_batchbuffer_flush(batch);
 }
 
+static void fill_noop(struct intel_batchbuffer *batch)
+{
+	/* Fill batch with NOOP. */
+	int nr_noop = (BATCH_SZ - BATCH_RESERVED)/4;
+	int i;
+
+	BEGIN_BATCH(nr_noop, 0);
+	for (i = 0; i < nr_noop; ++i)
+		OUT_BATCH(MI_NOOP);
+	ADVANCE_BATCH();
+}
+
+static void blt_noop(struct intel_batchbuffer *batch,
+		     drm_intel_context *context,
+		     struct igt_buf *src, unsigned src_x, unsigned src_y,
+		     unsigned w, unsigned h,
+		     struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
+{
+	fill_noop(batch);
+	intel_batchbuffer_flush_on_ring(batch, I915_EXEC_RENDER);
+}
+
+static void render_noop(struct intel_batchbuffer *batch,
+			drm_intel_context *context,
+			struct igt_buf *src, unsigned src_x, unsigned src_y,
+			unsigned w, unsigned h,
+			struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
+{
+	fill_noop(batch);
+	intel_batchbuffer_flush_on_ring(batch, I915_EXEC_BLT);
+}
+
 drm_intel_bufmgr *bufmgr;
 struct intel_batchbuffer *batch;
 int fd;
@@ -201,6 +236,9 @@ igt_main
 {
 	igt_skip_on_simulation();
 
+	if (strstr(igt_test_name(), "noop"))
+		opt_noop = true;
+
 	igt_fixture {
 		fd = drm_open_any();
 
@@ -210,7 +248,7 @@ igt_main
 	}
 
 	igt_subtest("blitter")
-		check_ring(bufmgr, batch, "blt", blt_copy);
+		check_ring(bufmgr, batch, "blt", opt_noop ? blt_noop : blt_copy);
 
 	/* Strictly only required on architectures with a separate BLT ring,
 	 * but lets stress everybody.
@@ -221,12 +259,12 @@ igt_main
 		copy = igt_get_render_copyfunc(batch->devid);
 		igt_require(copy);
 
-		check_ring(bufmgr, batch, "render", copy);
+		check_ring(bufmgr, batch, "render", opt_noop ? render_noop : copy);
 	}
 
 	igt_fork_signal_helper();
 	igt_subtest("blitter-interruptible")
-		check_ring(bufmgr, batch, "blt", blt_copy);
+		check_ring(bufmgr, batch, "blt", opt_noop ? blt_noop : blt_copy);
 
 	/* Strictly only required on architectures with a separate BLT ring,
 	 * but lets stress everybody.
@@ -237,10 +275,32 @@ igt_main
 		copy = igt_get_render_copyfunc(batch->devid);
 		igt_require(copy);
 
-		check_ring(bufmgr, batch, "render", copy);
+		check_ring(bufmgr, batch, "render", opt_noop ? render_noop : copy);
 	}
 	igt_stop_signal_helper();
 
+	igt_subtest("blitter-forked") {
+		igt_fork(child, 1) {
+			check_ring(bufmgr, batch, "blt", opt_noop ? blt_noop : blt_copy);
+		}
+		igt_waitchildren();
+	}
+
+	/* Strictly only required on architectures with a separate BLT ring,
+	 * but lets stress everybody.
+	 */
+	igt_subtest("render-forked") {
+		igt_render_copyfunc_t copy;
+
+		copy = igt_get_render_copyfunc(batch->devid);
+		igt_require(copy);
+
+		igt_fork(child, 1) {
+			check_ring(bufmgr, batch, "render", opt_noop ? render_noop : copy);
+		}
+		igt_waitchildren();
+	}
+
 	igt_fixture {
 		intel_batchbuffer_free(batch);
 		drm_intel_bufmgr_destroy(bufmgr);
diff --git a/tests/gem_ringfill_noop.c b/tests/gem_ringfill_noop.c
new file mode 100644
index 0000000..1a4356a
--- /dev/null
+++ b/tests/gem_ringfill_noop.c
@@ -0,0 +1,2 @@
+/* See gem_concurrent_blit.c for explanation. */
+#include "gem_ringfill.c"
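For readers without gem_concurrent_blit.c at hand, the trick that the new two-line wrapper file relies on works roughly as follows. This is a simplified sketch that only restates what the patch itself adds (igt_test_name() and the strstr() check come from the patch; the surrounding framing is illustrative):

/* tests/gem_ringfill.c -- shared test source (simplified) */
static bool opt_noop = false;

igt_main
{
	/*
	 * Behaviour is keyed off the test binary's name, so no extra
	 * command line option is needed and IGT's standard option
	 * parsing is left untouched.
	 */
	if (strstr(igt_test_name(), "noop"))
		opt_noop = true;

	/* ... subtests then pick the noop or copy fill via opt_noop ... */
}

/* tests/gem_ringfill_noop.c -- a second binary built from the same
 * source, differing only in name: */
/* See gem_concurrent_blit.c for explanation. */
#include "gem_ringfill.c"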
Add a new test which executes dummy workloads on the GPU. Use the same
method as with gem_concurrent_{blit,all} so as not to interfere with
standard command line parsing.

Also adds {render,blt}-forked subtests to gem_ringfill.

Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 tests/Makefile.sources    |  1 +
 tests/gem_ringfill.c      | 78 +++++++++++++++++++++++++++++++++++++++++------
 tests/gem_ringfill_noop.c |  2 ++
 3 files changed, 72 insertions(+), 9 deletions(-)
 create mode 100644 tests/gem_ringfill_noop.c
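The -forked subtests mentioned above fork a single child each. If heavier stress is ever wanted, the same igt_fork()/igt_waitchildren() pair used in the patch scales naturally to one child per CPU; a minimal sketch under that assumption (the CPU count comes from plain sysconf(), not an IGT helper):

#include <unistd.h>

igt_subtest("blitter-forked") {
	int ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	/* igt_fork(child, n) runs the block in n child processes;
	 * igt_waitchildren() waits and propagates any child failure. */
	igt_fork(child, ncpus) {
		check_ring(bufmgr, batch, "blt",
			   opt_noop ? blt_noop : blt_copy);
	}
	igt_waitchildren();
}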