Message ID | 20171219154543.10648-1-tvrtko.ursulin@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Quoting Tvrtko Ursulin (2017-12-19 15:45:41) > From: Tvrtko Ursulin <tvrtko.ursulin@intel.com> > > Support creating spin batches which return an output fence using new > __igt_spin_batch_new_fence / igt_spin_batch_new_fence API. > > This will be used from the perf_pmu@interrupts test to ensure user > interrupt generation from a batch with controlled duration. > > Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> > --- > lib/igt_dummyload.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++------- > lib/igt_dummyload.h | 10 +++++++++ > 2 files changed, 67 insertions(+), 8 deletions(-) > > diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c > index d19b4e5ea3d2..ef08ad580246 100644 > --- a/lib/igt_dummyload.c > +++ b/lib/igt_dummyload.c > @@ -70,9 +70,9 @@ fill_reloc(struct drm_i915_gem_relocation_entry *reloc, > reloc->write_domain = write_domains; > } > > -static void emit_recursive_batch(igt_spin_t *spin, > - int fd, uint32_t ctx, unsigned engine, > - uint32_t dep) > +static int emit_recursive_batch(igt_spin_t *spin, > + int fd, uint32_t ctx, unsigned engine, > + uint32_t dep, bool out_fence) > { > #define SCRATCH 0 > #define BATCH 1 > @@ -87,6 +87,7 @@ static void emit_recursive_batch(igt_spin_t *spin, > > nengine = 0; > if (engine == -1) { > + igt_assert_eq(out_fence, false); Didn't fancy merging the fences together to return a composite out_fence? > for_each_engine(fd, engine) > if (engine) > engines[nengine++] = engine; > @@ -165,22 +166,31 @@ static void emit_recursive_batch(igt_spin_t *spin, > execbuf.buffers_ptr = to_user_pointer(obj + (2 - execbuf.buffer_count)); > execbuf.rsvd1 = ctx; > > + if (out_fence) > + execbuf.flags = I915_EXEC_FENCE_OUT; if (out_fence) execbuf.flags |= I915_EXEC_FENCE_OUT; Just to make future changes easier? Might also be good to insert an igt_require(gem_has_exec_fence(fd)) here as well. (Or earlier?) 
> +igt_spin_t *__igt_spin_batch_new_fence(int fd, > + uint32_t ctx, > + unsigned engine); > + > +igt_spin_t *igt_spin_batch_new_fence(int fd, > + uint32_t ctx, > + unsigned engine); Ok for now, I expect these will be mangled into a new spin-batch factory later on. With an igt_require(), Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> If you want to merge the N engines' out_fences into one, that would save me a task. -Chris
On 19/12/2017 22:42, Chris Wilson wrote: > Quoting Tvrtko Ursulin (2017-12-19 15:45:41) >> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com> >> >> Support creating spin batches which return an output fence using new >> __igt_spin_batch_new_fence / igt_spin_batch_new_fence API. >> >> This will be used fromthe perf_pmu@interrupts test to ensure user >> interrupt generation from a batch with controlled duration. >> >> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> >> --- >> lib/igt_dummyload.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++------- >> lib/igt_dummyload.h | 10 +++++++++ >> 2 files changed, 67 insertions(+), 8 deletions(-) >> >> diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c >> index d19b4e5ea3d2..ef08ad580246 100644 >> --- a/lib/igt_dummyload.c >> +++ b/lib/igt_dummyload.c >> @@ -70,9 +70,9 @@ fill_reloc(struct drm_i915_gem_relocation_entry *reloc, >> reloc->write_domain = write_domains; >> } >> >> -static void emit_recursive_batch(igt_spin_t *spin, >> - int fd, uint32_t ctx, unsigned engine, >> - uint32_t dep) >> +static int emit_recursive_batch(igt_spin_t *spin, >> + int fd, uint32_t ctx, unsigned engine, >> + uint32_t dep, bool out_fence) >> { >> #define SCRATCH 0 >> #define BATCH 1 >> @@ -87,6 +87,7 @@ static void emit_recursive_batch(igt_spin_t *spin, >> >> nengine = 0; >> if (engine == -1) { >> + igt_assert_eq(out_fence, false); > > Didn't fancy merging the fences together to return a composite out_fence? No, just did not think of it. Will do that. > >> for_each_engine(fd, engine) >> if (engine) >> engines[nengine++] = engine; >> @@ -165,22 +166,31 @@ static void emit_recursive_batch(igt_spin_t *spin, >> execbuf.buffers_ptr = to_user_pointer(obj + (2 - execbuf.buffer_count)); >> execbuf.rsvd1 = ctx; >> >> + if (out_fence) >> + execbuf.flags = I915_EXEC_FENCE_OUT; > > if (out_fence) > execbuf.flags |= I915_EXEC_FENCE_OUT; > > Just to make future changes easier? 
> > Might also be good to insert a igt_require(gem_has_exec_fence(fd)) here > as well. (Or earlier?) Ack. >> +igt_spin_t *__igt_spin_batch_new_fence(int fd, >> + uint32_t ctx, >> + unsigned engine); >> + >> +igt_spin_t *igt_spin_batch_new_fence(int fd, >> + uint32_t ctx, >> + unsigned engine); > > Ok for now, I expect these will mangled into a new spin-batch factory > later on. Yeah, I was thinking whether a more generic constructor would be better, but then decided against sprinkling changes all over the place. > With an igt_require(), > Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> > > If you want to merge the N engines' out_fences into one, that would save > me a task. Thanks, will do. Regards, Tvrtko
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c index d19b4e5ea3d2..ef08ad580246 100644 --- a/lib/igt_dummyload.c +++ b/lib/igt_dummyload.c @@ -70,9 +70,9 @@ fill_reloc(struct drm_i915_gem_relocation_entry *reloc, reloc->write_domain = write_domains; } -static void emit_recursive_batch(igt_spin_t *spin, - int fd, uint32_t ctx, unsigned engine, - uint32_t dep) +static int emit_recursive_batch(igt_spin_t *spin, + int fd, uint32_t ctx, unsigned engine, + uint32_t dep, bool out_fence) { #define SCRATCH 0 #define BATCH 1 @@ -87,6 +87,7 @@ static void emit_recursive_batch(igt_spin_t *spin, nengine = 0; if (engine == -1) { + igt_assert_eq(out_fence, false); for_each_engine(fd, engine) if (engine) engines[nengine++] = engine; @@ -165,22 +166,31 @@ static void emit_recursive_batch(igt_spin_t *spin, execbuf.buffers_ptr = to_user_pointer(obj + (2 - execbuf.buffer_count)); execbuf.rsvd1 = ctx; + if (out_fence) + execbuf.flags = I915_EXEC_FENCE_OUT; + else + execbuf.flags = 0; + for (i = 0; i < nengine; i++) { execbuf.flags &= ~ENGINE_MASK; - execbuf.flags = engines[i]; - gem_execbuf(fd, &execbuf); + execbuf.flags |= engines[i]; + gem_execbuf_wr(fd, &execbuf); } + + return out_fence ? 
(execbuf.rsvd2 >> 32) : -1; } -igt_spin_t * -__igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep) +static igt_spin_t * +___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep, + int out_fence) { igt_spin_t *spin; spin = calloc(1, sizeof(struct igt_spin)); igt_assert(spin); - emit_recursive_batch(spin, fd, ctx, engine, dep); + spin->out_fence = emit_recursive_batch(spin, fd, ctx, engine, dep, + out_fence); igt_assert(gem_bo_busy(fd, spin->handle)); pthread_mutex_lock(&list_lock); @@ -190,6 +200,12 @@ __igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep) return spin; } +igt_spin_t * +__igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep) +{ + return ___igt_spin_batch_new(fd, ctx, engine, dep, false); +} + /** * igt_spin_batch_new: * @fd: open i915 drm file descriptor @@ -213,6 +229,35 @@ igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep) return __igt_spin_batch_new(fd, ctx, engine, dep); } +igt_spin_t * +__igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine) +{ + return ___igt_spin_batch_new(fd, ctx, engine, 0, true); +} + +/** + * igt_spin_batch_new_fence: + * @fd: open i915 drm file descriptor + * @engine: Ring to execute batch OR'd with execbuf flags. If value is less + * than 0, execute on all available rings. + * + * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that + * contains the batch's handle that can be waited upon. The returned structure + * must be passed to igt_spin_batch_free() for post-processing. + * + * igt_spin_t will contain an output fence associated with this batch. + * + * Returns: + * Structure with helper internal state for igt_spin_batch_free(). 
+ */ +igt_spin_t * +igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine) +{ + igt_require_gem(fd); + + return __igt_spin_batch_new_fence(fd, ctx, engine); +} + static void notify(union sigval arg) { igt_spin_t *spin = arg.sival_ptr; @@ -295,6 +340,10 @@ void igt_spin_batch_free(int fd, igt_spin_t *spin) gem_munmap(spin->batch, BATCH_SIZE); gem_close(fd, spin->handle); + + if (spin->out_fence >= 0) + close(spin->out_fence); + free(spin); } diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h index 215425f7c6c0..ffa7e351dea3 100644 --- a/lib/igt_dummyload.h +++ b/lib/igt_dummyload.h @@ -35,6 +35,7 @@ typedef struct igt_spin { timer_t timer; struct igt_list link; uint32_t *batch; + int out_fence; } igt_spin_t; igt_spin_t *__igt_spin_batch_new(int fd, @@ -45,6 +46,15 @@ igt_spin_t *igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep); + +igt_spin_t *__igt_spin_batch_new_fence(int fd, + uint32_t ctx, + unsigned engine); + +igt_spin_t *igt_spin_batch_new_fence(int fd, + uint32_t ctx, + unsigned engine); + void igt_spin_batch_set_timeout(igt_spin_t *spin, int64_t ns); void igt_spin_batch_end(igt_spin_t *spin); void igt_spin_batch_free(int fd, igt_spin_t *spin);