Message ID | 20171204172315.25487-3-antonio.argenziano@intel.com (mailing list archive)
---|---
State | New, archived
Quoting Antonio Argenziano (2017-12-04 17:23:15)
> This patch adds a test that will trigger a preemption of a low priority
> batch by a 'bad' batch buffer which will hang. The test aims at making
> sure that a hanging high priority batch will not disrupt the submission
> flow of low priority contexts.
>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Michal Winiarski <michal.winiarski@intel.com>
> Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>
> ---
>  tests/gem_exec_schedule.c | 39 +++++++++++++++++++++++++++++++++++++++
>  1 file changed, 39 insertions(+)
>
> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> index ae44a6c0..5a4d63f9 100644
> --- a/tests/gem_exec_schedule.c
> +++ b/tests/gem_exec_schedule.c
> @@ -511,6 +511,39 @@ static void preempt_self(int fd, unsigned ring)
>  	gem_close(fd, result);
>  }
>
> +static void bad_preemptive(int fd, unsigned ring)
> +{
> +	igt_spin_t *spin[16];
> +	igt_spin_t *bad;
> +	uint32_t ctx[2];
> +
> +	ctx[LO] = gem_context_create(fd);
> +	gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
> +
> +	ctx[HI] = gem_context_create(fd);
> +	gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
> +
> +	for (int n = 0; n < 16; n++) {
> +		gem_context_destroy(fd, ctx[LO]);
> +		ctx[LO] = gem_context_create(fd);
> +		gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
> +
> +		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0, true);
> +		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
> +	}
> +
> +	bad = igt_spin_batch_new(fd, ctx[HI], ring, 0, false);
> +	gem_wait(fd, bad->handle, NULL);
> +
> +	for (int n = 0; n < 16; n++) {
> +		igt_assert(gem_bo_busy(fd, spin[n]->handle));
> +		igt_spin_batch_free(fd, spin[n]);
> +	}

Is this really policy you want to enforce? I certainly don't intend to
mandate that the kernel isn't allowed to use RT throttling.

> +
> +	gem_context_destroy(fd, ctx[LO]);
> +	gem_context_destroy(fd, ctx[HI]);
> +}
> +
>  static void deep(int fd, unsigned ring)
>  {
>  #define XS 8
> @@ -1033,6 +1066,12 @@ igt_main
>  			preempt(fd, e->exec_id | e->flags, NEW_CTX | HANG_LP);
>  			igt_fork_hang_detector(fd);
>  		}
> +
> +		igt_subtest_f("bad-preemptive-%s", e->name) {
> +			igt_stop_hang_detector();
> +			bad_preemptive(fd, e->exec_id | e->flags);
> +			igt_fork_hang_detector(fd);
> +		}

Split into non-hang/hang portions and use igt_subtest_group +
igt_fixture.
-Chris
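For reference, the split Chris asks for might look something like the sketch below — hypothetical code, assuming the per-engine loop in igt_main that supplies `e`, and not taken from any revision of the patch:

```c
/*
 * Sketch of the suggested restructuring: the hang detector is stopped
 * once in an igt_fixture for the whole group, so the hanging subtest
 * runs without it, and a second fixture restarts it on the way out.
 */
igt_subtest_group {
	igt_fixture
		igt_stop_hang_detector();

	igt_subtest_f("bad-preemptive-%s", e->name)
		bad_preemptive(fd, e->exec_id | e->flags);

	igt_fixture
		igt_fork_hang_detector(fd);
}
```

The point of the split is that a skipped or failed hanging subtest can no longer leave the hang detector in an inconsistent state for the non-hanging subtests that follow.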
On 04/12/17 09:42, Chris Wilson wrote:
> Quoting Antonio Argenziano (2017-12-04 17:23:15)
>> This patch adds a test that will trigger a preemption of a low priority
>> batch by a 'bad' batch buffer which will hang. The test aims at making
>> sure that a hanging high priority batch will not disrupt the submission
>> flow of low priority contexts.
>>
>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
>> Cc: Michal Winiarski <michal.winiarski@intel.com>
>> Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>
>> ---
>>  tests/gem_exec_schedule.c | 39 +++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 39 insertions(+)
>>
>> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
>> index ae44a6c0..5a4d63f9 100644
>> --- a/tests/gem_exec_schedule.c
>> +++ b/tests/gem_exec_schedule.c
>> @@ -511,6 +511,39 @@ static void preempt_self(int fd, unsigned ring)
>>  	gem_close(fd, result);
>>  }
>>
>> +static void bad_preemptive(int fd, unsigned ring)
>> +{
>> +	igt_spin_t *spin[16];
>> +	igt_spin_t *bad;
>> +	uint32_t ctx[2];
>> +
>> +	ctx[LO] = gem_context_create(fd);
>> +	gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
>> +
>> +	ctx[HI] = gem_context_create(fd);
>> +	gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
>> +
>> +	for (int n = 0; n < 16; n++) {
>> +		gem_context_destroy(fd, ctx[LO]);
>> +		ctx[LO] = gem_context_create(fd);
>> +		gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
>> +
>> +		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0, true);
>> +		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
>> +	}
>> +
>> +	bad = igt_spin_batch_new(fd, ctx[HI], ring, 0, false);
>> +	gem_wait(fd, bad->handle, NULL);
>> +
>> +	for (int n = 0; n < 16; n++) {
>> +		igt_assert(gem_bo_busy(fd, spin[n]->handle));
>> +		igt_spin_batch_free(fd, spin[n]);
>> +	}
>
> Is this really policy you want to enforce? I certainly don't intend to
> mandate that the kernel isn't allowed to use RT throttling.

Yeah, I wasn't sure about that. Is a gem_wait on each of them enough or
should I put a write at the end of each low priority batch to make sure
they actually executed?

>
>> +
>> +	gem_context_destroy(fd, ctx[LO]);
>> +	gem_context_destroy(fd, ctx[HI]);
>> +}
>> +
>>  static void deep(int fd, unsigned ring)
>>  {
>>  #define XS 8
>> @@ -1033,6 +1066,12 @@ igt_main
>>  			preempt(fd, e->exec_id | e->flags, NEW_CTX | HANG_LP);
>>  			igt_fork_hang_detector(fd);
>>  		}
>> +
>> +		igt_subtest_f("bad-preemptive-%s", e->name) {
>> +			igt_stop_hang_detector();
>> +			bad_preemptive(fd, e->exec_id | e->flags);
>> +			igt_fork_hang_detector(fd);
>> +		}
>
> Split into non-hang/hang portions and use igt_subtest_group +
> igt_fixture.

OK, will do.

-Antonio

> -Chris
>
Quoting Antonio Argenziano (2017-12-04 18:25:16)
>
>
> On 04/12/17 09:42, Chris Wilson wrote:
> > Quoting Antonio Argenziano (2017-12-04 17:23:15)
> >> This patch adds a test that will trigger a preemption of a low priority
> >> batch by a 'bad' batch buffer which will hang. The test aims at making
> >> sure that a hanging high priority batch will not disrupt the submission
> >> flow of low priority contexts.
> >>
> >> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> >> Cc: Michal Winiarski <michal.winiarski@intel.com>
> >> Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>
> >> ---
> >>  tests/gem_exec_schedule.c | 39 +++++++++++++++++++++++++++++++++++++++
> >>  1 file changed, 39 insertions(+)
> >>
> >> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> >> index ae44a6c0..5a4d63f9 100644
> >> --- a/tests/gem_exec_schedule.c
> >> +++ b/tests/gem_exec_schedule.c
> >> @@ -511,6 +511,39 @@ static void preempt_self(int fd, unsigned ring)
> >>  	gem_close(fd, result);
> >>  }
> >>
> >> +static void bad_preemptive(int fd, unsigned ring)
> >> +{
> >> +	igt_spin_t *spin[16];
> >> +	igt_spin_t *bad;
> >> +	uint32_t ctx[2];
> >> +
> >> +	ctx[LO] = gem_context_create(fd);
> >> +	gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
> >> +
> >> +	ctx[HI] = gem_context_create(fd);
> >> +	gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
> >> +
> >> +	for (int n = 0; n < 16; n++) {
> >> +		gem_context_destroy(fd, ctx[LO]);
> >> +		ctx[LO] = gem_context_create(fd);
> >> +		gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
> >> +
> >> +		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0, true);
> >> +		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
> >> +	}
> >> +
> >> +	bad = igt_spin_batch_new(fd, ctx[HI], ring, 0, false);
> >> +	gem_wait(fd, bad->handle, NULL);
> >> +
> >> +	for (int n = 0; n < 16; n++) {
> >> +		igt_assert(gem_bo_busy(fd, spin[n]->handle));
> >> +		igt_spin_batch_free(fd, spin[n]);
> >> +	}
> >
> > Is this really policy you want to enforce? I certainly don't intend to
> > mandate that the kernel isn't allowed to use RT throttling.
>
> Yeah, I wasn't sure about that. Is a gem_wait on each of them enough or
> should I put a write at the end of each low priority batch to make sure
> they actually executed?

Just a comment that this is what happens right now, but we may enact a
different policy when we have a real scheduler. Part of the trick is
remembering which tests are allowed to be broken, because on the whole
igt defines the behaviour which is expected/relied on by clients. (The
converse is also true in that we may scope out the limits of user
behaviour in igt and then fix the kernel to suit.)

As regards gem_wait(), this here is gem_sync(), and you can neaten up
ctx[LO] by scoping the context to the loop. And call this something
like preemptive_hang; bad* brings bad memories of tests that were
deliberately broken as opposed to demonstrating that the code survives
abuse.
-Chris
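Folding those suggestions together — gem_sync() rather than gem_wait(), ctx[LO] scoped to the loop, and the rename to preemptive_hang — the helper might come out along these lines. This is a sketch using the same igt_spin API as the patch under review, not the version that eventually landed:

```c
/* Sketch only: the review feedback above applied to the helper. */
static void preemptive_hang(int fd, unsigned ring)
{
	igt_spin_t *spin[16];
	igt_spin_t *bad;
	uint32_t ctx[2];

	ctx[HI] = gem_context_create(fd);
	gem_context_set_priority(fd, ctx[HI], MAX_PRIO);

	for (int n = 0; n < 16; n++) {
		/* Each spinner gets its own low priority context, destroyed
		 * as soon as its batch is queued; the request keeps the
		 * context alive for as long as it is needed. */
		ctx[LO] = gem_context_create(fd);
		gem_context_set_priority(fd, ctx[LO], MIN_PRIO);

		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0, true);
		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);

		gem_context_destroy(fd, ctx[LO]);
	}

	bad = igt_spin_batch_new(fd, ctx[HI], ring, 0, false);
	gem_sync(fd, bad->handle); /* wait for the hanging batch to be reset */

	/* Per the discussion above, this asserts today's behaviour, not a
	 * contractual scheduling policy. */
	for (int n = 0; n < 16; n++) {
		igt_assert(gem_bo_busy(fd, spin[n]->handle));
		igt_spin_batch_free(fd, spin[n]);
	}

	gem_context_destroy(fd, ctx[HI]);
}
```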
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index ae44a6c0..5a4d63f9 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -511,6 +511,39 @@ static void preempt_self(int fd, unsigned ring)
 	gem_close(fd, result);
 }
 
+static void bad_preemptive(int fd, unsigned ring)
+{
+	igt_spin_t *spin[16];
+	igt_spin_t *bad;
+	uint32_t ctx[2];
+
+	ctx[LO] = gem_context_create(fd);
+	gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
+
+	ctx[HI] = gem_context_create(fd);
+	gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
+
+	for (int n = 0; n < 16; n++) {
+		gem_context_destroy(fd, ctx[LO]);
+		ctx[LO] = gem_context_create(fd);
+		gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
+
+		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0, true);
+		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
+	}
+
+	bad = igt_spin_batch_new(fd, ctx[HI], ring, 0, false);
+	gem_wait(fd, bad->handle, NULL);
+
+	for (int n = 0; n < 16; n++) {
+		igt_assert(gem_bo_busy(fd, spin[n]->handle));
+		igt_spin_batch_free(fd, spin[n]);
+	}
+
+	gem_context_destroy(fd, ctx[LO]);
+	gem_context_destroy(fd, ctx[HI]);
+}
+
 static void deep(int fd, unsigned ring)
 {
 #define XS 8
@@ -1033,6 +1066,12 @@ igt_main
 			preempt(fd, e->exec_id | e->flags, NEW_CTX | HANG_LP);
 			igt_fork_hang_detector(fd);
 		}
+
+		igt_subtest_f("bad-preemptive-%s", e->name) {
+			igt_stop_hang_detector();
+			bad_preemptive(fd, e->exec_id | e->flags);
+			igt_fork_hang_detector(fd);
+		}
 	}
 
 	igt_subtest_f("deep-%s", e->name)
This patch adds a test that will trigger a preemption of a low priority
batch by a 'bad' batch buffer which will hang. The test aims at making
sure that a hanging high priority batch will not disrupt the submission
flow of low priority contexts.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Winiarski <michal.winiarski@intel.com>
Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>
---
 tests/gem_exec_schedule.c | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)