diff mbox series

[2/8] drm/i915/selftests: Add tests for timeslicing virtual engines

Message ID 20200518081440.17948-2-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show
Series [1/8] drm/i915: Move saturated workload detection back to the context | expand

Commit Message

Chris Wilson May 18, 2020, 8:14 a.m. UTC
Make sure that we can execute a virtual request on an already busy
engine, and conversely that we can execute a normal request if the
engines are already fully occupied by virtual requests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 179 +++++++++++++++++++++++++
 1 file changed, 179 insertions(+)

Comments

Tvrtko Ursulin May 18, 2020, 10:12 a.m. UTC | #1
On 18/05/2020 09:14, Chris Wilson wrote:
> Make sure that we can execute a virtual request on an already busy
> engine, and conversely that we can execute a normal request if the
> engines are already fully occupied by virtual requests.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/gt/selftest_lrc.c | 179 +++++++++++++++++++++++++
>   1 file changed, 179 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index 824f99c4cc7c..1fc54359bd53 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -3766,6 +3766,184 @@ static int live_virtual_mask(void *arg)
>   	return 0;
>   }
>   
> +static int slicein_virtual_engine(struct intel_gt *gt,
> +				  struct intel_engine_cs **siblings,
> +				  unsigned int nsibling)
> +{
> +	struct intel_context *ce;
> +	struct i915_request *rq;
> +	struct igt_spinner spin;
> +	unsigned int n;
> +	int err = 0;
> +
> +	/*
> +	 * Virtual requests must take part in timeslicing on the target engines.
> +	 */
> +
> +	if (igt_spinner_init(&spin, gt))
> +		return -ENOMEM;
> +
> +	for (n = 0; n < nsibling; n++) {
> +		ce = intel_context_create(siblings[n]);
> +		if (IS_ERR(ce)) {
> +			err = PTR_ERR(ce);
> +			goto out;
> +		}
> +
> +		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
> +		intel_context_put(ce);
> +
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto out;
> +		}
> +
> +		i915_request_add(rq);
> +	}
> +
> +	ce = intel_execlists_create_virtual(siblings, nsibling);
> +	if (IS_ERR(ce)) {
> +		err = PTR_ERR(ce);
> +		goto out;
> +	}
> +
> +	rq = intel_context_create_request(ce);
> +	intel_context_put(ce);
> +	if (IS_ERR(rq)) {
> +		err = PTR_ERR(rq);
> +		goto out;
> +	}
> +
> +	i915_request_get(rq);
> +	i915_request_add(rq);
> +	if (i915_request_wait(rq, 0, HZ / 10) < 0) {
> +		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
> +			      __func__, rq->engine->name);
> +		GEM_TRACE_DUMP();
> +		intel_gt_set_wedged(gt);
> +		err = -EIO;
> +	}
> +	i915_request_put(rq);
> +
> +out:
> +	igt_spinner_end(&spin);
> +	if (igt_flush_test(gt->i915))
> +		err = -EIO;
> +	igt_spinner_fini(&spin);
> +	return err;
> +}
> +
> +static int sliceout_virtual_engine(struct intel_gt *gt,
> +				   struct intel_engine_cs **siblings,
> +				   unsigned int nsibling)
> +{
> +	struct intel_context *ce;
> +	struct i915_request *rq;
> +	struct igt_spinner spin;
> +	unsigned int n;
> +	int err = 0;
> +
> +	/*
> +	 * Virtual requests must allow others a fair timeslice.
> +	 */
> +
> +	if (igt_spinner_init(&spin, gt))
> +		return -ENOMEM;
> +
> +	for (n = 0; n <= nsibling; n++) { /* oversubscribed */
> +		ce = intel_execlists_create_virtual(siblings, nsibling);
> +		if (IS_ERR(ce)) {
> +			err = PTR_ERR(ce);
> +			goto out;
> +		}
> +
> +		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
> +		intel_context_put(ce);
> +
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto out;
> +		}
> +
> +		i915_request_add(rq);
> +	}
> +
> +	for (n = 0; !err && n < nsibling; n++) {
> +		ce = intel_context_create(siblings[n]);
> +		if (IS_ERR(ce)) {
> +			err = PTR_ERR(ce);
> +			goto out;
> +		}
> +
> +		rq = intel_context_create_request(ce);
> +		intel_context_put(ce);
> +
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto out;
> +		}
> +
> +		i915_request_get(rq);
> +		i915_request_add(rq);
> +		if (i915_request_wait(rq, 0, HZ / 10) < 0) {
> +			GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
> +				      __func__, siblings[n]->name);
> +			GEM_TRACE_DUMP();
> +			intel_gt_set_wedged(gt);
> +			err = -EIO;
> +		}
> +		i915_request_put(rq);
> +	}
> +
> +out:
> +	igt_spinner_end(&spin);
> +	if (igt_flush_test(gt->i915))
> +		err = -EIO;
> +	igt_spinner_fini(&spin);
> +	return err;
> +}
> +
> +static int live_virtual_slice(void *arg)
> +{
> +	struct intel_gt *gt = arg;
> +	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
> +	unsigned int class, inst;
> +	int err;
> +
> +	if (intel_uc_uses_guc_submission(&gt->uc))
> +		return 0;

Shouldn't the intel_engine_has_timeslices check below be enough? I am 
worried about silently skipping this seemingly pretty generic test too much.

> +
> +	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
> +		unsigned int nsibling;
> +
> +		nsibling = 0;
> +		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
> +			struct intel_engine_cs *engine;
> +
> +			engine = gt->engine_class[class][inst];
> +			if (!engine)
> +				break;

This should be continue I think, to account for vcs0 + vcs2 on Icelake.

> +
> +			if (!intel_engine_has_timeslices(engine))
> +				continue;
> +
> +			siblings[nsibling++] = engine;
> +		}
> +		if (nsibling < 2)
> +			continue;
> +
> +		err = slicein_virtual_engine(gt, siblings, nsibling);
> +		if (err)
> +			return err;
> +
> +		err = sliceout_virtual_engine(gt, siblings, nsibling);
> +		if (err)
> +			return err;
> +	}
> +
> +	return 0;
> +}
> +
>   static int preserved_virtual_engine(struct intel_gt *gt,
>   				    struct intel_engine_cs **siblings,
>   				    unsigned int nsibling)
> @@ -4329,6 +4507,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
>   		SUBTEST(live_virtual_engine),
>   		SUBTEST(live_virtual_mask),
>   		SUBTEST(live_virtual_preserved),
> +		SUBTEST(live_virtual_slice),
>   		SUBTEST(live_virtual_bond),
>   		SUBTEST(live_virtual_reset),
>   	};
> 

Regards,

Tvrtko
Chris Wilson May 18, 2020, 10:21 a.m. UTC | #2
Quoting Tvrtko Ursulin (2020-05-18 11:12:29)
> 
> On 18/05/2020 09:14, Chris Wilson wrote:
> > Make sure that we can execute a virtual request on an already busy
> > engine, and conversely that we can execute a normal request if the
> > engines are already fully occupied by virtual requests.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >   drivers/gpu/drm/i915/gt/selftest_lrc.c | 179 +++++++++++++++++++++++++
> >   1 file changed, 179 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> > index 824f99c4cc7c..1fc54359bd53 100644
> > --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> > +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> > @@ -3766,6 +3766,184 @@ static int live_virtual_mask(void *arg)
> >       return 0;
> >   }
> >   
> > +static int slicein_virtual_engine(struct intel_gt *gt,
> > +                               struct intel_engine_cs **siblings,
> > +                               unsigned int nsibling)
> > +{
> > +     struct intel_context *ce;
> > +     struct i915_request *rq;
> > +     struct igt_spinner spin;
> > +     unsigned int n;
> > +     int err = 0;
> > +
> > +     /*
> > +      * Virtual requests must take part in timeslicing on the target engines.
> > +      */
> > +
> > +     if (igt_spinner_init(&spin, gt))
> > +             return -ENOMEM;
> > +
> > +     for (n = 0; n < nsibling; n++) {
> > +             ce = intel_context_create(siblings[n]);
> > +             if (IS_ERR(ce)) {
> > +                     err = PTR_ERR(ce);
> > +                     goto out;
> > +             }
> > +
> > +             rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
> > +             intel_context_put(ce);
> > +
> > +             if (IS_ERR(rq)) {
> > +                     err = PTR_ERR(rq);
> > +                     goto out;
> > +             }
> > +
> > +             i915_request_add(rq);
> > +     }
> > +
> > +     ce = intel_execlists_create_virtual(siblings, nsibling);
> > +     if (IS_ERR(ce)) {
> > +             err = PTR_ERR(ce);
> > +             goto out;
> > +     }
> > +
> > +     rq = intel_context_create_request(ce);
> > +     intel_context_put(ce);
> > +     if (IS_ERR(rq)) {
> > +             err = PTR_ERR(rq);
> > +             goto out;
> > +     }
> > +
> > +     i915_request_get(rq);
> > +     i915_request_add(rq);
> > +     if (i915_request_wait(rq, 0, HZ / 10) < 0) {
> > +             GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
> > +                           __func__, rq->engine->name);
> > +             GEM_TRACE_DUMP();
> > +             intel_gt_set_wedged(gt);
> > +             err = -EIO;
> > +     }
> > +     i915_request_put(rq);
> > +
> > +out:
> > +     igt_spinner_end(&spin);
> > +     if (igt_flush_test(gt->i915))
> > +             err = -EIO;
> > +     igt_spinner_fini(&spin);
> > +     return err;
> > +}
> > +
> > +static int sliceout_virtual_engine(struct intel_gt *gt,
> > +                                struct intel_engine_cs **siblings,
> > +                                unsigned int nsibling)
> > +{
> > +     struct intel_context *ce;
> > +     struct i915_request *rq;
> > +     struct igt_spinner spin;
> > +     unsigned int n;
> > +     int err = 0;
> > +
> > +     /*
> > +      * Virtual requests must allow others a fair timeslice.
> > +      */
> > +
> > +     if (igt_spinner_init(&spin, gt))
> > +             return -ENOMEM;
> > +
> > +     for (n = 0; n <= nsibling; n++) { /* oversubscribed */
> > +             ce = intel_execlists_create_virtual(siblings, nsibling);
> > +             if (IS_ERR(ce)) {
> > +                     err = PTR_ERR(ce);
> > +                     goto out;
> > +             }
> > +
> > +             rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
> > +             intel_context_put(ce);
> > +
> > +             if (IS_ERR(rq)) {
> > +                     err = PTR_ERR(rq);
> > +                     goto out;
> > +             }
> > +
> > +             i915_request_add(rq);
> > +     }
> > +
> > +     for (n = 0; !err && n < nsibling; n++) {
> > +             ce = intel_context_create(siblings[n]);
> > +             if (IS_ERR(ce)) {
> > +                     err = PTR_ERR(ce);
> > +                     goto out;
> > +             }
> > +
> > +             rq = intel_context_create_request(ce);
> > +             intel_context_put(ce);
> > +
> > +             if (IS_ERR(rq)) {
> > +                     err = PTR_ERR(rq);
> > +                     goto out;
> > +             }
> > +
> > +             i915_request_get(rq);
> > +             i915_request_add(rq);
> > +             if (i915_request_wait(rq, 0, HZ / 10) < 0) {
> > +                     GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
> > +                                   __func__, siblings[n]->name);
> > +                     GEM_TRACE_DUMP();
> > +                     intel_gt_set_wedged(gt);
> > +                     err = -EIO;
> > +             }
> > +             i915_request_put(rq);
> > +     }
> > +
> > +out:
> > +     igt_spinner_end(&spin);
> > +     if (igt_flush_test(gt->i915))
> > +             err = -EIO;
> > +     igt_spinner_fini(&spin);
> > +     return err;
> > +}
> > +
> > +static int live_virtual_slice(void *arg)
> > +{
> > +     struct intel_gt *gt = arg;
> > +     struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
> > +     unsigned int class, inst;
> > +     int err;
> > +
> > +     if (intel_uc_uses_guc_submission(&gt->uc))
> > +             return 0;
> 
> Shouldn't the intel_engine_has_timeslices check below be enough? I am 
> worried about silently skipping this seemingly pretty generic test too much.

I haven't looked too hard which of these are execlists specific. I have
a plan somewhere to recreate a bunch of these as functional tests for
the i915_request layer.

That would give us a rough progression like

low level live_execlists_selftests
-> mid level live_request_selftests
 -> uapi gem_exec_scheduler et al.

> > +
> > +     for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
> > +             unsigned int nsibling;
> > +
> > +             nsibling = 0;
> > +             for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
> > +                     struct intel_engine_cs *engine;
> > +
> > +                     engine = gt->engine_class[class][inst];
> > +                     if (!engine)
> > +                             break;
> 
> This should be continue I think, to account for vcs0 + vcs2 on Icelake.

Oh, they all break atm iirc. Ah, no just the one I copied. Do I hear
the plea for refactoring the duplicated code :)
-Chris
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 824f99c4cc7c..1fc54359bd53 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -3766,6 +3766,184 @@  static int live_virtual_mask(void *arg)
 	return 0;
 }
 
+static int slicein_virtual_engine(struct intel_gt *gt,
+				  struct intel_engine_cs **siblings,
+				  unsigned int nsibling)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct igt_spinner spin;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * Virtual requests must take part in timeslicing on the target engines.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for (n = 0; n < nsibling; n++) {
+		ce = intel_context_create(siblings[n]);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_add(rq);
+	}
+
+	ce = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out;
+	}
+
+	rq = intel_context_create_request(ce);
+	intel_context_put(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+			      __func__, rq->engine->name);
+		GEM_TRACE_DUMP();
+		intel_gt_set_wedged(gt);
+		err = -EIO;
+	}
+	i915_request_put(rq);
+
+out:
+	igt_spinner_end(&spin);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+				   struct intel_engine_cs **siblings,
+				   unsigned int nsibling)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct igt_spinner spin;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * Virtual requests must allow others a fair timeslice.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for (n = 0; n <= nsibling; n++) { /* oversubscribed */
+		ce = intel_execlists_create_virtual(siblings, nsibling);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_add(rq);
+	}
+
+	for (n = 0; !err && n < nsibling; n++) {
+		ce = intel_context_create(siblings[n]);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = intel_context_create_request(ce);
+		intel_context_put(ce);
+
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+		if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+			GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+				      __func__, siblings[n]->name);
+			GEM_TRACE_DUMP();
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+		}
+		i915_request_put(rq);
+	}
+
+out:
+	igt_spinner_end(&spin);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class, inst;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		unsigned int nsibling;
+
+		nsibling = 0;
+		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+			struct intel_engine_cs *engine;
+
+			engine = gt->engine_class[class][inst];
+			if (!engine)
+				break;
+
+			if (!intel_engine_has_timeslices(engine))
+				continue;
+
+			siblings[nsibling++] = engine;
+		}
+		if (nsibling < 2)
+			continue;
+
+		err = slicein_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+
+		err = sliceout_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int preserved_virtual_engine(struct intel_gt *gt,
 				    struct intel_engine_cs **siblings,
 				    unsigned int nsibling)
@@ -4329,6 +4507,7 @@  int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_virtual_engine),
 		SUBTEST(live_virtual_mask),
 		SUBTEST(live_virtual_preserved),
+		SUBTEST(live_virtual_slice),
 		SUBTEST(live_virtual_bond),
 		SUBTEST(live_virtual_reset),
 	};