| Message ID | 20180919195544.1511-13-chris@chris-wilson.co.uk (mailing list archive) |
|---|---|
| State | New, archived |
| Headers | show |
| Series | [01/40] drm: Use default dma_fence hooks where possible for null syncobj \| expand |
On 19/09/2018 20:55, Chris Wilson wrote:
> In the next few patches, we will want to give a small priority boost to
> some requests/queues but not so much that we perturb the user controlled
> order. As such we shift the user priority bits higher leaving ourselves

Prepare for shifting actually.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

> a few low priority bits for our bumping.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h            | 2 +-
>  drivers/gpu/drm/i915/i915_gem_context.c    | 9 +++++----
>  drivers/gpu/drm/i915/i915_scheduler.h      | 6 ++++++
>  drivers/gpu/drm/i915/selftests/intel_lrc.c | 8 +++++---
>  4 files changed, 17 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 7d4daa7412f1..b902bb96e0be 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3230,7 +3230,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
>  int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>  				  unsigned int flags,
>  				  const struct i915_sched_attr *attr);
> -#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
> +#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
>
>  int __must_check
>  i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index f772593b99ab..150d7a6b2bd3 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
>  	kref_init(&ctx->ref);
>  	list_add_tail(&ctx->link, &dev_priv->contexts.list);
>  	ctx->i915 = dev_priv;
> -	ctx->sched.priority = I915_PRIORITY_NORMAL;
> +	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
>
>  	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
>  		struct intel_context *ce = &ctx->__engine[n];
> @@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
>  	}
>
>  	i915_gem_context_clear_bannable(ctx);
> -	ctx->sched.priority = prio;
> +	ctx->sched.priority = I915_USER_PRIORITY(prio);
>  	ctx->ring_size = PAGE_SIZE;
>
>  	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
> @@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
>  		args->value = i915_gem_context_is_bannable(ctx);
>  		break;
>  	case I915_CONTEXT_PARAM_PRIORITY:
> -		args->value = ctx->sched.priority;
> +		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
>  		break;
>  	default:
>  		ret = -EINVAL;
> @@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
>  			    !capable(CAP_SYS_NICE))
>  				ret = -EPERM;
>  			else
> -				ctx->sched.priority = priority;
> +				ctx->sched.priority =
> +					I915_USER_PRIORITY(priority);
>  		}
>  		break;
>
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
> index 70a42220358d..7edfad0abfd7 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.h
> +++ b/drivers/gpu/drm/i915/i915_scheduler.h
> @@ -19,6 +19,12 @@ enum {
>  	I915_PRIORITY_INVALID = INT_MIN
>  };
>
> +#define I915_USER_PRIORITY_SHIFT 0
> +#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
> +
> +#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
> +#define I915_PRIORITY_MASK (-I915_PRIORITY_COUNT)
> +
>  struct i915_sched_attr {
>  	/**
>  	 * @priority: execution and service priority
> diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> index 1aea7a8f2224..94ceb5f6c507 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> @@ -291,12 +291,14 @@ static int live_preempt(void *arg)
>  	ctx_hi = kernel_context(i915);
>  	if (!ctx_hi)
>  		goto err_spin_lo;
> -	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
> +	ctx_hi->sched.priority =
> +		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
>
>  	ctx_lo = kernel_context(i915);
>  	if (!ctx_lo)
>  		goto err_ctx_hi;
> -	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
> +	ctx_lo->sched.priority =
> +		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
>
>  	for_each_engine(engine, i915, id) {
>  		struct i915_request *rq;
> @@ -417,7 +419,7 @@ static int live_late_preempt(void *arg)
>  			goto err_wedged;
>  		}
>
> -		attr.priority = I915_PRIORITY_MAX;
> +		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
>  		engine->schedule(rq, &attr);
>
>  		if (!wait_for_spinner(&spin_hi, rq)) {
>
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7d4daa7412f1..b902bb96e0be 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3230,7 +3230,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
 
 int __must_check
 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f772593b99ab..150d7a6b2bd3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
-	ctx->sched.priority = I915_PRIORITY_NORMAL;
+	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 
 	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
 		struct intel_context *ce = &ctx->__engine[n];
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	}
 
 	i915_gem_context_clear_bannable(ctx);
-	ctx->sched.priority = prio;
+	ctx->sched.priority = I915_USER_PRIORITY(prio);
 	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
-		args->value = ctx->sched.priority;
+		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
 		break;
 	default:
 		ret = -EINVAL;
@@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 			    !capable(CAP_SYS_NICE))
 				ret = -EPERM;
 			else
-				ctx->sched.priority = priority;
+				ctx->sched.priority =
+					I915_USER_PRIORITY(priority);
 		}
 		break;
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a42220358d..7edfad0abfd7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -19,6 +19,12 @@ enum {
 	I915_PRIORITY_INVALID = INT_MIN
 };
 
+#define I915_USER_PRIORITY_SHIFT 0
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (-I915_PRIORITY_COUNT)
+
 struct i915_sched_attr {
 	/**
 	 * @priority: execution and service priority
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1aea7a8f2224..94ceb5f6c507 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -291,12 +291,14 @@ static int live_preempt(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
@@ -417,7 +419,7 @@ static int live_late_preempt(void *arg)
 			goto err_wedged;
 		}
 
-		attr.priority = I915_PRIORITY_MAX;
+		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
 		engine->schedule(rq, &attr);
 
 		if (!wait_for_spinner(&spin_hi, rq)) {
In the next few patches, we will want to give a small priority boost to
some requests/queues but not so much that we perturb the user controlled
order. As such we shift the user priority bits higher leaving ourselves
a few low priority bits for our bumping.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            | 2 +-
 drivers/gpu/drm/i915/i915_gem_context.c    | 9 +++++----
 drivers/gpu/drm/i915/i915_scheduler.h      | 6 ++++++
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 8 +++++---
 4 files changed, 17 insertions(+), 8 deletions(-)