
[v2] drm/i915: Pack params to engine->schedule() into a struct

Message ID 20180411230026.17909-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson April 11, 2018, 11 p.m. UTC
Today we only want to pass along the priority to engine->schedule(), but
in the future we want to have much more control over the various aspects
of the GPU during a context's execution, for example controlling the
frequency allowed. As we need an ever growing number of parameters for
scheduling, move those into a struct for convenience.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gvt/scheduler.c          |  2 +-
 drivers/gpu/drm/i915/i915_drv.h               |  3 +-
 drivers/gpu/drm/i915/i915_gem.c               | 18 ++---
 drivers/gpu/drm/i915/i915_gem_context.c       |  8 +--
 drivers/gpu/drm/i915/i915_gem_context.h       | 13 +---
 drivers/gpu/drm/i915/i915_gpu_error.c         |  8 +--
 drivers/gpu/drm/i915/i915_gpu_error.h         |  5 +-
 drivers/gpu/drm/i915/i915_request.c           |  4 +-
 drivers/gpu/drm/i915/i915_request.h           | 39 +----------
 drivers/gpu/drm/i915/i915_scheduler.h         | 65 +++++++++++++++++++
 drivers/gpu/drm/i915/intel_display.c          |  4 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        | 18 ++++-
 drivers/gpu/drm/i915/intel_guc_submission.c   |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c              | 20 +++---
 drivers/gpu/drm/i915/intel_ringbuffer.h       |  4 +-
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  4 +-
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  8 ++-
 17 files changed, 133 insertions(+), 92 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_scheduler.h
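
The gist of the change, lifted from the diff below and trimmed for brevity:
the bare priority integer becomes a parameter block that is passed around
by const pointer.

    /* i915_scheduler.h (new file) */
    struct i915_sched_attr {
    	int priority;
    };

    /* intel_ringbuffer.h: the engine hook now takes the attribute block */
    void		(*schedule)(struct i915_request *request,
    				    const struct i915_sched_attr *attr);

    /* callers can build the attributes on the spot, e.g. intel_display.c */
    i915_gem_object_wait_priority(obj, 0, &(struct i915_sched_attr){
    				  .priority = I915_PRIORITY_DISPLAY,
    				  });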

Comments

Tvrtko Ursulin April 17, 2018, 9:12 a.m. UTC | #1
On 12/04/2018 00:00, Chris Wilson wrote:
> Today we only want to pass along the priority to engine->schedule(), but
> in the future we want to have much more control over the various aspects
> of the GPU during a context's execution, for example controlling the
> frequency allowed. As we need an ever growing number of parameters for
> scheduling, move those into a struct for convenience.

Sounds sensible in principle. Do you already have a tree implementing 
ctx freq on top of this so I can get a better idea how exactly it will 
be used?

For instance, I am at the moment unsure whether it would not be enough 
to store the frequencies in the context, which is available via requests 
at all scheduling sites.
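
For reference, the request-add path in the patch still reads the 
attributes straight out of the context at the call site:

    rcu_read_lock();
    if (engine->schedule)
    	engine->schedule(request, &request->ctx->sched);
    rcu_read_unlock();

so the question is what passing them in explicitly buys over simply 
looking at rq->ctx->sched at each scheduling site.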

> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/gvt/scheduler.c          |  2 +-
>   drivers/gpu/drm/i915/i915_drv.h               |  3 +-
>   drivers/gpu/drm/i915/i915_gem.c               | 18 ++---
>   drivers/gpu/drm/i915/i915_gem_context.c       |  8 +--
>   drivers/gpu/drm/i915/i915_gem_context.h       | 13 +---
>   drivers/gpu/drm/i915/i915_gpu_error.c         |  8 +--
>   drivers/gpu/drm/i915/i915_gpu_error.h         |  5 +-
>   drivers/gpu/drm/i915/i915_request.c           |  4 +-
>   drivers/gpu/drm/i915/i915_request.h           | 39 +----------
>   drivers/gpu/drm/i915/i915_scheduler.h         | 65 +++++++++++++++++++

Separating code movement from functional changes is our usual best 
practice, so if it is not too hard it would be good to do it like that.

Regards,

Tvrtko

Chris Wilson April 17, 2018, 9:49 a.m. UTC | #2
Quoting Tvrtko Ursulin (2018-04-17 10:12:05)
> 
> On 12/04/2018 00:00, Chris Wilson wrote:
> > Today we only want to pass along the priority to engine->schedule(), but
> > in the future we want to have much more control over the various aspects
> > of the GPU during a context's execution, for example controlling the
> > frequency allowed. As we need an ever growing number of parameters for
> > scheduling, move those into a struct for convenience.
> 
> Sounds sensible in principle. Do you already have a tree implementing 
> ctx freq on top of this so I can get a better idea how exactly it will 
> be used?

It's because I want to pass in frequency adjustments from the caller and
percolate those through the dependencies, rather than pull them from the
context at that later time. It also gets around the thorny issue of what
to do when the context is changed at runtime. What I'm playing around with is
https://cgit.freedesktop.org/~ickle/linux-2.6/commit/?h=prescheduler&id=e734f100cd74b340ff9675811268a30cbe9a7959
-Chris
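
A purely illustrative sketch of that direction (the extra field is made 
up here and is not taken from the linked prescheduler branch): the 
struct gives the caller a place to hand in such hints next to the 
priority, which the scheduler can then percolate through the dependency 
tree in the same way it already propagates the priority.

    struct i915_sched_attr {
    	int priority;
    	/*
    	 * Hypothetical example of a future member: a frequency hint
    	 * supplied by the caller and pushed along to the signalers
    	 * together with the priority bump.
    	 */
    	unsigned int min_frequency;
    };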

Patch

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 638abe84857c..f3d21849b0cb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1135,7 +1135,7 @@  int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 		return PTR_ERR(s->shadow_ctx);
 
 	if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
-		s->shadow_ctx->priority = INT_MAX;
+		s->shadow_ctx->sched.priority = INT_MAX;
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 649c0f2f3bae..fad860f54386 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -75,6 +75,7 @@ 
 #include "i915_gem_timeline.h"
 #include "i915_gpu_error.h"
 #include "i915_request.h"
+#include "i915_scheduler.h"
 #include "i915_vma.h"
 
 #include "intel_gvt.h"
@@ -3159,7 +3160,7 @@  int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 			 struct intel_rps_client *rps);
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
-				  int priority);
+				  const struct i915_sched_attr *attr);
 #define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
 
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 60cf7cfc24ee..61e6be84362f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -563,7 +563,8 @@  i915_gem_object_wait_reservation(struct reservation_object *resv,
 	return timeout;
 }
 
-static void __fence_set_priority(struct dma_fence *fence, int prio)
+static void __fence_set_priority(struct dma_fence *fence,
+				 const struct i915_sched_attr *attr)
 {
 	struct i915_request *rq;
 	struct intel_engine_cs *engine;
@@ -576,11 +577,12 @@  static void __fence_set_priority(struct dma_fence *fence, int prio)
 
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(rq, prio);
+		engine->schedule(rq, attr);
 	rcu_read_unlock();
 }
 
-static void fence_set_priority(struct dma_fence *fence, int prio)
+static void fence_set_priority(struct dma_fence *fence,
+			       const struct i915_sched_attr *attr)
 {
 	/* Recurse once into a fence-array */
 	if (dma_fence_is_array(fence)) {
@@ -588,16 +590,16 @@  static void fence_set_priority(struct dma_fence *fence, int prio)
 		int i;
 
 		for (i = 0; i < array->num_fences; i++)
-			__fence_set_priority(array->fences[i], prio);
+			__fence_set_priority(array->fences[i], attr);
 	} else {
-		__fence_set_priority(fence, prio);
+		__fence_set_priority(fence, attr);
 	}
 }
 
 int
 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			      unsigned int flags,
-			      int prio)
+			      const struct i915_sched_attr *attr)
 {
 	struct dma_fence *excl;
 
@@ -612,7 +614,7 @@  i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 			return ret;
 
 		for (i = 0; i < count; i++) {
-			fence_set_priority(shared[i], prio);
+			fence_set_priority(shared[i], attr);
 			dma_fence_put(shared[i]);
 		}
 
@@ -622,7 +624,7 @@  i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 	}
 
 	if (excl) {
-		fence_set_priority(excl, prio);
+		fence_set_priority(excl, attr);
 		dma_fence_put(excl);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5cfac0255758..143e6bf0d545 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -280,7 +280,7 @@  __create_hw_context(struct drm_i915_private *dev_priv,
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
-	ctx->priority = I915_PRIORITY_NORMAL;
+	ctx->sched.priority = I915_PRIORITY_NORMAL;
 
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
@@ -430,7 +430,7 @@  i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 		return ctx;
 
 	i915_gem_context_clear_bannable(ctx);
-	ctx->priority = prio;
+	ctx->sched.priority = prio;
 	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -747,7 +747,7 @@  int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
-		args->value = ctx->priority;
+		args->value = ctx->sched.priority;
 		break;
 	default:
 		ret = -EINVAL;
@@ -820,7 +820,7 @@  int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				 !capable(CAP_SYS_NICE))
 				ret = -EPERM;
 			else
-				ctx->priority = priority;
+				ctx->sched.priority = priority;
 		}
 		break;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 7854262ddfd9..b12a8a8c5af9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -137,18 +137,7 @@  struct i915_gem_context {
 	 */
 	u32 user_handle;
 
-	/**
-	 * @priority: execution and service priority
-	 *
-	 * All clients are equal, but some are more equal than others!
-	 *
-	 * Requests from a context with a greater (more positive) value of
-	 * @priority will be executed before those with a lower @priority
-	 * value, forming a simple QoS.
-	 *
-	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
-	 */
-	int priority;
+	struct i915_sched_attr sched;
 
 	/** ggtt_offset_bias: placement restriction for context objects */
 	u32 ggtt_offset_bias;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index effaf982b19b..35cf001ef26b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -411,7 +411,7 @@  static void error_print_request(struct drm_i915_error_state_buf *m,
 
 	err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x, prio %d, emitted %dms ago, head %08x, tail %08x\n",
 		   prefix, erq->pid, erq->ban_score,
-		   erq->context, erq->seqno, erq->priority,
+		   erq->context, erq->seqno, erq->sched.priority,
 		   jiffies_to_msecs(jiffies - erq->jiffies),
 		   erq->head, erq->tail);
 }
@@ -422,7 +422,7 @@  static void error_print_context(struct drm_i915_error_state_buf *m,
 {
 	err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
 		   header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
-		   ctx->priority, ctx->ban_score, bannable(ctx),
+		   ctx->sched.priority, ctx->ban_score, bannable(ctx),
 		   ctx->guilty, ctx->active);
 }
 
@@ -1278,7 +1278,7 @@  static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
 	erq->context = request->ctx->hw_id;
-	erq->priority = request->priotree.priority;
+	erq->sched = request->priotree.attr;
 	erq->ban_score = atomic_read(&request->ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
@@ -1372,7 +1372,7 @@  static void record_context(struct drm_i915_error_context *e,
 
 	e->handle = ctx->user_handle;
 	e->hw_id = ctx->hw_id;
-	e->priority = ctx->priority;
+	e->sched = ctx->sched;
 	e->ban_score = atomic_read(&ctx->ban_score);
 	e->bannable = i915_gem_context_is_bannable(ctx);
 	e->guilty = atomic_read(&ctx->guilty_count);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index c05b6034d718..cbf33aa59b22 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -20,6 +20,7 @@ 
 #include "i915_gem.h"
 #include "i915_gem_gtt.h"
 #include "i915_params.h"
+#include "i915_scheduler.h"
 
 struct drm_i915_private;
 struct intel_overlay_error_state;
@@ -122,11 +123,11 @@  struct i915_gpu_state {
 			pid_t pid;
 			u32 handle;
 			u32 hw_id;
-			int priority;
 			int ban_score;
 			int active;
 			int guilty;
 			bool bannable;
+			struct i915_sched_attr sched;
 		} context;
 
 		struct drm_i915_error_object {
@@ -147,11 +148,11 @@  struct i915_gpu_state {
 			long jiffies;
 			pid_t pid;
 			u32 context;
-			int priority;
 			int ban_score;
 			u32 seqno;
 			u32 head;
 			u32 tail;
+			struct i915_sched_attr sched;
 		} *requests, execlist[EXECLIST_MAX_PORTS];
 		unsigned int num_ports;
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9ca9c24b4421..e1675d44ac92 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -191,7 +191,7 @@  i915_priotree_init(struct i915_priotree *pt)
 	INIT_LIST_HEAD(&pt->signalers_list);
 	INIT_LIST_HEAD(&pt->waiters_list);
 	INIT_LIST_HEAD(&pt->link);
-	pt->priority = I915_PRIORITY_INVALID;
+	pt->attr.priority = I915_PRIORITY_INVALID;
 }
 
 static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
@@ -1062,7 +1062,7 @@  void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 */
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(request, request->ctx->priority);
+		engine->schedule(request, &request->ctx->sched);
 	rcu_read_unlock();
 
 	local_bh_disable();
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7d6eb82eeb91..5435878addd6 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -29,6 +29,7 @@ 
 
 #include "i915_gem.h"
 #include "i915_sw_fence.h"
+#include "i915_scheduler.h"
 
 #include <uapi/drm/i915_drm.h>
 
@@ -48,44 +49,6 @@  struct intel_signal_node {
 	struct list_head link;
 };
 
-struct i915_dependency {
-	struct i915_priotree *signaler;
-	struct list_head signal_link;
-	struct list_head wait_link;
-	struct list_head dfs_link;
-	unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- */
-struct i915_priotree {
-	struct list_head signalers_list; /* those before us, we depend upon */
-	struct list_head waiters_list; /* those after us, they depend upon us */
-	struct list_head link;
-	int priority;
-};
-
-enum {
-	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
-	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
-	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
-	I915_PRIORITY_INVALID = INT_MIN
-};
-
 struct i915_capture_list {
 	struct i915_capture_list *next;
 	struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
new file mode 100644
index 000000000000..0664caae314b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -0,0 +1,65 @@ 
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_H_
+#define _I915_SCHEDULER_H_
+
+#include <uapi/drm/i915_drm.h>
+
+enum {
+	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+	I915_PRIORITY_INVALID = INT_MIN
+};
+
+struct i915_sched_attr {
+	/**
+	 * @priority: execution and service priority
+	 *
+	 * All clients are equal, but some are more equal than others!
+	 *
+	 * Requests from a context with a greater (more positive) value of
+	 * @priority will be executed before those with a lower @priority
+	 * value, forming a simple QoS.
+	 *
+	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
+	 */
+	int priority;
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendering into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ */
+struct i915_priotree {
+	struct list_head signalers_list; /* those before us, we depend upon */
+	struct list_head waiters_list; /* those after us, they depend upon us */
+	struct list_head link;
+	struct i915_sched_attr attr;
+};
+
+struct i915_dependency {
+	struct i915_priotree *signaler;
+	struct list_head signal_link;
+	struct list_head wait_link;
+	struct list_head dfs_link;
+	unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 020900e08d42..7c34b8c854be 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12839,7 +12839,9 @@  intel_prepare_plane_fb(struct drm_plane *plane,
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
-	i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
+	i915_gem_object_wait_priority(obj, 0, &(struct i915_sched_attr){
+				      .priority = I915_PRIORITY_DISPLAY,
+				      });
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	i915_gem_object_unpin_pages(obj);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a217b3fe5f0b..7cc2892bf93b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1744,17 +1744,29 @@  unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
 	return which;
 }
 
+static void print_sched(struct drm_printer *m,
+			const struct drm_i915_private *i915,
+			const struct i915_sched_attr *sched)
+{
+	if (sched->priority == I915_PRIORITY_INVALID)
+		return;
+
+	drm_printf(m, "prio=%d", sched->priority);
+}
+
 static void print_request(struct drm_printer *m,
 			  struct i915_request *rq,
 			  const char *prefix)
 {
 	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
 
-	drm_printf(m, "%s%x%s [%llx:%x] prio=%d @ %dms: %s\n", prefix,
+	drm_printf(m, "%s%x%s [%llx:%x] ",
+		   prefix,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",
-		   rq->fence.context, rq->fence.seqno,
-		   rq->priotree.priority,
+		   rq->fence.context, rq->fence.seqno);
+	print_sched(m, rq->i915, &rq->priotree.attr);
+	drm_printf(m, " @ %dms: %s\n",
 		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
 		   name);
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 97121230656c..accbbb57b0fe 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -659,7 +659,7 @@  static void port_assign(struct execlist_port *port, struct i915_request *rq)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->priotree.attr.priority;
 }
 
 static inline int port_prio(const struct execlist_port *port)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 665d9e82e954..632d0a630a83 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -176,7 +176,7 @@  static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 
 static inline int rq_prio(const struct i915_request *rq)
 {
-	return rq->priotree.priority;
+	return rq->priotree.attr.priority;
 }
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
@@ -1170,11 +1170,13 @@  pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 	return engine;
 }
 
-static void execlists_schedule(struct i915_request *request, int prio)
+static void execlists_schedule(struct i915_request *request,
+			       const struct i915_sched_attr *attr)
 {
 	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
+	const int prio = attr->priority;
 	LIST_HEAD(dfs);
 
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
@@ -1182,7 +1184,7 @@  static void execlists_schedule(struct i915_request *request, int prio)
 	if (i915_request_completed(request))
 		return;
 
-	if (prio <= READ_ONCE(request->priotree.priority))
+	if (prio <= READ_ONCE(request->priotree.attr.priority))
 		return;
 
 	/* Need BKL in order to use the temporary link inside i915_dependency */
@@ -1224,8 +1226,8 @@  static void execlists_schedule(struct i915_request *request, int prio)
 			if (i915_priotree_signaled(p->signaler))
 				continue;
 
-			GEM_BUG_ON(p->signaler->priority < pt->priority);
-			if (prio > READ_ONCE(p->signaler->priority))
+			GEM_BUG_ON(p->signaler->attr.priority < pt->attr.priority);
+			if (prio > READ_ONCE(p->signaler->attr.priority))
 				list_move_tail(&p->dfs_link, &dfs);
 		}
 	}
@@ -1236,9 +1238,9 @@  static void execlists_schedule(struct i915_request *request, int prio)
 	 * execlists_submit_request()), we can set our own priority and skip
 	 * acquiring the engine locks.
 	 */
-	if (request->priotree.priority == I915_PRIORITY_INVALID) {
+	if (request->priotree.attr.priority == I915_PRIORITY_INVALID) {
 		GEM_BUG_ON(!list_empty(&request->priotree.link));
-		request->priotree.priority = prio;
+		request->priotree.attr = *attr;
 		if (stack.dfs_link.next == stack.dfs_link.prev)
 			return;
 		__list_del_entry(&stack.dfs_link);
@@ -1255,10 +1257,10 @@  static void execlists_schedule(struct i915_request *request, int prio)
 
 		engine = pt_lock_engine(pt, engine);
 
-		if (prio <= pt->priority)
+		if (prio <= pt->attr.priority)
 			continue;
 
-		pt->priority = prio;
+		pt->attr.priority = prio;
 		if (!list_empty(&pt->link)) {
 			__list_del_entry(&pt->link);
 			queue_request(engine, pt, prio);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 256d58487559..5bd0815037a0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,6 +14,7 @@ 
 #include "intel_gpu_commands.h"
 
 struct drm_printer;
+struct i915_sched_attr;
 
 #define I915_CMD_HASH_ORDER 9
 
@@ -460,7 +461,8 @@  struct intel_engine_cs {
 	 *
 	 * Called under the struct_mutex.
 	 */
-	void		(*schedule)(struct i915_request *request, int priority);
+	void		(*schedule)(struct i915_request *request,
+				    const struct i915_sched_attr *attr);
 
 	/*
 	 * Cancel all requests on the hardware, or queued for execution.
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 24f913f26a7b..f7ee54e109ae 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -628,7 +628,7 @@  static int active_engine(void *data)
 		}
 
 		if (arg->flags & TEST_PRIORITY)
-			ctx[idx]->priority =
+			ctx[idx]->sched.priority =
 				i915_prandom_u32_max_state(512, &prng);
 
 		rq[idx] = i915_request_get(new);
@@ -683,7 +683,7 @@  static int __igt_reset_engines(struct drm_i915_private *i915,
 			return err;
 
 		if (flags & TEST_PRIORITY)
-			h.ctx->priority = 1024;
+			h.ctx->sched.priority = 1024;
 	}
 
 	for_each_engine(engine, i915, id) {
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 0481e2e01146..ee7e22d18ff8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -335,12 +335,12 @@  static int live_preempt(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
@@ -407,6 +407,7 @@  static int live_late_preempt(void *arg)
 	struct i915_gem_context *ctx_hi, *ctx_lo;
 	struct spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
+	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
 	int err = -ENOMEM;
 
@@ -458,7 +459,8 @@  static int live_late_preempt(void *arg)
 			goto err_wedged;
 		}
 
-		engine->schedule(rq, I915_PRIORITY_MAX);
+		attr.priority = I915_PRIORITY_MAX;
+		engine->schedule(rq, &attr);
 
 		if (!wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");