@@ -325,7 +325,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
int i;
/* Signal all jobs not yet scheduled */
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
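For context on the walks in this series: every downward walk over sched_rq[] becomes an upward walk, and the two visit the priorities in the same order once the enum is reversed (see the last hunk). A minimal standalone model in plain userspace C, not kernel code, with both numberings written out here as assumptions rather than taken from the headers:

#include <stdio.h>

/* Old numbering: LOW == 0 ... KERNEL == 3.  New numbering: KERNEL == 0 ... LOW == 3. */
enum old_prio { OLD_LOW, OLD_NORMAL, OLD_HIGH, OLD_KERNEL, OLD_COUNT };
enum new_prio { NEW_KERNEL, NEW_HIGH, NEW_NORMAL, NEW_LOW, NEW_COUNT };

int main(void)
{
	static const char * const old_name[] = { "LOW", "NORMAL", "HIGH", "KERNEL" };
	static const char * const new_name[] = { "KERNEL", "HIGH", "NORMAL", "LOW" };
	int i;

	/* Old walk: from the last run-queue down to LOW, i.e. highest to lowest. */
	for (i = OLD_COUNT - 1; i >= OLD_LOW; i--)
		printf("old: %s\n", old_name[i]);

	/* New walk: from KERNEL (0) upward, visiting the same priorities in the same order. */
	for (i = NEW_KERNEL; i < NEW_COUNT; i++)
		printf("new: %s\n", new_name[i]);

	return 0;
}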
@@ -347,7 +347,7 @@ struct msm_gpu_perfcntr {
* DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
* cases, so we don't use it (no need for kernel generated jobs).
*/
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_LOW)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
/**
* struct msm_file_private - per-drm_file context
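The operands of NR_SCHED_PRIORITIES swap because LOW is now the numerically largest level, but the macro still counts the levels msm uses (HIGH through LOW, everything except KERNEL). A standalone compile-time check of that arithmetic, assuming the old and new numberings:

/* Old and new numberings as assumed above; both definitions evaluate to 3. */
enum old_prio { OLD_LOW, OLD_NORMAL, OLD_HIGH, OLD_KERNEL };
enum new_prio { NEW_KERNEL, NEW_HIGH, NEW_NORMAL, NEW_LOW };

_Static_assert(1 + OLD_HIGH - OLD_LOW == 3, "old NR_SCHED_PRIORITIES");
_Static_assert(1 + NEW_LOW - NEW_HIGH == 3, "new NR_SCHED_PRIORITIES");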
@@ -82,13 +82,14 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
/* The "priority" of an entity cannot exceed the number of run-queues of a
- * scheduler. Protect against num_rqs being 0, by converting to signed.
+ * scheduler. Protect against num_rqs being 0, by converting to signed. Choose
+ * the lowest priority available.
*/
if (entity->priority >= sched_list[0]->num_rqs) {
drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
entity->priority, sched_list[0]->num_rqs);
entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
- (s32) DRM_SCHED_PRIORITY_LOW);
+ (s32) DRM_SCHED_PRIORITY_KERNEL);
}
entity->rq = sched_list[0]->sched_rq[entity->priority];
}
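The fallback in max_t() moves from LOW to KERNEL: with the reversed enum, num_rqs - 1 is the lowest priority a scheduler actually has, and KERNEL (0) only wins in the degenerate num_rqs == 0 case. A standalone sketch of the clamp, with max_t() modelled by a plain ternary:

#include <assert.h>

enum { KERNEL, HIGH, NORMAL, LOW };

static int clamp_priority(unsigned int num_rqs)
{
	int idx = (int)num_rqs - 1;

	return idx > KERNEL ? idx : KERNEL;	/* models max_t(s32, num_rqs - 1, KERNEL) */
}

int main(void)
{
	assert(clamp_priority(4) == LOW);	/* four run-queues: clamp to LOW (3) */
	assert(clamp_priority(2) == HIGH);	/* two run-queues: clamp to HIGH (1) */
	assert(clamp_priority(0) == KERNEL);	/* degenerate case: fall back to KERNEL (0) */
	return 0;
}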
@@ -1051,8 +1051,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
struct drm_sched_entity *entity;
int i;
- /* Kernel run queue has higher priority than normal run queue*/
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+ /* Start with the highest priority.
+ */
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
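The selection loop keeps its first-hit-wins shape; only the walk direction changes, so the kernel queue (index 0) is still tried before HIGH, NORMAL and LOW. A standalone sketch of that order, where pick_from_rq() is a hypothetical stand-in for drm_sched_rq_select_entity_fifo()/_rr():

#include <stddef.h>
#include <stdio.h>

enum { KERNEL, HIGH, NORMAL, LOW, COUNT };

/* Hypothetical helper: returns the queue's runnable entity, or NULL if empty. */
static const char *pick_from_rq(const char *rq)
{
	return rq;
}

int main(void)
{
	/* Only the NORMAL and LOW queues have runnable entities here. */
	const char *rqs[COUNT] = { NULL, NULL, "normal-entity", "low-entity" };
	const char *entity = NULL;
	int i;

	/* Walk from the highest priority (KERNEL == 0); first hit wins. */
	for (i = KERNEL; i < COUNT; i++) {
		entity = pick_from_rq(rqs[i]);
		if (entity)
			break;
	}

	printf("selected: %s\n", entity);	/* prints "selected: normal-entity" */
	return 0;
}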
@@ -1291,7 +1292,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
if (!sched->sched_rq)
goto Out_free;
sched->num_rqs = num_rqs;
- for (i = DRM_SCHED_PRIORITY_LOW; i < sched->num_rqs; i++) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
if (!sched->sched_rq[i])
goto Out_unroll;
@@ -1312,7 +1313,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->ready = true;
return 0;
Out_unroll:
- for (--i ; i >= DRM_SCHED_PRIORITY_LOW; i--)
+ for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
kfree(sched->sched_rq[i]);
Out_free:
kfree(sched->sched_rq);
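The unroll path only changes its floor: on an allocation failure at index i it still frees sched_rq[0..i-1], and the walk now bottoms out at KERNEL (0). A standalone model of the allocate-then-unroll pattern, with the per-rq allocation simplified to calloc():

#include <stdlib.h>

enum { KERNEL, HIGH, NORMAL, LOW, COUNT };

/* Allocate one object per priority level; on failure, free only what was
 * already allocated, unrolling back down to KERNEL (index 0).
 */
static int alloc_rqs(void *rqs[COUNT])
{
	int i;

	for (i = KERNEL; i < COUNT; i++) {
		rqs[i] = calloc(1, 64);
		if (!rqs[i])
			goto out_unroll;
	}
	return 0;

out_unroll:
	for (--i; i >= KERNEL; i--)
		free(rqs[i]);
	return -1;
}

int main(void)
{
	void *rqs[COUNT] = { NULL };
	int ret = alloc_rqs(rqs);
	int i;

	if (!ret)
		for (i = KERNEL; i < COUNT; i++)
			free(rqs[i]);
	return ret ? 1 : 0;
}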
@@ -1338,7 +1339,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
drm_sched_wqueue_stop(sched);
- for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
@@ -1390,9 +1391,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
- for (i = DRM_SCHED_PRIORITY_LOW;
- i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
- i++) {
+ for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
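Karma must not be spread to the kernel run-queue. The old loop capped the walk below KERNEL (then the top value); the new loop starts one past KERNEL, at HIGH. A standalone check, under the assumed numberings, that both bounds visit exactly the non-kernel queues:

#include <assert.h>
#include <stdio.h>

enum old_prio { OLD_LOW, OLD_NORMAL, OLD_HIGH, OLD_KERNEL, OLD_COUNT };
enum new_prio { NEW_KERNEL, NEW_HIGH, NEW_NORMAL, NEW_LOW, NEW_COUNT };

int main(void)
{
	int num_rqs = OLD_COUNT;	/* four run-queues under either numbering */
	int visited_old = 0, visited_new = 0;
	int i;

	/* Old bound: cap the walk at min(num_rqs, KERNEL), modelling the removed min_t(). */
	for (i = OLD_LOW; i < (num_rqs < OLD_KERNEL ? num_rqs : OLD_KERNEL); i++)
		visited_old++;

	/* New bound: start one past KERNEL (index 0) and walk to the end. */
	for (i = NEW_HIGH; i < num_rqs; i++)
		visited_new++;

	/* Both walks touch exactly the three non-kernel run-queues. */
	assert(visited_old == 3 && visited_new == 3);
	printf("non-kernel run-queues visited: old=%d new=%d\n", visited_old, visited_new);
	return 0;
}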
@@ -63,10 +63,10 @@ struct drm_file;
* to an array, and as such should start at 0.
*/
enum drm_sched_priority {
- DRM_SCHED_PRIORITY_LOW,
- DRM_SCHED_PRIORITY_NORMAL,
- DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
+ DRM_SCHED_PRIORITY_HIGH,
+ DRM_SCHED_PRIORITY_NORMAL,
+ DRM_SCHED_PRIORITY_LOW,
DRM_SCHED_PRIORITY_COUNT
};
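The hunk above is the heart of the change: the smallest value is now the highest priority, and it doubles as the index into sched_rq[]. A standalone compile-time restatement of the new invariants (not kernel code; the enum is simply copied from the hunk):

/* New ordering copied from the hunk above: lower value == higher priority. */
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_LOW,

	DRM_SCHED_PRIORITY_COUNT
};

_Static_assert(DRM_SCHED_PRIORITY_KERNEL == 0, "array indexing still starts at 0");
_Static_assert(DRM_SCHED_PRIORITY_KERNEL < DRM_SCHED_PRIORITY_HIGH &&
	       DRM_SCHED_PRIORITY_HIGH < DRM_SCHED_PRIORITY_NORMAL &&
	       DRM_SCHED_PRIORITY_NORMAL < DRM_SCHED_PRIORITY_LOW,
	       "smaller value means higher priority");
_Static_assert(DRM_SCHED_PRIORITY_COUNT == 4, "four priority levels");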