[2/2] drm/sched: Reverse run-queue priority enumeration

Message ID 20231124052752.6915-6-ltuikov89@gmail.com
State Not Applicable
Series Make scheduling of the same index, the same

Commit Message

Luben Tuikov Nov. 24, 2023, 5:27 a.m. UTC
Reverse run-queue priority enumeration such that the highest priority is now 0,
and the priority diminishes with each consecutive integer.

Run-queues correspond to priorities. To an external observer, a scheduler
created with a single run-queue and one created with DRM_SCHED_PRIORITY_COUNT
run-queues should always schedule sched->sched_rq[0] with the same "priority",
since that run-queue index exists in both schedulers, whether a scheduler has
one run-queue or many. This patch makes it so.

In other words, the "priority" of sched->sched_rq[n], n >= 0, is the same for
any scheduler created with any allowable number of run-queues (priorities), 0
to DRM_SCHED_PRIORITY_COUNT.

Cc: Rob Clark <robdclark@gmail.com>
Cc: Abhinav Kumar <quic_abhinavk@quicinc.com>
Cc: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Cc: Danilo Krummrich <dakr@redhat.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: linux-arm-msm@vger.kernel.org
Cc: freedreno@lists.freedesktop.org
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Luben Tuikov <ltuikov89@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c  |  2 +-
 drivers/gpu/drm/msm/msm_gpu.h            |  2 +-
 drivers/gpu/drm/scheduler/sched_entity.c |  7 ++++---
 drivers/gpu/drm/scheduler/sched_main.c   | 15 +++++++--------
 include/drm/gpu_scheduler.h              |  6 +++---
 5 files changed, 16 insertions(+), 16 deletions(-)
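
With the reordering, walking up from run-queue index 0 always scans from the
highest priority down, for any number of run-queues. A minimal userspace
sketch of this loop shape (standalone illustration, not part of the patch;
it reuses the enum values the patch introduces):

#include <stdio.h>

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_KERNEL,	/* 0, highest */
	DRM_SCHED_PRIORITY_HIGH,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_LOW,

	DRM_SCHED_PRIORITY_COUNT
};

/* Scan highest-priority first by walking up from index 0. rq[0] means
 * the same thing whether num_rqs is 1 or DRM_SCHED_PRIORITY_COUNT.
 */
static void scan(unsigned int num_rqs)
{
	unsigned int i;

	for (i = DRM_SCHED_PRIORITY_KERNEL; i < num_rqs; i++)
		printf("num_rqs=%u: visiting rq[%u]\n", num_rqs, i);
}

int main(void)
{
	scan(1);			/* visits rq[0] only */
	scan(DRM_SCHED_PRIORITY_COUNT);	/* rq[0] through rq[3] */
	return 0;
}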

Comments

Christian König Nov. 24, 2023, 8:04 a.m. UTC | #1
Am 24.11.23 um 06:27 schrieb Luben Tuikov:
> Reverse run-queue priority enumeration such that the highest priority is now 0,
> and the priority diminishes with each consecutive integer.
>
> [snip]
>
> diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
> index cb7445be3cbb4e..6e2b02e45e3a32 100644
> --- a/drivers/gpu/drm/scheduler/sched_entity.c
> +++ b/drivers/gpu/drm/scheduler/sched_entity.c
> @@ -81,14 +81,15 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>   		 */
>   		pr_warn("%s: called with uninitialized scheduler\n", __func__);
>   	} else if (num_sched_list) {
> -		/* The "priority" of an entity cannot exceed the number
> -		 * of run-queues of a scheduler.
> +		/* The "priority" of an entity cannot exceed the number of
> +		 * run-queues of a scheduler. Choose the lowest priority
> +		 * available.
>   		 */
>   		if (entity->priority >= sched_list[0]->num_rqs) {
>   			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
>   				entity->priority, sched_list[0]->num_rqs);
>   			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
> -						 (s32) DRM_SCHED_PRIORITY_LOW);
> +						 (s32) DRM_SCHED_PRIORITY_KERNEL);

That seems to be a no-op. You basically say max_t(..., num_rqs - 1, 0);
this will always be num_rqs - 1.

Apart from that looks good to me.

Christian.

Luben Tuikov Nov. 24, 2023, 8:22 a.m. UTC | #2
On 2023-11-24 03:04, Christian König wrote:
> Am 24.11.23 um 06:27 schrieb Luben Tuikov:
>> Reverse run-queue priority enumeration such that the highest priority is now 0,
>> and the priority diminishes with each consecutive integer.
>>
>> [snip]
>>
>>   		if (entity->priority >= sched_list[0]->num_rqs) {
>>   			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
>>   				entity->priority, sched_list[0]->num_rqs);
>>   			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
>> -						 (s32) DRM_SCHED_PRIORITY_LOW);
>> +						 (s32) DRM_SCHED_PRIORITY_KERNEL);
> 
> That seems to be a no-op. You basically say max_t(..., num_rqs - 1, 0);
> this will always be num_rqs - 1.

This protects against num_rqs being equal to 0, in which case we select KERNEL (0).
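
Concretely (a standalone sketch with hypothetical values, not code from the
patch; plain int stands in for s32):

#include <stdio.h>

/* Kernel-style max_t(), reduced to what matters here. */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int num_rqs = 0;	/* hypothetical malformed input */

	/* Unsigned arithmetic wraps: 0 - 1 == UINT_MAX, so an unsigned
	 * clamp would pick a huge out-of-bounds index.
	 */
	printf("num_rqs - 1 = %u\n", num_rqs - 1);

	/* Signed, as in the patch: (s32)0 - 1 == -1, and
	 * max_t(s32, -1, DRM_SCHED_PRIORITY_KERNEL) == 0, i.e. KERNEL.
	 */
	printf("clamped priority = %d\n", max_t(int, (int)num_rqs - 1, 0));

	return 0;
}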

This comes from "[PATCH] drm/sched: Fix bounds limiting when given a malformed entity"
which I sent yesterday (Message-ID: <20231123122422.167832-2-ltuikov89@gmail.com>).

Could you R-B that patch too?

> 
> Apart from that looks good to me.

Okay, could you R-B this patch then?
Christian König Nov. 24, 2023, 9:38 a.m. UTC | #3
Am 24.11.23 um 09:22 schrieb Luben Tuikov:
> On 2023-11-24 03:04, Christian König wrote:
>> Am 24.11.23 um 06:27 schrieb Luben Tuikov:
>>> Reverse run-queue priority enumeration such that the highest priority is now 0,
>>> and the priority diminishes with each consecutive integer.
>>>
>>> [snip]
>>>
>>>    		if (entity->priority >= sched_list[0]->num_rqs) {
>>>    			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
>>>    				entity->priority, sched_list[0]->num_rqs);
>>>    			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
>>> -						 (s32) DRM_SCHED_PRIORITY_LOW);
>>> +						 (s32) DRM_SCHED_PRIORITY_KERNEL);
>> That seems to be a no-op. You basically say max_t(..., num_rqs - 1, 0);
>> this will always be num_rqs - 1.
> This protects against num_rqs being equal to 0, in which case we select KERNEL (0).

Ah! That's also why you convert it to signed! I was already wondering why
you do this.

>
> This comes from "[PATCH] drm/sched: Fix bounds limiting when given a malformed entity"
> which I sent yesterday (Message-ID: <20231123122422.167832-2-ltuikov89@gmail.com>).

I can't find that one in my inbox anywhere, but I was able to find it in
patchwork.

> Could you R-B that patch too?

I would add a comment because the intention of the max_t(s32, ...) call is
really not obvious here.

With that done, feel free to add my R-b to both patches.

Regards,
Christian.


>
>> Apart from that looks good to me.
> Okay, could you R-B this patch then?
Luben Tuikov Nov. 25, 2023, 4:22 a.m. UTC | #4
On 2023-11-24 04:38, Christian König wrote:
> Am 24.11.23 um 09:22 schrieb Luben Tuikov:
>> On 2023-11-24 03:04, Christian König wrote:
>>> Am 24.11.23 um 06:27 schrieb Luben Tuikov:
>>>> Reverse run-queue priority enumeration such that the highest priority is now 0,
>>>> and the priority diminishes with each consecutive integer.
>>>>
>>>> [snip]
>>>>
>>>>    		if (entity->priority >= sched_list[0]->num_rqs) {
>>>>    			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
>>>>    				entity->priority, sched_list[0]->num_rqs);
>>>>    			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
>>>> -						 (s32) DRM_SCHED_PRIORITY_LOW);
>>>> +						 (s32) DRM_SCHED_PRIORITY_KERNEL);
>>> That seems to be a no-op. You basically say max_t(..., num_rqs - 1, 0);
>>> this will always be num_rqs - 1.
>> This protects against num_rqs being equal to 0, in which case we select KERNEL (0).
> 
> Ah! That's also why you convert it to signed! I was already wondering why
> you do this.

I thought it was clear since we're doing,

	num_rqs - 1

and there's no guarantee that the result is non-negative even though the
variable num_rqs itself is non-negative. But I've added an explanation of
what's going on here.

>>
>> This comes from "[PATCH] drm/sched: Fix bounds limiting when given a malformed entity"
>> which I sent yesterday (Message-ID: <20231123122422.167832-2-ltuikov89@gmail.com>).
> 
> I can't find that one in my inbox anywhere, but I was able to find it in
> patchwork.

Did you check your gmail?
 
>> Could you R-B that patch too?
> 
> I would add a comment because the intention of the max_t(s32, ...) call is
> really not obvious here.
> 
> With that done, feel free to add my R-b to both patches.

Done.

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 1a25931607c514..71a5cf37b472d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -325,7 +325,7 @@  void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 	int i;
 
 	/* Signal all jobs not yet scheduled */
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 		spin_lock(&rq->lock);
 		list_for_each_entry(s_entity, &rq->entities, list) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index eb0c97433e5f8a..2bfcb222e35338 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -347,7 +347,7 @@  struct msm_gpu_perfcntr {
  * DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
  * cases, so we don't use it (no need for kernel generated jobs).
  */
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_LOW)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
 
 /**
  * struct msm_file_private - per-drm_file context
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index cb7445be3cbb4e..6e2b02e45e3a32 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,14 +81,15 @@  int drm_sched_entity_init(struct drm_sched_entity *entity,
 		 */
 		pr_warn("%s: called with uninitialized scheduler\n", __func__);
 	} else if (num_sched_list) {
-		/* The "priority" of an entity cannot exceed the number
-		 * of run-queues of a scheduler.
+		/* The "priority" of an entity cannot exceed the number of
+		 * run-queues of a scheduler. Choose the lowest priority
+		 * available.
 		 */
 		if (entity->priority >= sched_list[0]->num_rqs) {
 			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
 				entity->priority, sched_list[0]->num_rqs);
 			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
-						 (s32) DRM_SCHED_PRIORITY_LOW);
+						 (s32) DRM_SCHED_PRIORITY_KERNEL);
 		}
 		entity->rq = sched_list[0]->sched_rq[entity->priority];
 	}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index b6d7bc49ff6ef4..682aebe96db781 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -1051,8 +1051,9 @@  drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 	struct drm_sched_entity *entity;
 	int i;
 
-	/* Kernel run queue has higher priority than normal run queue*/
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+	/* Start with the highest priority.
+	 */
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
 			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
 			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
@@ -1291,7 +1292,7 @@  int drm_sched_init(struct drm_gpu_scheduler *sched,
 	if (!sched->sched_rq)
 		goto Out_free;
 	sched->num_rqs = num_rqs;
-	for (i = DRM_SCHED_PRIORITY_LOW; i < sched->num_rqs; i++) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
 		if (!sched->sched_rq[i])
 			goto Out_unroll;
@@ -1312,7 +1313,7 @@  int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->ready = true;
 	return 0;
 Out_unroll:
-	for (--i ; i >= DRM_SCHED_PRIORITY_LOW; i--)
+	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
 		kfree(sched->sched_rq[i]);
 Out_free:
 	kfree(sched->sched_rq);
@@ -1338,7 +1339,7 @@  void drm_sched_fini(struct drm_gpu_scheduler *sched)
 
 	drm_sched_wqueue_stop(sched);
 
-	for (i = sched->num_rqs - 1; i >= DRM_SCHED_PRIORITY_LOW; i--) {
+	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
 		struct drm_sched_rq *rq = sched->sched_rq[i];
 
 		spin_lock(&rq->lock);
@@ -1390,9 +1391,7 @@  void drm_sched_increase_karma(struct drm_sched_job *bad)
 	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 		atomic_inc(&bad->karma);
 
-		for (i = DRM_SCHED_PRIORITY_LOW;
-		     i < min_t(typeof(sched->num_rqs), sched->num_rqs, DRM_SCHED_PRIORITY_KERNEL);
-		     i++) {
+		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
 			struct drm_sched_rq *rq = sched->sched_rq[i];
 
 			spin_lock(&rq->lock);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d8e2d84d9223e3..5acc64954a8830 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -63,10 +63,10 @@  struct drm_file;
  * to an array, and as such should start at 0.
  */
 enum drm_sched_priority {
-	DRM_SCHED_PRIORITY_LOW,
-	DRM_SCHED_PRIORITY_NORMAL,
-	DRM_SCHED_PRIORITY_HIGH,
 	DRM_SCHED_PRIORITY_KERNEL,
+	DRM_SCHED_PRIORITY_HIGH,
+	DRM_SCHED_PRIORITY_NORMAL,
+	DRM_SCHED_PRIORITY_LOW,
 
 	DRM_SCHED_PRIORITY_COUNT
 };
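
The NR_SCHED_PRIORITIES flip in msm_gpu.h is easy to check by hand. A minimal
standalone check (not part of the patch) with the reordered enum values:

#include <assert.h>

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_KERNEL,	/* 0, highest */
	DRM_SCHED_PRIORITY_HIGH,	/* 1 */
	DRM_SCHED_PRIORITY_NORMAL,	/* 2 */
	DRM_SCHED_PRIORITY_LOW,		/* 3 */

	DRM_SCHED_PRIORITY_COUNT	/* 4 */
};

/* After this patch the subtraction is flipped so the macro still spans
 * HIGH..LOW with KERNEL excluded: 1 + 3 - 1 == 3, the same value as
 * 1 + HIGH - LOW == 1 + 2 - 0 == 3 under the old ordering.
 */
#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)

int main(void)
{
	assert(NR_SCHED_PRIORITIES == 3);
	return 0;
}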