
[v2] drm/scheduler: Remove obsolete spinlock.

Message ID 1526484793-14368-1-git-send-email-andrey.grodzovsky@amd.com (mailing list archive)
State New, archived

Commit Message

Andrey Grodzovsky May 16, 2018, 3:33 p.m. UTC
This spinlock is superfluous: any call to drm_sched_entity_push_job
should already be made under a common lock together with the matching
drm_sched_job_init, so that the order of insertion into the queue matches
the job's fence sequence number.

v2:
Improve patch description.
Add function documentation describing the locking considerations.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 15 ++++++++++-----
 include/drm/gpu_scheduler.h               |  1 -
 2 files changed, 10 insertions(+), 6 deletions(-)
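
For illustration, a minimal sketch of the call pattern the commit message relies on, with drm_sched_job_init and drm_sched_entity_push_job serialized under one common lock. The my_ctx structure, its fields, and submit_lock below are hypothetical names used only for this sketch; they are not part of the patch:

/*
 * Hypothetical driver submission path (sketch only): both calls are made
 * while holding the same lock, so the fence sequence number assigned
 * during drm_sched_job_init matches the job's position in the entity's
 * job queue.
 */
static int my_driver_submit_job(struct my_ctx *ctx, struct drm_sched_job *job)
{
	int r;

	mutex_lock(&ctx->submit_lock);		/* the common lock, assumed to exist */

	r = drm_sched_job_init(job, ctx->sched, ctx->entity, ctx);
	if (r) {
		mutex_unlock(&ctx->submit_lock);
		return r;
	}

	drm_sched_entity_push_job(job, ctx->entity);

	mutex_unlock(&ctx->submit_lock);
	return 0;
}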

Comments

Chunming Zhou May 17, 2018, 2:32 a.m. UTC | #1
Yes, everything is clear. Acked-by: Chunming Zhou <david1.zhou@amd.com>


Christian König May 17, 2018, 8:34 a.m. UTC | #2
Reviewed-by: Christian König <christian.koenig@amd.com>


Patch

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 1f1dd70..80dd66c 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -140,7 +140,6 @@  int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 	entity->last_scheduled = NULL;
 
 	spin_lock_init(&entity->rq_lock);
-	spin_lock_init(&entity->queue_lock);
 	spsc_queue_init(&entity->job_queue);
 
 	atomic_set(&entity->fence_seq, 0);
@@ -414,6 +413,10 @@  drm_sched_entity_pop_job(struct drm_sched_entity *entity)
  *
  * @sched_job		The pointer to job required to submit
  *
+ * Note: To guarantee that the order of insertion into the queue matches
+ * the job's fence sequence number, this function should be called under
+ * a common lock together with the matching drm_sched_job_init.
+ *
  * Returns 0 for success, negative error code otherwise.
  */
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
@@ -424,11 +427,8 @@  void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
 	trace_drm_sched_job(sched_job, entity);
 
-	spin_lock(&entity->queue_lock);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
-	spin_unlock(&entity->queue_lock);
-
 	/* first job wakes up scheduler */
 	if (first) {
 		/* Add the entity to the run queue */
@@ -594,7 +594,12 @@  void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);
 
-/* init a sched_job with basic field */
+/**
+ * Initialize a sched_job with its basic fields
+ *
+ * Note: Refer to the drm_sched_entity_push_job documentation
+ * for locking considerations.
+ */
 int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 350a62c..683eb65 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -56,7 +56,6 @@  struct drm_sched_entity {
 	spinlock_t			rq_lock;
 	struct drm_gpu_scheduler	*sched;
 
-	spinlock_t			queue_lock;
 	struct spsc_queue		job_queue;
 
 	atomic_t			fence_seq;