
[v5,6/7] drm/sched: Add drm_sched_start_timeout_unlocked helper

Message ID 20231011235826.585624-7-matthew.brost@intel.com
State New, archived
Series DRM scheduler changes for Xe

Commit Message

Matthew Brost Oct. 11, 2023, 11:58 p.m. UTC
Also add a lockdep assert to drm_sched_start_timeout.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

Comments

Luben Tuikov Oct. 14, 2023, 2:52 a.m. UTC | #1
On 2023-10-11 19:58, Matthew Brost wrote:
> Also add a lockdep assert to drm_sched_start_timeout.
> 
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>

I don't remember sending a Reviewed-by email for this patch.

I'll add the R-B to the commit when I apply and push this patch,
after replying with an R-B email.

Regards,
Luben

Matthew Brost Oct. 16, 2023, 2:57 p.m. UTC | #2
On Fri, Oct 13, 2023 at 10:52:22PM -0400, Luben Tuikov wrote:
> On 2023-10-11 19:58, Matthew Brost wrote:
> > Also add a lockdep assert to drm_sched_start_timeout.
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
> 
> I don't remember sending a Reviewed-by email for this patch.
> 

I believe you did send an R-B:
https://patchwork.freedesktop.org/patch/558222/?series=121745&rev=6

> I'll add the R-B to the commit when I apply and push this patch,
> after replying with an R-B email.
>

Is manually adding an R-B OK if it is in the correct place, as it is in this patch?

Matt

> Regards,
> Luben
Luben Tuikov Oct. 16, 2023, 3:15 p.m. UTC | #3
On 2023-10-16 10:57, Matthew Brost wrote:
> On Fri, Oct 13, 2023 at 10:52:22PM -0400, Luben Tuikov wrote:
>> On 2023-10-11 19:58, Matthew Brost wrote:
>>> Also add a lockdep assert to drm_sched_start_timeout.
>>>
>>> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
>>> Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
>>
>> I don't remember sending a Reviewed-by email for this patch.
>>
> 
> I believe you did send an R-B:
> https://patchwork.freedesktop.org/patch/558222/?series=121745&rev=6
> 
>> I'll add the R-B to the commit when I apply and push this patch,
>> after replying with an R-B email.
>>
> 
> Is manually adding an R-B OK if it is in the correct place, as it is in this patch?

If you've received an R-B email and you're reposting the patch, you should
append the R-B tag below the other existing tags, just as a tool would.
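
For example, the trailer block of a repost of this patch would then read:

    Signed-off-by: Matthew Brost <matthew.brost@intel.com>
    Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>

with the R-B appended below the existing Signed-off-by, in the order the
tags were collected.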

Patch

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index cf4c23db7547..c4d5c3d265a8 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -427,11 +427,20 @@ static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
  */
 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 {
+	lockdep_assert_held(&sched->job_list_lock);
+
 	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 	    !list_empty(&sched->pending_list))
 		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
 }
 
+static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
+{
+	spin_lock(&sched->job_list_lock);
+	drm_sched_start_timeout(sched);
+	spin_unlock(&sched->job_list_lock);
+}
+
 /**
  * drm_sched_fault - immediately start timeout handler
  *
@@ -544,11 +553,8 @@ static void drm_sched_job_timedout(struct work_struct *work)
 		spin_unlock(&sched->job_list_lock);
 	}
 
-	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
-		spin_lock(&sched->job_list_lock);
-		drm_sched_start_timeout(sched);
-		spin_unlock(&sched->job_list_lock);
-	}
+	if (status != DRM_GPU_SCHED_STAT_ENODEV)
+		drm_sched_start_timeout_unlocked(sched);
 }
 
 /**
@@ -674,11 +680,8 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 			drm_sched_job_done(s_job, -ECANCELED);
 	}
 
-	if (full_recovery) {
-		spin_lock(&sched->job_list_lock);
-		drm_sched_start_timeout(sched);
-		spin_unlock(&sched->job_list_lock);
-	}
+	if (full_recovery)
+		drm_sched_start_timeout_unlocked(sched);
 
 	drm_sched_wqueue_start(sched);
 }
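
The patch follows the common kernel locked/unlocked helper split: the core
function states and enforces its locking contract with lockdep_assert_held(),
and a thin _unlocked wrapper takes and drops the lock for call sites that do
not already hold it. A minimal sketch of the pattern in isolation (the struct
and function names below are hypothetical, not taken from the scheduler):

#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct foo_engine {
	spinlock_t lock;	/* protects @pending */
	bool pending;
};

/*
 * Core helper: callers must already hold foo->lock; lockdep verifies this
 * when CONFIG_PROVE_LOCKING is enabled.
 */
static void foo_arm_timeout(struct foo_engine *foo)
{
	lockdep_assert_held(&foo->lock);

	if (foo->pending)
		pr_debug("arming timeout\n");	/* stand-in for queue_delayed_work() */
}

/* Wrapper for call sites that do not hold the lock themselves. */
static void foo_arm_timeout_unlocked(struct foo_engine *foo)
{
	spin_lock(&foo->lock);
	foo_arm_timeout(foo);
	spin_unlock(&foo->lock);
}

Call sites that already hold job_list_lock can keep calling
drm_sched_start_timeout() directly, while the tail calls in
drm_sched_job_timedout() and drm_sched_start() switch to the _unlocked
variant, as the hunks above show.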