diff mbox series

[1/2] drm/sched: add drm_sched_start_timeout helper

Message ID 20181009111938.6872-1-christian.koenig@amd.com (mailing list archive)
State New, archived
Headers show
Series [1/2] drm/sched: add drm_sched_start_timeout helper | expand

Commit Message

Christian König Oct. 9, 2018, 11:19 a.m. UTC
Clean up starting the timeout a bit.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/scheduler/sched_main.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

Comments

Nayan Deshmukh Oct. 9, 2018, 4:25 p.m. UTC | #1
On Tue, Oct 9, 2018 at 8:20 PM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> Cleanup starting the timeout a bit.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> ---
>  drivers/gpu/drm/scheduler/sched_main.c | 29 +++++++++++++++++------------
>  1 file changed, 17 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
> index 4e8505d51795..bd7d11c47202 100644
> --- a/drivers/gpu/drm/scheduler/sched_main.c
> +++ b/drivers/gpu/drm/scheduler/sched_main.c
> @@ -182,6 +182,20 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
>  }
>  EXPORT_SYMBOL(drm_sched_dependency_optimized);
>
> +/**
> + * drm_sched_start_timeout - start timeout for reset worker
> + *
> + * @sched: scheduler instance to start the worker for
> + *
> + * Start the timeout for the given scheduler.
> + */
> +static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
> +{
> +       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> +           !list_empty(&sched->ring_mirror_list))
> +               schedule_delayed_work(&sched->work_tdr, sched->timeout);
> +}
> +
>  /* job_finish is called after hw fence signaled
>   */
>  static void drm_sched_job_finish(struct work_struct *work)
> @@ -203,9 +217,7 @@ static void drm_sched_job_finish(struct work_struct *work)
>         /* remove job from ring_mirror_list */
>         list_del(&s_job->node);
>         /* queue TDR for next job */
> -       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> -           !list_empty(&sched->ring_mirror_list))
> -               schedule_delayed_work(&sched->work_tdr, sched->timeout);
> +       drm_sched_start_timeout(sched);
>         spin_unlock(&sched->job_list_lock);
>
>         dma_fence_put(&s_job->s_fence->finished);
> @@ -229,10 +241,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
>
>         spin_lock(&sched->job_list_lock);
>         list_add_tail(&s_job->node, &sched->ring_mirror_list);
> -       if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
> -           list_first_entry_or_null(&sched->ring_mirror_list,
> -                                    struct drm_sched_job, node) == s_job)
> -               schedule_delayed_work(&sched->work_tdr, sched->timeout);
> +       drm_sched_start_timeout(sched);
>         spin_unlock(&sched->job_list_lock);
>  }
>
> @@ -313,11 +322,6 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
>         int r;
>
>         spin_lock(&sched->job_list_lock);
> -       s_job = list_first_entry_or_null(&sched->ring_mirror_list,
> -                                        struct drm_sched_job, node);
> -       if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
> -               schedule_delayed_work(&sched->work_tdr, sched->timeout);
> -
>         list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
>                 struct drm_sched_fence *s_fence = s_job->s_fence;
>                 struct dma_fence *fence;
> @@ -350,6 +354,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
>                 }
>                 spin_lock(&sched->job_list_lock);
>         }
> +       drm_sched_start_timeout(sched);
>         spin_unlock(&sched->job_list_lock);
>  }
>  EXPORT_SYMBOL(drm_sched_job_recovery);
> --
> 2.14.1
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
diff mbox series

Patch

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 4e8505d51795..bd7d11c47202 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -182,6 +182,20 @@  bool drm_sched_dependency_optimized(struct dma_fence* fence,
 }
 EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
+/**
+ * drm_sched_start_timeout - start timeout for reset worker
+ *
+ * @sched: scheduler instance to start the worker for
+ *
+ * Start the timeout for the given scheduler.
+ */
+static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+{
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !list_empty(&sched->ring_mirror_list))
+		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+}
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
@@ -203,9 +217,7 @@  static void drm_sched_job_finish(struct work_struct *work)
 	/* remove job from ring_mirror_list */
 	list_del(&s_job->node);
 	/* queue TDR for next job */
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !list_empty(&sched->ring_mirror_list))
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 
 	dma_fence_put(&s_job->s_fence->finished);
@@ -229,10 +241,7 @@  static void drm_sched_job_begin(struct drm_sched_job *s_job)
 
 	spin_lock(&sched->job_list_lock);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    list_first_entry_or_null(&sched->ring_mirror_list,
-				     struct drm_sched_job, node) == s_job)
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 
@@ -313,11 +322,6 @@  void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 	int r;
 
 	spin_lock(&sched->job_list_lock);
-	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-					 struct drm_sched_job, node);
-	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
-
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 		struct dma_fence *fence;
@@ -350,6 +354,7 @@  void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 		}
 		spin_lock(&sched->job_list_lock);
 	}
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);