
[RFC] drm/scheduler: Add drm_sched_job_cleanup

Message ID 1540388423-30236-1-git-send-email-smasetty@codeaurora.org (mailing list archive)
State Not Applicable, archived
Series [RFC] drm/scheduler: Add drm_sched_job_cleanup

Commit Message

Sharat Masetty Oct. 24, 2018, 1:40 p.m. UTC
Hi,

Can you please review this and share your thoughts on this approach? We
primarily need a clean way to release the finished fence when a job is
not pushed to the queue, so this patch adds a new API to clean up the
resources allocated in drm_sched_job_init().

Additionally, this moves the dma_fence_put() of the finished fence from
the scheduler into the drivers' free_job() handlers, which now use the
new API. This way, the layer that creates the scheduler fence is also
the one that frees it.
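
To make the intended flow concrete, here is a minimal sketch of how a
driver would use the new API. The foo_* names are hypothetical stand-ins
rather than code from any driver in this series; the function signatures
are the ones this patch builds on:

static int foo_submit(struct foo_job *job, struct drm_gpu_scheduler *sched,
		      struct drm_sched_entity *entity, void *owner)
{
	int ret;

	ret = drm_sched_job_init(&job->base, sched, entity, owner);
	if (ret)
		return ret;

	ret = foo_setup(job);	/* hypothetical per-driver setup step */
	if (ret) {
		/*
		 * The job will never be pushed to the queue, so the
		 * scheduler will not call free_job(); release the
		 * finished fence here instead.
		 */
		drm_sched_job_cleanup(&job->base);
		return ret;
	}

	drm_sched_entity_push_job(&job->base, entity);
	return 0;
}

static void foo_free_job(struct drm_sched_job *sched_job)
{
	struct foo_job *job = to_foo_job(sched_job);	/* hypothetical */

	/* The driver, not the scheduler core, now drops the finished fence. */
	drm_sched_job_cleanup(sched_job);
	foo_job_put(job);	/* hypothetical */
}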

Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 ++
 drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  2 ++
 drivers/gpu/drm/msm/msm_sched.c           |  8 +++-----
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 11 +++++++++--
 drivers/gpu/drm/v3d/v3d_sched.c           |  2 ++
 include/drm/gpu_scheduler.h               |  1 +
 7 files changed, 20 insertions(+), 8 deletions(-)

Comments

Jordan Crouse Oct. 24, 2018, 2:55 p.m. UTC | #1
On Wed, Oct 24, 2018 at 07:10:23PM +0530, Sharat Masetty wrote:
> Hi,
> 
> Can you please review this and share your thoughts on this approach? We
> primarily need a clean way to release the finished fence when a job is
> not pushed to the queue, so this patch adds a new API to clean up the
> resources allocated in drm_sched_job_init().
> 
> Additionally, this moves the dma_fence_put() of the finished fence from
> the scheduler into the drivers' free_job() handlers, which now use the
> new API. This way, the layer that creates the scheduler fence is also
> the one that frees it.
> 
> Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 ++
>  drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  2 ++
>  drivers/gpu/drm/msm/msm_sched.c           |  8 +++-----

Just a nit - msm_sched.c isn't a thing yet. Can you regenerate the patch
against drm-next so that if folks like this they can get it merged sooner?

Jordan
Christian König Oct. 25, 2018, 1:35 p.m. UTC | #2
On 24.10.18 16:55, Jordan Crouse wrote:
> On Wed, Oct 24, 2018 at 07:10:23PM +0530, Sharat Masetty wrote:
>> Hi,
>>
>> Can you please review this and share your thoughts on this approach? We
>> primarily need a clean way to release the finished fence when a job is
>> not pushed to the queue, so this patch adds a new API to clean up the
>> resources allocated in drm_sched_job_init().
>>
>> Additionally, this moves the dma_fence_put() of the finished fence from
>> the scheduler into the drivers' free_job() handlers, which now use the
>> new API. This way, the layer that creates the scheduler fence is also
>> the one that frees it.
>>
>> Signed-off-by: Sharat Masetty <smasetty@codeaurora.org>
>> ---
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
>>   drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 ++
>>   drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  2 ++
>>   drivers/gpu/drm/msm/msm_sched.c           |  8 +++-----
> Just a nit - msm_sched.c isn't a thing yet. Can you regenerate the patch
> against drm-next so that if folks like this they can get it merged sooner?

Not a bad idea, but first of all, as Jordan commented, please rebase this
on drm-next, or maybe even on Alex's amd-staging-drm-next branch.

In addition, you missed one more occurrence in amdgpu_cs_submit() where
this needs to be applied.
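
To spell out the rule at issue: every error path taken after
drm_sched_job_init() has succeeded, but before the job is pushed, must
now call drm_sched_job_cleanup(), or the finished fence leaks. A
hypothetical sketch of such a submit path, not the actual amdgpu code:

static int foo_cs_submit(struct foo_job *job, struct drm_gpu_scheduler *sched,
			 struct drm_sched_entity *entity, void *owner)
{
	int r;

	r = drm_sched_job_init(&job->base, sched, entity, owner);
	if (r)
		return r;	/* init failed, nothing to clean up yet */

	r = foo_setup_step_one(job);	/* hypothetical */
	if (r)
		goto error_cleanup;

	/* A second, later error path -- the kind that is easy to miss. */
	r = foo_setup_step_two(job);	/* hypothetical */
	if (r)
		goto error_cleanup;

	drm_sched_entity_push_job(&job->base, entity);
	return 0;

error_cleanup:
	/* The job was never pushed, so free_job() will never run. */
	drm_sched_job_cleanup(&job->base);
	return r;
}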

Regards,
Christian.


Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9c85a90..9a83152 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1198,7 +1198,7 @@  static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
 	if (r) {
 		dma_fence_put(p->fence);
-		dma_fence_put(&job->base.s_fence->finished);
+		drm_sched_job_cleanup(&job->base);
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 2bd5676..5d252d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -100,6 +100,8 @@  static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
 	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
+	drm_sched_job_cleanup(s_job);
+
 	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 50d6b88..30398c5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -127,6 +127,8 @@  static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
 {
 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
 
+	drm_sched_job_cleanup(sched_job);
+
 	etnaviv_submit_put(submit);
 }
 
diff --git a/drivers/gpu/drm/msm/msm_sched.c b/drivers/gpu/drm/msm/msm_sched.c
index 70b7713..748acc6 100644
--- a/drivers/gpu/drm/msm/msm_sched.c
+++ b/drivers/gpu/drm/msm/msm_sched.c
@@ -235,6 +235,8 @@  static void msm_sched_free_job(struct drm_sched_job *sched_job)
 	struct msm_gpu *gpu = submit->gpu;
 	int i;
 
+	drm_sched_job_cleanup(sched_job);
+
 	mutex_lock(&gpu->dev->struct_mutex);
 
 	for (i = 0; i < submit->nr_bos; i++) {
@@ -300,11 +302,7 @@  int msm_sched_job_init(struct drm_sched_job *sched_job)
 	mutex_unlock(&ring->fence_idr_lock);
 
 	if (submit->out_fence_id < 0) {
-		/*
-		 * TODO: The scheduler's finished fence leaks here since the
-		 * job will not be pushed to the queue. Need to update scheduler
-		 * to fix this cleanly(?)
-		 */
+		drm_sched_job_cleanup(sched_job);
 		dma_fence_put(submit->out_fence);
 		submit->out_fence = NULL;
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index f5534ff..bbf9315 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -214,7 +214,6 @@  static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 						 finish_cb);
 	drm_sched_fence_finished(job->s_fence);
 	WARN_ON(job->s_fence->parent);
-	dma_fence_put(&job->s_fence->finished);
 	job->sched->ops->free_job(job);
 }
 
@@ -480,7 +479,6 @@  static void drm_sched_job_finish(struct work_struct *work)
 			drm_sched_queue_delayed_work(next);
 	}
 	spin_unlock(&sched->job_list_lock);
-	dma_fence_put(&s_job->s_fence->finished);
 	sched->ops->free_job(s_job);
 }
 
@@ -636,6 +634,15 @@  int drm_sched_job_init(struct drm_sched_job *job,
 EXPORT_SYMBOL(drm_sched_job_init);
 
 /**
+ * Cleanup sched_job resources
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+	dma_fence_put(&job->s_fence->finished);
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
+
+/**
  * Return ture if we can push more jobs to the hw.
  */
 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index b07bece..4abd73c 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -35,6 +35,8 @@ 
 {
 	struct v3d_job *job = to_v3d_job(sched_job);
 
+	drm_sched_job_cleanup(sched_job);
+
 	v3d_exec_put(job->exec);
 }
 
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 5c59c38..2ec3ddf 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -213,6 +213,7 @@  int drm_sched_job_init(struct drm_sched_job *job,
 		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
 			    struct drm_sched_job *job);
 void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);