diff mbox series

[2/4] drm/scheduler: add counter for total jobs in scheduler

Message ID 20180801082002.20696-2-nayan26deshmukh@gmail.com (mailing list archive)
State New, archived
Headers show
Series [1/4] drm/scheduler: add a list of run queues to the entity | expand

Commit Message

Nayan Deshmukh Aug. 1, 2018, 8:20 a.m. UTC
Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
 include/drm/gpu_scheduler.h               | 2 ++
 2 files changed, 5 insertions(+)

Comments

Christian König Aug. 1, 2018, 1:06 p.m. UTC | #1
Yeah, I've actually added one before pushing it to amd-staging-drm-next.

But thanks for the reminder, wanted to note that to Nayan as well :)

Christian.

Am 01.08.2018 um 15:15 schrieb Huang Rui:
> On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:
>
> This should need a commit message.
>
> Thanks,
> Ray
>
>> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
>> ---
>>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
>>   include/drm/gpu_scheduler.h               | 2 ++
>>   2 files changed, 5 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>> index a3eacc35cf98..375f6f7f6a93 100644
>> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>> @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
>>   
>>   	trace_drm_sched_job(sched_job, entity);
>>   
>> +	atomic_inc(&entity->rq->sched->num_jobs);
>>   	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
>>   
>>   	/* first job wakes up scheduler */
>> @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
>>   
>>   	dma_fence_get(&s_fence->finished);
>>   	atomic_dec(&sched->hw_rq_count);
>> +	atomic_dec(&sched->num_jobs);
>>   	drm_sched_fence_finished(s_fence);
>>   
>>   	trace_drm_sched_process_job(s_fence);
>> @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>>   	INIT_LIST_HEAD(&sched->ring_mirror_list);
>>   	spin_lock_init(&sched->job_list_lock);
>>   	atomic_set(&sched->hw_rq_count, 0);
>> +	atomic_set(&sched->num_jobs, 0);
>>   	atomic64_set(&sched->job_id_count, 0);
>>   
>>   	/* Each scheduler will run on a seperate kernel thread */
>> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
>> index a60896222a3e..89881ce974a5 100644
>> --- a/include/drm/gpu_scheduler.h
>> +++ b/include/drm/gpu_scheduler.h
>> @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
>>    * @job_list_lock: lock to protect the ring_mirror_list.
>>    * @hang_limit: once the hangs by a job crosses this limit then it is marked
>>    *              guilty and it will be considered for scheduling further.
>> + * @num_jobs: the number of jobs in queue in the scheduler
>>    *
>>    * One scheduler is implemented for each hardware ring.
>>    */
>> @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
>>   	struct list_head		ring_mirror_list;
>>   	spinlock_t			job_list_lock;
>>   	int				hang_limit;
>> +	atomic_t                        num_jobs;
>>   };
>>   
>>   int drm_sched_init(struct drm_gpu_scheduler *sched,
>> -- 
>> 2.14.3
>>
>> _______________________________________________
>> amd-gfx mailing list
>> amd-gfx@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
Huang Rui Aug. 1, 2018, 1:15 p.m. UTC | #2
On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:

This should need a commit message.

Thanks,
Ray

> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> ---
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
>  include/drm/gpu_scheduler.h               | 2 ++
>  2 files changed, 5 insertions(+)
> 
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index a3eacc35cf98..375f6f7f6a93 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
>  
>  	trace_drm_sched_job(sched_job, entity);
>  
> +	atomic_inc(&entity->rq->sched->num_jobs);
>  	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
>  
>  	/* first job wakes up scheduler */
> @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
>  
>  	dma_fence_get(&s_fence->finished);
>  	atomic_dec(&sched->hw_rq_count);
> +	atomic_dec(&sched->num_jobs);
>  	drm_sched_fence_finished(s_fence);
>  
>  	trace_drm_sched_process_job(s_fence);
> @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>  	INIT_LIST_HEAD(&sched->ring_mirror_list);
>  	spin_lock_init(&sched->job_list_lock);
>  	atomic_set(&sched->hw_rq_count, 0);
> +	atomic_set(&sched->num_jobs, 0);
>  	atomic64_set(&sched->job_id_count, 0);
>  
>  	/* Each scheduler will run on a seperate kernel thread */
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index a60896222a3e..89881ce974a5 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
>   * @job_list_lock: lock to protect the ring_mirror_list.
>   * @hang_limit: once the hangs by a job crosses this limit then it is marked
>   *              guilty and it will be considered for scheduling further.
> + * @num_jobs: the number of jobs in queue in the scheduler
>   *
>   * One scheduler is implemented for each hardware ring.
>   */
> @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
>  	struct list_head		ring_mirror_list;
>  	spinlock_t			job_list_lock;
>  	int				hang_limit;
> +	atomic_t                        num_jobs;
>  };
>  
>  int drm_sched_init(struct drm_gpu_scheduler *sched,
> -- 
> 2.14.3
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
Huang Rui Aug. 2, 2018, 2:57 a.m. UTC | #3
On Wed, Aug 01, 2018 at 09:06:29PM +0800, Christian König wrote:
> Yeah, I've actually added one before pushing it to amd-staging-drm-next.
> 
> But thanks for the reminder, wanted to note that to Nayan as well :)
> 

Yes, a soft reminder to Nayan. Thanks Nayan for the contribution. :-)

Thanks,
Ray

> Christian.
> 
> Am 01.08.2018 um 15:15 schrieb Huang Rui:
> > On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:
> >
> > This should need a commit message.
> >
> > Thanks,
> > Ray
> >
> >> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> >> ---
> >>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
> >>   include/drm/gpu_scheduler.h               | 2 ++
> >>   2 files changed, 5 insertions(+)
> >>
> >> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> >> index a3eacc35cf98..375f6f7f6a93 100644
> >> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> >> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> >> @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
> >>   
> >>   	trace_drm_sched_job(sched_job, entity);
> >>   
> >> +	atomic_inc(&entity->rq->sched->num_jobs);
> >>   	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
> >>   
> >>   	/* first job wakes up scheduler */
> >> @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
> >>   
> >>   	dma_fence_get(&s_fence->finished);
> >>   	atomic_dec(&sched->hw_rq_count);
> >> +	atomic_dec(&sched->num_jobs);
> >>   	drm_sched_fence_finished(s_fence);
> >>   
> >>   	trace_drm_sched_process_job(s_fence);
> >> @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
> >>   	INIT_LIST_HEAD(&sched->ring_mirror_list);
> >>   	spin_lock_init(&sched->job_list_lock);
> >>   	atomic_set(&sched->hw_rq_count, 0);
> >> +	atomic_set(&sched->num_jobs, 0);
> >>   	atomic64_set(&sched->job_id_count, 0);
> >>   
> >>   	/* Each scheduler will run on a seperate kernel thread */
> >> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> >> index a60896222a3e..89881ce974a5 100644
> >> --- a/include/drm/gpu_scheduler.h
> >> +++ b/include/drm/gpu_scheduler.h
> >> @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
> >>    * @job_list_lock: lock to protect the ring_mirror_list.
> >>    * @hang_limit: once the hangs by a job crosses this limit then it is marked
> >>    *              guilty and it will be considered for scheduling further.
> >> + * @num_jobs: the number of jobs in queue in the scheduler
> >>    *
> >>    * One scheduler is implemented for each hardware ring.
> >>    */
> >> @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
> >>   	struct list_head		ring_mirror_list;
> >>   	spinlock_t			job_list_lock;
> >>   	int				hang_limit;
> >> +	atomic_t                        num_jobs;
> >>   };
> >>   
> >>   int drm_sched_init(struct drm_gpu_scheduler *sched,
> >> -- 
> >> 2.14.3
> >>
> >> _______________________________________________
> >> amd-gfx mailing list
> >> amd-gfx@lists.freedesktop.org
> >> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> > _______________________________________________
> > dri-devel mailing list
> > dri-devel@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/dri-devel
>
Nayan Deshmukh Aug. 2, 2018, 6:11 a.m. UTC | #4
Thanks for the reminders. I felt that the commit header was sufficient
enough but I guess that didn't cover the motivation for the change.

Thanks Christian for adding the commit message.

Regards,
Nayan

On Thu, Aug 2, 2018 at 8:16 AM Huang Rui <ray.huang@amd.com> wrote:

> On Wed, Aug 01, 2018 at 09:06:29PM +0800, Christian König wrote:
> > Yeah, I've actually added one before pushing it to amd-staging-drm-next.
> >
> > But thanks for the reminder, wanted to note that to Nayan as well :)
> >
>
> Yes, a soft reminder to Nayan. Thanks Nayan for the contribution. :-)
>
> Thanks,
> Ray
>
> > Christian.
> >
> > Am 01.08.2018 um 15:15 schrieb Huang Rui:
> > > On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:
> > >
> > > This should need a commit message.
> > >
> > > Thanks,
> > > Ray
> > >
> > >> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> > >> ---
> > >>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++
> > >>   include/drm/gpu_scheduler.h               | 2 ++
> > >>   2 files changed, 5 insertions(+)
> > >>
> > >> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > >> index a3eacc35cf98..375f6f7f6a93 100644
> > >> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > >> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > >> @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct
> drm_sched_job *sched_job,
> > >>
> > >>    trace_drm_sched_job(sched_job, entity);
> > >>
> > >> +  atomic_inc(&entity->rq->sched->num_jobs);
> > >>    first = spsc_queue_push(&entity->job_queue,
> &sched_job->queue_node);
> > >>
> > >>    /* first job wakes up scheduler */
> > >> @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct
> dma_fence *f, struct dma_fence_cb *cb)
> > >>
> > >>    dma_fence_get(&s_fence->finished);
> > >>    atomic_dec(&sched->hw_rq_count);
> > >> +  atomic_dec(&sched->num_jobs);
> > >>    drm_sched_fence_finished(s_fence);
> > >>
> > >>    trace_drm_sched_process_job(s_fence);
> > >> @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler
> *sched,
> > >>    INIT_LIST_HEAD(&sched->ring_mirror_list);
> > >>    spin_lock_init(&sched->job_list_lock);
> > >>    atomic_set(&sched->hw_rq_count, 0);
> > >> +  atomic_set(&sched->num_jobs, 0);
> > >>    atomic64_set(&sched->job_id_count, 0);
> > >>
> > >>    /* Each scheduler will run on a seperate kernel thread */
> > >> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> > >> index a60896222a3e..89881ce974a5 100644
> > >> --- a/include/drm/gpu_scheduler.h
> > >> +++ b/include/drm/gpu_scheduler.h
> > >> @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {
> > >>    * @job_list_lock: lock to protect the ring_mirror_list.
> > >>    * @hang_limit: once the hangs by a job crosses this limit then it
> is marked
> > >>    *              guilty and it will be considered for scheduling
> further.
> > >> + * @num_jobs: the number of jobs in queue in the scheduler
> > >>    *
> > >>    * One scheduler is implemented for each hardware ring.
> > >>    */
> > >> @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {
> > >>    struct list_head                ring_mirror_list;
> > >>    spinlock_t                      job_list_lock;
> > >>    int                             hang_limit;
> > >> +  atomic_t                        num_jobs;
> > >>   };
> > >>
> > >>   int drm_sched_init(struct drm_gpu_scheduler *sched,
> > >> --
> > >> 2.14.3
> > >>
> > >> _______________________________________________
> > >> amd-gfx mailing list
> > >> amd-gfx@lists.freedesktop.org
> > >> https://lists.freedesktop.org/mailman/listinfo/amd-gfx
> > > _______________________________________________
> > > dri-devel mailing list
> > > dri-devel@lists.freedesktop.org
> > > https://lists.freedesktop.org/mailman/listinfo/dri-devel
> >
>
<div dir="ltr"><div><div><div>Thanks for the reminders. I felt that the commit header was sufficient enough but I guess that didn&#39;t cover the motivation for the change.<br><br></div>Thanks Christian for adding the commit message.<br><br></div>Regards,<br></div>Nayan<br></div><br><div class="gmail_quote"><div dir="ltr">On Thu, Aug 2, 2018 at 8:16 AM Huang Rui &lt;<a href="mailto:ray.huang@amd.com">ray.huang@amd.com</a>&gt; wrote:<br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">On Wed, Aug 01, 2018 at 09:06:29PM +0800, Christian König wrote:<br>
&gt; Yeah, I&#39;ve actually added one before pushing it to amd-staging-drm-next.<br>
&gt; <br>
&gt; But thanks for the reminder, wanted to note that to Nayan as well :)<br>
&gt; <br>
<br>
Yes, a soft reminder to Nayan. Thanks Nayan for the contribution. :-)<br>
<br>
Thanks,<br>
Ray<br>
<br>
&gt; Christian.<br>
&gt; <br>
&gt; Am 01.08.2018 um 15:15 schrieb Huang Rui:<br>
&gt; &gt; On Wed, Aug 01, 2018 at 01:50:00PM +0530, Nayan Deshmukh wrote:<br>
&gt; &gt;<br>
&gt; &gt; This should need a commit message.<br>
&gt; &gt;<br>
&gt; &gt; Thanks,<br>
&gt; &gt; Ray<br>
&gt; &gt;<br>
&gt; &gt;&gt; Signed-off-by: Nayan Deshmukh &lt;<a href="mailto:nayan26deshmukh@gmail.com" target="_blank">nayan26deshmukh@gmail.com</a>&gt;<br>
&gt; &gt;&gt; ---<br>
&gt; &gt;&gt;   drivers/gpu/drm/scheduler/gpu_scheduler.c | 3 +++<br>
&gt; &gt;&gt;   include/drm/gpu_scheduler.h               | 2 ++<br>
&gt; &gt;&gt;   2 files changed, 5 insertions(+)<br>
&gt; &gt;&gt;<br>
&gt; &gt;&gt; diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c<br>
&gt; &gt;&gt; index a3eacc35cf98..375f6f7f6a93 100644<br>
&gt; &gt;&gt; --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c<br>
&gt; &gt;&gt; +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c<br>
&gt; &gt;&gt; @@ -549,6 +549,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,<br>
&gt; &gt;&gt;   <br>
&gt; &gt;&gt;    trace_drm_sched_job(sched_job, entity);<br>
&gt; &gt;&gt;   <br>
&gt; &gt;&gt; +  atomic_inc(&amp;entity-&gt;rq-&gt;sched-&gt;num_jobs);<br>
&gt; &gt;&gt;    first = spsc_queue_push(&amp;entity-&gt;job_queue, &amp;sched_job-&gt;queue_node);<br>
&gt; &gt;&gt;   <br>
&gt; &gt;&gt;    /* first job wakes up scheduler */<br>
&gt; &gt;&gt; @@ -836,6 +837,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)<br>
&gt; &gt;&gt;   <br>
&gt; &gt;&gt;    dma_fence_get(&amp;s_fence-&gt;finished);<br>
&gt; &gt;&gt;    atomic_dec(&amp;sched-&gt;hw_rq_count);<br>
&gt; &gt;&gt; +  atomic_dec(&amp;sched-&gt;num_jobs);<br>
&gt; &gt;&gt;    drm_sched_fence_finished(s_fence);<br>
&gt; &gt;&gt;   <br>
&gt; &gt;&gt;    trace_drm_sched_process_job(s_fence);<br>
&gt; &gt;&gt; @@ -953,6 +955,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,<br>
&gt; &gt;&gt;    INIT_LIST_HEAD(&amp;sched-&gt;ring_mirror_list);<br>
&gt; &gt;&gt;    spin_lock_init(&amp;sched-&gt;job_list_lock);<br>
&gt; &gt;&gt;    atomic_set(&amp;sched-&gt;hw_rq_count, 0);<br>
&gt; &gt;&gt; +  atomic_set(&amp;sched-&gt;num_jobs, 0);<br>
&gt; &gt;&gt;    atomic64_set(&amp;sched-&gt;job_id_count, 0);<br>
&gt; &gt;&gt;   <br>
&gt; &gt;&gt;    /* Each scheduler will run on a seperate kernel thread */<br>
&gt; &gt;&gt; diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h<br>
&gt; &gt;&gt; index a60896222a3e..89881ce974a5 100644<br>
&gt; &gt;&gt; --- a/include/drm/gpu_scheduler.h<br>
&gt; &gt;&gt; +++ b/include/drm/gpu_scheduler.h<br>
&gt; &gt;&gt; @@ -260,6 +260,7 @@ struct drm_sched_backend_ops {<br>
&gt; &gt;&gt;    * @job_list_lock: lock to protect the ring_mirror_list.<br>
&gt; &gt;&gt;    * @hang_limit: once the hangs by a job crosses this limit then it is marked<br>
&gt; &gt;&gt;    *              guilty and it will be considered for scheduling further.<br>
&gt; &gt;&gt; + * @num_jobs: the number of jobs in queue in the scheduler<br>
&gt; &gt;&gt;    *<br>
&gt; &gt;&gt;    * One scheduler is implemented for each hardware ring.<br>
&gt; &gt;&gt;    */<br>
&gt; &gt;&gt; @@ -277,6 +278,7 @@ struct drm_gpu_scheduler {<br>
&gt; &gt;&gt;    struct list_head                ring_mirror_list;<br>
&gt; &gt;&gt;    spinlock_t                      job_list_lock;<br>
&gt; &gt;&gt;    int                             hang_limit;<br>
&gt; &gt;&gt; +  atomic_t                        num_jobs;<br>
&gt; &gt;&gt;   };<br>
&gt; &gt;&gt;   <br>
&gt; &gt;&gt;   int drm_sched_init(struct drm_gpu_scheduler *sched,<br>
&gt; &gt;&gt; -- <br>
&gt; &gt;&gt; 2.14.3<br>
&gt; &gt;&gt;<br>
&gt; &gt;&gt; _______________________________________________<br>
&gt; &gt;&gt; amd-gfx mailing list<br>
&gt; &gt;&gt; <a href="mailto:amd-gfx@lists.freedesktop.org" target="_blank">amd-gfx@lists.freedesktop.org</a><br>
&gt; &gt;&gt; <a href="https://lists.freedesktop.org/mailman/listinfo/amd-gfx" rel="noreferrer" target="_blank">https://lists.freedesktop.org/mailman/listinfo/amd-gfx</a><br>
&gt; &gt; _______________________________________________<br>
&gt; &gt; dri-devel mailing list<br>
&gt; &gt; <a href="mailto:dri-devel@lists.freedesktop.org" target="_blank">dri-devel@lists.freedesktop.org</a><br>
&gt; &gt; <a href="https://lists.freedesktop.org/mailman/listinfo/dri-devel" rel="noreferrer" target="_blank">https://lists.freedesktop.org/mailman/listinfo/dri-devel</a><br>
&gt; <br>
</blockquote></div>
diff mbox series

Patch

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index a3eacc35cf98..375f6f7f6a93 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -549,6 +549,7 @@  void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 
 	trace_drm_sched_job(sched_job, entity);
 
+	atomic_inc(&entity->rq->sched->num_jobs);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 
 	/* first job wakes up scheduler */
@@ -836,6 +837,7 @@  static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 
 	dma_fence_get(&s_fence->finished);
 	atomic_dec(&sched->hw_rq_count);
+	atomic_dec(&sched->num_jobs);
 	drm_sched_fence_finished(s_fence);
 
 	trace_drm_sched_process_job(s_fence);
@@ -953,6 +955,7 @@  int drm_sched_init(struct drm_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
+	atomic_set(&sched->num_jobs, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index a60896222a3e..89881ce974a5 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -260,6 +260,7 @@  struct drm_sched_backend_ops {
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  *              guilty and it will be considered for scheduling further.
+ * @num_jobs: the number of jobs in queue in the scheduler
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -277,6 +278,7 @@  struct drm_gpu_scheduler {
 	struct list_head		ring_mirror_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
+	atomic_t                        num_jobs;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,