diff mbox series

drm/scheduler: select the least loaded sched during entity init

Message ID 20180803070628.21471-1-nayan26deshmukh@gmail.com (mailing list archive)
State New, archived
Headers show
Series drm/scheduler: select the least loaded sched during entity init | expand

Commit Message

Nayan Deshmukh Aug. 3, 2018, 7:06 a.m. UTC
Instead of assigning entity to the first scheduler in the list
assign it to the least loaded scheduler.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

Comments

Christian König Aug. 3, 2018, 10:42 a.m. UTC | #1
Am 03.08.2018 um 09:06 schrieb Nayan Deshmukh:
> Instead of assigning entity to the first scheduler in the list
> assign it to the least loaded scheduler.

I thought about that as well, but then abandoned the idea.

The reason is that we are going to reassign the rq when the first job is 
pushed to it anyway.

This will only become useful when we take the number of entities a rq 
has into account for the decision as well.

Christian.

>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> ---
>   drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 +++-
>   1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 21fa0d8a8783..dbd707d24597 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -61,6 +61,8 @@
>   static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
>   static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
>   static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
> +static struct drm_sched_rq *
> +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity);
>   
>   /**
>    * drm_sched_rq_init - initialize a given run queue struct
> @@ -186,13 +188,13 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
>   
>   	memset(entity, 0, sizeof(struct drm_sched_entity));
>   	INIT_LIST_HEAD(&entity->list);
> -	entity->rq = rq_list[0];
>   	entity->guilty = guilty;
>   	entity->num_rq_list = num_rq_list;
>   	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
>   				GFP_KERNEL);
>   	for (i = 0; i < num_rq_list; ++i)
>   		entity->rq_list[i] = rq_list[i];
> +	entity->rq = drm_sched_entity_get_free_sched(entity);
>   	entity->last_scheduled = NULL;
>   
>   	spin_lock_init(&entity->rq_lock);
Nayan Deshmukh Aug. 3, 2018, 11:53 a.m. UTC | #2
Ah...you are correct. We will reschedule on the first job push. I didn't
take that into account. Let's drop this patch then.

Thanks,
Nayan

On Fri, Aug 3, 2018, 4:12 PM Christian König <
ckoenig.leichtzumerken@gmail.com> wrote:

> Am 03.08.2018 um 09:06 schrieb Nayan Deshmukh:
> > Instead of assigning entity to the first scheduler in the list
> > assign it to the least loaded scheduler.
>
> I thought about that as well, but then abandoned the idea.
>
> The reason is that we are going to reassign the rq when the first job is
> pushed to it anyway.
>
> This will only become useful when we take the number of entities a rq
> has into account for the decision as well.
>
> Christian.
>
> >
> > Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> > ---
> >   drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 +++-
> >   1 file changed, 3 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > index 21fa0d8a8783..dbd707d24597 100644
> > --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > @@ -61,6 +61,8 @@
> >   static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
> >   static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
> >   static void drm_sched_process_job(struct dma_fence *f, struct
> dma_fence_cb *cb);
> > +static struct drm_sched_rq *
> > +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity);
> >
> >   /**
> >    * drm_sched_rq_init - initialize a given run queue struct
> > @@ -186,13 +188,13 @@ int drm_sched_entity_init(struct drm_sched_entity
> *entity,
> >
> >       memset(entity, 0, sizeof(struct drm_sched_entity));
> >       INIT_LIST_HEAD(&entity->list);
> > -     entity->rq = rq_list[0];
> >       entity->guilty = guilty;
> >       entity->num_rq_list = num_rq_list;
> >       entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq
> *),
> >                               GFP_KERNEL);
> >       for (i = 0; i < num_rq_list; ++i)
> >               entity->rq_list[i] = rq_list[i];
> > +     entity->rq = drm_sched_entity_get_free_sched(entity);
> >       entity->last_scheduled = NULL;
> >
> >       spin_lock_init(&entity->rq_lock);
>
>
Ah...you are correct. We will reschedule on the first job push. I didn't take that into account. Let's drop this patch then.

Thanks,
Nayan

On Fri, Aug 3, 2018, 4:12 PM Christian König <ckoenig.leichtzumerken@gmail.com> wrote:

> Am 03.08.2018 um 09:06 schrieb Nayan Deshmukh:
> > Instead of assigning entity to the first scheduler in the list
> > assign it to the least loaded scheduler.
>
> I thought about that as well, but then abandoned the idea.
>
> The reason is that we are going to reassign the rq when the first job is
> pushed to it anyway.
>
> This will only become useful when we take the number of entities a rq
> has into account for the decision as well.
>
> Christian.
>
> >
> > Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> > ---
> >   drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 +++-
> >   1 file changed, 3 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > index 21fa0d8a8783..dbd707d24597 100644
> > --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > @@ -61,6 +61,8 @@
> >   static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
> >   static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
> >   static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
> > +static struct drm_sched_rq *
> > +drm_sched_entity_get_free_sched(struct drm_sched_entity *entity);
> >
> >   /**
> >    * drm_sched_rq_init - initialize a given run queue struct
> > @@ -186,13 +188,13 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
> >
> >       memset(entity, 0, sizeof(struct drm_sched_entity));
> >       INIT_LIST_HEAD(&entity->list);
> > -     entity->rq = rq_list[0];
> >       entity->guilty = guilty;
> >       entity->num_rq_list = num_rq_list;
> >       entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
> >                               GFP_KERNEL);
> >       for (i = 0; i < num_rq_list; ++i)
> >               entity->rq_list[i] = rq_list[i];
> > +     entity->rq = drm_sched_entity_get_free_sched(entity);
> >       entity->last_scheduled = NULL;
> >
> >       spin_lock_init(&entity->rq_lock);
diff mbox series

Patch

diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 21fa0d8a8783..dbd707d24597 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -61,6 +61,8 @@ 
 static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+static struct drm_sched_rq *
+drm_sched_entity_get_free_sched(struct drm_sched_entity *entity);
 
 /**
  * drm_sched_rq_init - initialize a given run queue struct
@@ -186,13 +188,13 @@  int drm_sched_entity_init(struct drm_sched_entity *entity,
 
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
-	entity->rq = rq_list[0];
 	entity->guilty = guilty;
 	entity->num_rq_list = num_rq_list;
 	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
 				GFP_KERNEL);
 	for (i = 0; i < num_rq_list; ++i)
 		entity->rq_list[i] = rq_list[i];
+	entity->rq = drm_sched_entity_get_free_sched(entity);
 	entity->last_scheduled = NULL;
 
 	spin_lock_init(&entity->rq_lock);