
[3/3] drm/scheduler: modify args of drm_sched_entity_init

Message ID 20180712063643.8030-4-nayan26deshmukh@gmail.com (mailing list archive)
State New, archived

Commit Message

Nayan Deshmukh July 12, 2018, 6:36 a.m. UTC
Replace the single run queue with a list of run queues and remove the
sched arg, since it is already part of the run queue itself.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  4 ++--
 drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c     |  4 ++--
 drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c     |  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c     |  8 ++++----
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 22 ++++++++++++++--------
 drivers/gpu/drm/v3d/v3d_drv.c             |  7 +++----
 include/drm/gpu_scheduler.h               | 13 +++++++++----
 11 files changed, 44 insertions(+), 34 deletions(-)
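
For callers, the change boils down to passing a (for now single-element) array of
run queues instead of the scheduler plus one run queue. A minimal sketch of the new
call pattern, based on the hunks below, with "ring" and "entity" standing in for a
driver's own objects:

	struct drm_sched_rq *rq;
	int r;

	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	/* before: drm_sched_entity_init(&ring->sched, &entity, rq, NULL); */
	r = drm_sched_entity_init(&entity, &rq, 1, NULL);
	if (r)
		return r;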

Comments

Nayan Deshmukh July 12, 2018, 6:42 a.m. UTC | #1
On Thu, Jul 12, 2018 at 12:07 PM Nayan Deshmukh
<nayan26deshmukh@gmail.com> wrote:
>
> Replace the single run queue with a list of run queues and remove the
> sched arg, since it is already part of the run queue itself.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c     |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c     |  4 ++--
>  drivers/gpu/drm/etnaviv/etnaviv_drv.c     |  8 ++++----
>  drivers/gpu/drm/scheduler/gpu_scheduler.c | 22 ++++++++++++++--------
>  drivers/gpu/drm/v3d/v3d_drv.c             |  7 +++----
>  include/drm/gpu_scheduler.h               | 13 +++++++++----
>  11 files changed, 44 insertions(+), 34 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> index 0120b24fae1b..83e3b320a793 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
> @@ -90,8 +90,8 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
>                 if (ring == &adev->gfx.kiq.ring)
>                         continue;
>
> -               r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
> -                                         rq, &ctx->guilty);
> +               r = drm_sched_entity_init(&ctx->rings[i].entity,
> +                                         &rq, 1, &ctx->guilty);
>                 if (r)
>                         goto failed;
>         }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> index 0246cb87d9e4..c937f6755f55 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
> @@ -140,8 +140,8 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
>
>         ring = adev->mman.buffer_funcs_ring;
>         rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> -       r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
> -                                 rq, NULL);
> +       r = drm_sched_entity_init(&adev->mman.entity,
> +                                 &rq, 1, NULL);
>         if (r) {
>                 DRM_ERROR("Failed setting up TTM BO move run queue.\n");
>                 goto error_entity;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> index 0b46ea1c6290..1471a8c3ce24 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
> @@ -266,8 +266,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
>
>                 ring = &adev->uvd.inst[j].ring;
>                 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
> -               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
> -                                         rq, NULL);
> +               r = drm_sched_entity_init(&adev->uvd.inst[j].entity,
> +                                         &rq, 1, NULL);
>                 if (r != 0) {
>                         DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
>                         return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> index b0dcdfd85f5b..62b2f0816695 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
> @@ -190,8 +190,8 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
>
>         ring = &adev->vce.ring[0];
>         rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
> -       r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
> -                                 rq, NULL);
> +       r = drm_sched_entity_init(&adev->vce.entity,
> +                                 &rq, 1, NULL);
>         if (r != 0) {
>                 DRM_ERROR("Failed setting up VCE run queue.\n");
>                 return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> index 712af5c1a5d6..00cb46965237 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
> @@ -2562,8 +2562,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
>         ring_instance %= adev->vm_manager.vm_pte_num_rings;
>         ring = adev->vm_manager.vm_pte_rings[ring_instance];
>         rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
> -       r = drm_sched_entity_init(&ring->sched, &vm->entity,
> -                                 rq, NULL);
> +       r = drm_sched_entity_init(&vm->entity, &rq,
> +                                 1, NULL);
>         if (r)
>                 return r;
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> index 8ee1c2eaaa14..da38aa2f3d13 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
> @@ -429,8 +429,8 @@ static int uvd_v6_0_sw_init(void *handle)
>                 struct drm_sched_rq *rq;
>                 ring = &adev->uvd.inst->ring_enc[0];
>                 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
> -               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
> -                                         rq, NULL);
> +               r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
> +                                         &rq, 1, NULL);
>                 if (r) {
>                         DRM_ERROR("Failed setting up UVD ENC run queue.\n");
>                         return r;
> diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> index ba244d3b74db..69221430aa38 100644
> --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
> @@ -431,8 +431,8 @@ static int uvd_v7_0_sw_init(void *handle)
>         for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
>                 ring = &adev->uvd.inst[j].ring_enc[0];
>                 rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
> -               r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
> -                                         rq, NULL);
> +               r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
> +                                         &rq, 1, NULL);
>                 if (r) {
>                         DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
>                         return r;
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> index 45bfdf4cc107..121c53e04603 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> @@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
>
>         for (i = 0; i < ETNA_MAX_PIPES; i++) {
>                 struct etnaviv_gpu *gpu = priv->gpu[i];
> +               struct drm_sched_rq *rq;
>
> +               rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
>                 if (gpu) {
> -                       drm_sched_entity_init(&gpu->sched,
> -                               &ctx->sched_entity[i],
> -                               &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
> -                               NULL);
> +                       drm_sched_entity_init(&ctx->sched_entity[i],
> +                                             &rq, 1, NULL);
>                         }
>         }
>
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 3dc1a4f07e3f..b2dbd1c1ba69 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -162,26 +162,32 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
>   * drm_sched_entity_init - Init a context entity used by scheduler when
>   * submit to HW ring.
>   *
> - * @sched: scheduler instance
>   * @entity: scheduler entity to init
> - * @rq: the run queue this entity belongs
> + * @rq_list: the list of run queue on which jobs from this
> + *           entity can be submitted
> + * @num_rq_list: number of run queue in rq_list
>   * @guilty: atomic_t set to 1 when a job on this queue
>   *          is found to be guilty causing a timeout
>   *
> + * Note: the rq_list should have atleast one element to schedule
> + *       the entity
> + *
>   * Returns 0 on success or a negative error code on failure.
>  */
> -int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
> -                         struct drm_sched_entity *entity,
> -                         struct drm_sched_rq *rq,
> +int drm_sched_entity_init(struct drm_sched_entity *entity,
> +                         struct drm_sched_rq **rq_list,
> +                         unsigned int num_rq_list,
>                           atomic_t *guilty)
>  {
> -       if (!(sched && entity && rq))
> +       if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
>                 return -EINVAL;
>
>         memset(entity, 0, sizeof(struct drm_sched_entity));
>         INIT_LIST_HEAD(&entity->list);
> -       entity->rq = rq;
> -       entity->sched = sched;
> +       entity->rq_list = NULL;
> +       entity->rq = rq_list[0];
> +       entity->sched = rq_list[0]->sched;
> +       entity->num_rq_list = num_rq_list;
>         entity->guilty = guilty;
>         entity->last_scheduled = NULL;
>
> diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
> index 567f7d46d912..1dceba2b42fd 100644
> --- a/drivers/gpu/drm/v3d/v3d_drv.c
> +++ b/drivers/gpu/drm/v3d/v3d_drv.c
> @@ -123,6 +123,7 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
>  {
>         struct v3d_dev *v3d = to_v3d_dev(dev);
>         struct v3d_file_priv *v3d_priv;
> +       struct drm_sched_rq *rq;
>         int i;
>
>         v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
> @@ -132,10 +133,8 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
>         v3d_priv->v3d = v3d;
>
>         for (i = 0; i < V3D_MAX_QUEUES; i++) {
> -               drm_sched_entity_init(&v3d->queue[i].sched,
> -                                     &v3d_priv->sched_entity[i],
> -                                     &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
> -                                     NULL);
> +               rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
> +               drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
>         }
>
>         file->driver_priv = v3d_priv;
> diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
> index 605bd4ad2397..2b5e152d45fc 100644
> --- a/include/drm/gpu_scheduler.h
> +++ b/include/drm/gpu_scheduler.h
> @@ -50,7 +50,10 @@ enum drm_sched_priority {
>   *
>   * @list: used to append this struct to the list of entities in the
>   *        runqueue.
> - * @rq: runqueue to which this entity belongs.
> + * @rq: runqueue on which this entity is currently scheduled.
> + * @rq_list: a list of run queues on which jobs from this entity can
Added a whitespace by mistake; will remove it in the final patch        ^^
> + *           be scheduled
> + * @num_rq_list: number of run queues in the rq_list
>   * @rq_lock: lock to modify the runqueue to which this entity belongs.
>   * @sched: the scheduler instance to which this entity is enqueued.
>   * @job_queue: the list of jobs of this entity.
> @@ -75,6 +78,8 @@ enum drm_sched_priority {
>  struct drm_sched_entity {
>         struct list_head                list;
>         struct drm_sched_rq             *rq;
> +       struct drm_sched_rq             **rq_list;
> +       unsigned int                    num_rq_list;
>         spinlock_t                      rq_lock;
>         struct drm_gpu_scheduler        *sched;
>
> @@ -284,9 +289,9 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
>                    const char *name);
>  void drm_sched_fini(struct drm_gpu_scheduler *sched);
>
> -int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
> -                         struct drm_sched_entity *entity,
> -                         struct drm_sched_rq *rq,
> +int drm_sched_entity_init(struct drm_sched_entity *entity,
> +                         struct drm_sched_rq **rq_list,
> +                         unsigned int num_rq_list,
>                           atomic_t *guilty);
>  long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
>                            struct drm_sched_entity *entity, long timeout);
> --
> 2.14.3
>
Eric Anholt July 12, 2018, 5:51 p.m. UTC | #2
Nayan Deshmukh <nayan26deshmukh@gmail.com> writes:
> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> index 3dc1a4f07e3f..b2dbd1c1ba69 100644
> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> @@ -162,26 +162,32 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
>   * drm_sched_entity_init - Init a context entity used by scheduler when
>   * submit to HW ring.
>   *
> - * @sched: scheduler instance
>   * @entity: scheduler entity to init
> - * @rq: the run queue this entity belongs
> + * @rq_list: the list of run queue on which jobs from this
> + *           entity can be submitted
> + * @num_rq_list: number of run queue in rq_list
>   * @guilty: atomic_t set to 1 when a job on this queue
>   *          is found to be guilty causing a timeout
>   *
> + * Note: the rq_list should have atleast one element to schedule
> + *       the entity
> + *
>   * Returns 0 on success or a negative error code on failure.
>  */
> -int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
> -			  struct drm_sched_entity *entity,
> -			  struct drm_sched_rq *rq,
> +int drm_sched_entity_init(struct drm_sched_entity *entity,
> +			  struct drm_sched_rq **rq_list,
> +			  unsigned int num_rq_list,
>  			  atomic_t *guilty)
>  {
> -	if (!(sched && entity && rq))
> +	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
>  		return -EINVAL;
>  
>  	memset(entity, 0, sizeof(struct drm_sched_entity));
>  	INIT_LIST_HEAD(&entity->list);
> -	entity->rq = rq;
> -	entity->sched = sched;
> +	entity->rq_list = NULL;
> +	entity->rq = rq_list[0];
> +	entity->sched = rq_list[0]->sched;
> +	entity->num_rq_list = num_rq_list;

The API change makes sense as prep work, but I don't really like adding
the field to the struct (and changing the struct's docs for the existing
rq field) if it's going to always be NULL until a future change.

Similarly, I'd rather see patch 2 as part of a series that uses the
value.

That said, while I don't currently have a usecase for load-balancing
between entities, I may in the future, so thanks for working on this!
Christian König July 13, 2018, 7:10 a.m. UTC | #3
Am 12.07.2018 um 08:36 schrieb Nayan Deshmukh:
> Replace the single run queue with a list of run queues and remove the
> sched arg, since it is already part of the run queue itself.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>

Going over this I've found one more thing which needs to be fixed:

[SNIP]
> diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> index 45bfdf4cc107..121c53e04603 100644
> --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
> @@ -49,12 +49,12 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
>   
>   	for (i = 0; i < ETNA_MAX_PIPES; i++) {
>   		struct etnaviv_gpu *gpu = priv->gpu[i];
> +		struct drm_sched_rq *rq;
>   
> +		rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
>   		if (gpu) {

Here you use gpu right before the NULL check. It's only an address 
calculation, but still a bit ugly.

Just move the assignment under the "if (gpu) {".
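
I.e. roughly like this (untested sketch):

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu) {
			struct drm_sched_rq *rq;

			rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
			drm_sched_entity_init(&ctx->sched_entity[i],
					      &rq, 1, NULL);
		}
	}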

> -			drm_sched_entity_init(&gpu->sched,
> -				&ctx->sched_entity[i],
> -				&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
> -				NULL);
> +			drm_sched_entity_init(&ctx->sched_entity[i],
> +					      &rq, 1, NULL);

Regards,
Christian.
Nayan Deshmukh July 13, 2018, 7:20 a.m. UTC | #4
On Thu, Jul 12, 2018 at 11:21 PM Eric Anholt <eric@anholt.net> wrote:
>
> Nayan Deshmukh <nayan26deshmukh@gmail.com> writes:
> > diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > index 3dc1a4f07e3f..b2dbd1c1ba69 100644
> > --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
> > @@ -162,26 +162,32 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
> >   * drm_sched_entity_init - Init a context entity used by scheduler when
> >   * submit to HW ring.
> >   *
> > - * @sched: scheduler instance
> >   * @entity: scheduler entity to init
> > - * @rq: the run queue this entity belongs
> > + * @rq_list: the list of run queue on which jobs from this
> > + *           entity can be submitted
> > + * @num_rq_list: number of run queue in rq_list
> >   * @guilty: atomic_t set to 1 when a job on this queue
> >   *          is found to be guilty causing a timeout
> >   *
> > + * Note: the rq_list should have atleast one element to schedule
> > + *       the entity
> > + *
> >   * Returns 0 on success or a negative error code on failure.
> >  */
> > -int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
> > -                       struct drm_sched_entity *entity,
> > -                       struct drm_sched_rq *rq,
> > +int drm_sched_entity_init(struct drm_sched_entity *entity,
> > +                       struct drm_sched_rq **rq_list,
> > +                       unsigned int num_rq_list,
> >                         atomic_t *guilty)
> >  {
> > -     if (!(sched && entity && rq))
> > +     if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
> >               return -EINVAL;
> >
> >       memset(entity, 0, sizeof(struct drm_sched_entity));
> >       INIT_LIST_HEAD(&entity->list);
> > -     entity->rq = rq;
> > -     entity->sched = sched;
> > +     entity->rq_list = NULL;
> > +     entity->rq = rq_list[0];
> > +     entity->sched = rq_list[0]->sched;
> > +     entity->num_rq_list = num_rq_list;
>
> The API change makes sense as prep work, but I don't really like adding
> the field to the struct (and changing the struct's docs for the existing
> rq field) if it's going to always be NULL until a future change.
>
> Similarly, I'd rather see patch 2 as part of a series that uses the
> value.
I agree with you. I am fine with dropping patch 2 for now and
modifying patch 3, but either way works for me.

What are your thoughts on this Christian?
>
> That said, while I don't currently have a usecase for load-balancing
> between entities, I may in the future, so thanks for working on this!
Christian König July 13, 2018, 7:22 a.m. UTC | #5
Am 13.07.2018 um 09:20 schrieb Nayan Deshmukh:
> On Thu, Jul 12, 2018 at 11:21 PM Eric Anholt <eric@anholt.net> wrote:
>> Nayan Deshmukh <nayan26deshmukh@gmail.com> writes:
>>> diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>> index 3dc1a4f07e3f..b2dbd1c1ba69 100644
>>> --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>> +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
>>> @@ -162,26 +162,32 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
>>>    * drm_sched_entity_init - Init a context entity used by scheduler when
>>>    * submit to HW ring.
>>>    *
>>> - * @sched: scheduler instance
>>>    * @entity: scheduler entity to init
>>> - * @rq: the run queue this entity belongs
>>> + * @rq_list: the list of run queue on which jobs from this
>>> + *           entity can be submitted
>>> + * @num_rq_list: number of run queue in rq_list
>>>    * @guilty: atomic_t set to 1 when a job on this queue
>>>    *          is found to be guilty causing a timeout
>>>    *
>>> + * Note: the rq_list should have atleast one element to schedule
>>> + *       the entity
>>> + *
>>>    * Returns 0 on success or a negative error code on failure.
>>>   */
>>> -int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
>>> -                       struct drm_sched_entity *entity,
>>> -                       struct drm_sched_rq *rq,
>>> +int drm_sched_entity_init(struct drm_sched_entity *entity,
>>> +                       struct drm_sched_rq **rq_list,
>>> +                       unsigned int num_rq_list,
>>>                          atomic_t *guilty)
>>>   {
>>> -     if (!(sched && entity && rq))
>>> +     if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
>>>                return -EINVAL;
>>>
>>>        memset(entity, 0, sizeof(struct drm_sched_entity));
>>>        INIT_LIST_HEAD(&entity->list);
>>> -     entity->rq = rq;
>>> -     entity->sched = sched;
>>> +     entity->rq_list = NULL;
>>> +     entity->rq = rq_list[0];
>>> +     entity->sched = rq_list[0]->sched;
>>> +     entity->num_rq_list = num_rq_list;
>> The API change makes sense as prep work, but I don't really like adding
>> the field to the struct (and changing the struct's docs for the existing
>> rq field) if it's going to always be NULL until a future change.
>>
>> Similarly, I'd rather see patch 2 as part of a series that uses the
>> value.
> I agree with you. I am fine with dropping the patch 2 for now and
> modifying patch 3. I am fine either way.
>
> What are your thoughts on this Christian?

Ok with me as well. In this case just send out the two patches with the 
API change.

Christian.

>> That said, while I don't currently have a usecase for load-balancing
>> between entities, I may in the future, so thanks for working on this!
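
For reference, the load-balancing use case mentioned above would eventually let a
driver hand several run queues to one entity. A purely hypothetical sketch (nothing
in this series consumes rq_list yet, and NUM_RINGS/rings[] are illustrative names):

	struct drm_sched_rq *rq_list[NUM_RINGS];
	unsigned int i;
	int r;

	for (i = 0; i < NUM_RINGS; i++)
		rq_list[i] = &rings[i]->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* entity->rq starts out as rq_list[0]; a later patch would pick among them */
	r = drm_sched_entity_init(&entity, rq_list, NUM_RINGS, NULL);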

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0120b24fae1b..83e3b320a793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -90,8 +90,8 @@  static int amdgpu_ctx_init(struct amdgpu_device *adev,
 		if (ring == &adev->gfx.kiq.ring)
 			continue;
 
-		r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
-					  rq, &ctx->guilty);
+		r = drm_sched_entity_init(&ctx->rings[i].entity,
+					  &rq, 1, &ctx->guilty);
 		if (r)
 			goto failed;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 0246cb87d9e4..c937f6755f55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -140,8 +140,8 @@  static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 
 	ring = adev->mman.buffer_funcs_ring;
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-	r = drm_sched_entity_init(&ring->sched, &adev->mman.entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&adev->mman.entity,
+				  &rq, 1, NULL);
 	if (r) {
 		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
 		goto error_entity;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 0b46ea1c6290..1471a8c3ce24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -266,8 +266,8 @@  int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 		ring = &adev->uvd.inst[j].ring;
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst[j].entity,
+					  &rq, 1, NULL);
 		if (r != 0) {
 			DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j);
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b0dcdfd85f5b..62b2f0816695 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -190,8 +190,8 @@  int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 
 	ring = &adev->vce.ring[0];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-	r = drm_sched_entity_init(&ring->sched, &adev->vce.entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&adev->vce.entity,
+				  &rq, 1, NULL);
 	if (r != 0) {
 		DRM_ERROR("Failed setting up VCE run queue.\n");
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 712af5c1a5d6..00cb46965237 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2562,8 +2562,8 @@  int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	ring_instance %= adev->vm_manager.vm_pte_num_rings;
 	ring = adev->vm_manager.vm_pte_rings[ring_instance];
 	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-	r = drm_sched_entity_init(&ring->sched, &vm->entity,
-				  rq, NULL);
+	r = drm_sched_entity_init(&vm->entity, &rq,
+				  1, NULL);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 8ee1c2eaaa14..da38aa2f3d13 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -429,8 +429,8 @@  static int uvd_v6_0_sw_init(void *handle)
 		struct drm_sched_rq *rq;
 		ring = &adev->uvd.inst->ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst->entity_enc,
+					  &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up UVD ENC run queue.\n");
 			return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index ba244d3b74db..69221430aa38 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -431,8 +431,8 @@  static int uvd_v7_0_sw_init(void *handle)
 	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
 		ring = &adev->uvd.inst[j].ring_enc[0];
 		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc,
-					  rq, NULL);
+		r = drm_sched_entity_init(&adev->uvd.inst[j].entity_enc,
+					  &rq, 1, NULL);
 		if (r) {
 			DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j);
 			return r;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 45bfdf4cc107..121c53e04603 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -49,12 +49,12 @@  static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 
 	for (i = 0; i < ETNA_MAX_PIPES; i++) {
 		struct etnaviv_gpu *gpu = priv->gpu[i];
+		struct drm_sched_rq *rq;
 
+		rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 		if (gpu) {
-			drm_sched_entity_init(&gpu->sched,
-				&ctx->sched_entity[i],
-				&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				NULL);
+			drm_sched_entity_init(&ctx->sched_entity[i],
+					      &rq, 1, NULL);
 			}
 	}
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 3dc1a4f07e3f..b2dbd1c1ba69 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -162,26 +162,32 @@  drm_sched_rq_select_entity(struct drm_sched_rq *rq)
  * drm_sched_entity_init - Init a context entity used by scheduler when
  * submit to HW ring.
  *
- * @sched: scheduler instance
  * @entity: scheduler entity to init
- * @rq: the run queue this entity belongs
+ * @rq_list: the list of run queue on which jobs from this
+ *           entity can be submitted
+ * @num_rq_list: number of run queue in rq_list
  * @guilty: atomic_t set to 1 when a job on this queue
  *          is found to be guilty causing a timeout
  *
+ * Note: the rq_list should have atleast one element to schedule
+ *       the entity
+ *
  * Returns 0 on success or a negative error code on failure.
 */
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty)
 {
-	if (!(sched && entity && rq))
+	if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
 		return -EINVAL;
 
 	memset(entity, 0, sizeof(struct drm_sched_entity));
 	INIT_LIST_HEAD(&entity->list);
-	entity->rq = rq;
-	entity->sched = sched;
+	entity->rq_list = NULL;
+	entity->rq = rq_list[0];
+	entity->sched = rq_list[0]->sched;
+	entity->num_rq_list = num_rq_list;
 	entity->guilty = guilty;
 	entity->last_scheduled = NULL;
 
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 567f7d46d912..1dceba2b42fd 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -123,6 +123,7 @@  v3d_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv;
+	struct drm_sched_rq *rq;
 	int i;
 
 	v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
@@ -132,10 +133,8 @@  v3d_open(struct drm_device *dev, struct drm_file *file)
 	v3d_priv->v3d = v3d;
 
 	for (i = 0; i < V3D_MAX_QUEUES; i++) {
-		drm_sched_entity_init(&v3d->queue[i].sched,
-				      &v3d_priv->sched_entity[i],
-				      &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
-				      NULL);
+		rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+		drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL);
 	}
 
 	file->driver_priv = v3d_priv;
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 605bd4ad2397..2b5e152d45fc 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -50,7 +50,10 @@  enum drm_sched_priority {
  *
  * @list: used to append this struct to the list of entities in the
  *        runqueue.
- * @rq: runqueue to which this entity belongs.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @rq_list: a list of run queues on which jobs from this entity can 
+ *           be scheduled
+ * @num_rq_list: number of run queues in the rq_list
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @sched: the scheduler instance to which this entity is enqueued.
  * @job_queue: the list of jobs of this entity.
@@ -75,6 +78,8 @@  enum drm_sched_priority {
 struct drm_sched_entity {
 	struct list_head		list;
 	struct drm_sched_rq		*rq;
+	struct drm_sched_rq		**rq_list;
+	unsigned int                    num_rq_list;
 	spinlock_t			rq_lock;
 	struct drm_gpu_scheduler	*sched;
 
@@ -284,9 +289,9 @@  int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const char *name);
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
-			  struct drm_sched_entity *entity,
-			  struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+			  struct drm_sched_rq **rq_list,
+			  unsigned int num_rq_list,
 			  atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity, long timeout);