Message ID | 20180720122106.10344-1-nayan26deshmukh@gmail.com (mailing list archive) |
---|---|
State | New, archived |
On 20.07.2018 at 14:21, Nayan Deshmukh wrote:
> entity has a scheduler field and we don't need the sched argument
> in any of the functions where entity is provided.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>

Reviewed-by: Christian König <christian.koenig@amd.com> for the series.

> [...]
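The one-line rationale quoted above rests on struct drm_sched_entity already recording which scheduler it belongs to, so passing the scheduler separately is redundant. A simplified sketch of the two fields the patch leans on (not the full definition; see include/drm/gpu_scheduler.h):

	struct drm_sched_entity {
		struct drm_gpu_scheduler *sched;	/* scheduler this entity belongs to */
		struct drm_sched_rq *rq;		/* runqueue this entity is queued on */
		/* ... remaining members omitted ... */
	};

Every helper that previously took both arguments can therefore recover the scheduler from entity->sched, which is exactly what the patch does.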
Nayan Deshmukh <nayan26deshmukh@gmail.com> writes:

> entity has a scheduler field and we don't need the sched argument
> in any of the functions where entity is provided.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
> [...]
> -void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
> -			   struct drm_sched_entity *entity)
> +void drm_sched_entity_fini(struct drm_sched_entity *entity)
>  {
> +	struct drm_gpu_scheduler *sched;
>
> +	sched = entity->sched;

Maybe fold the initialization into the declaration above, like you did
elsewhere?

Regardless, this is a wonderful cleanup of the API.

Reviewed-by: Eric Anholt <eric@anholt.net>
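Eric's "fold the initialization" remark refers to initializing the local where it is declared, as the same patch already does in drm_sched_job_init(). A minimal sketch of the folded form (illustrative only, not the code as merged):

	void drm_sched_entity_fini(struct drm_sched_entity *entity)
	{
		/* initialization folded into the declaration */
		struct drm_gpu_scheduler *sched = entity->sched;

		drm_sched_entity_set_rq(entity, NULL);
		/* ... rest of the function unchanged ... */
	}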
On Fri, Jul 20, 2018 at 2:21 PM, Nayan Deshmukh
<nayan26deshmukh@gmail.com> wrote:
> entity has a scheduler field and we don't need the sched argument
> in any of the functions where entity is provided.
>
> Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>

This breaks the make htmldocs build a bit:

./drivers/gpu/drm/scheduler/gpu_scheduler.c:262: warning: Excess function parameter 'sched' description in 'drm_sched_entity_flush'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:303: warning: Excess function parameter 'sched' description in 'drm_sched_entity_fini'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:365: warning: Excess function parameter 'sched' description in 'drm_sched_entity_destroy'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:730: warning: Excess function parameter 'sched' description in 'drm_sched_job_init'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:263: warning: Excess function parameter 'sched' description in 'drm_sched_entity_flush'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:304: warning: Excess function parameter 'sched' description in 'drm_sched_entity_fini'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:366: warning: Excess function parameter 'sched' description in 'drm_sched_entity_destroy'
./drivers/gpu/drm/scheduler/gpu_scheduler.c:731: warning: Excess function parameter 'sched' description in 'drm_sched_job_init'

Care to fix it?

Thanks, Daniel

> [...]
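Each warning points at a kerneldoc comment that still documents the now-removed @sched parameter of the corresponding function. The fix being asked for is to delete the stale @sched line from the four affected comments; a sketch of what one such follow-up hunk might look like (the surrounding comment text here is illustrative):

	 /**
	  * drm_sched_entity_flush - Flush a context entity
	  *
	- * @sched: scheduler instance
	  * @entity: scheduler entity
	  * @timeout: time to wait in jiffies for Q to become empty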
Hi Daniel,

On Thu, Aug 9, 2018 at 2:27 PM Daniel Vetter <daniel@ffwll.ch> wrote:
>
> On Fri, Jul 20, 2018 at 2:21 PM, Nayan Deshmukh
> <nayan26deshmukh@gmail.com> wrote:
> > entity has a scheduler field and we don't need the sched argument
> > in any of the functions where entity is provided.
> >
> > Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
>
> This breaks the make htmldocs build a bit:
>
> ./drivers/gpu/drm/scheduler/gpu_scheduler.c:262: warning: Excess
> function parameter 'sched' description in 'drm_sched_entity_flush'
> [...]
>
> Care to fix it?

My bad, thanks for pointing it out. I will send in a patch to fix it.

Cheers,
Nayan

> [...]
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7c5cc33d0cda..7e5ebf823309 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1232,7 +1232,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	job = p->job;
 	p->job = NULL;
 
-	r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+	r = drm_sched_job_init(&job->base, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		amdgpu_mn_unlock(p->mn);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
 failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_destroy(&adev->rings[j]->sched,
-					 &ctx->rings[j].entity);
+		drm_sched_entity_destroy(&ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
 	return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
 
-		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
-					 &ctx->rings[i].entity);
+		drm_sched_entity_destroy(&ctx->rings[i].entity);
 	}
 
 	amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
 			if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 				continue;
 
-			max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
-							  &ctx->rings[i].entity, max_wait);
+			max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+							  max_wait);
 		}
 	}
 	mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 				continue;
 
 			if (kref_read(&ctx->refcount) == 1)
-				drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
-						      &ctx->rings[i].entity);
+				drm_sched_entity_fini(&ctx->rings[i].entity);
 			else
 				DRM_ERROR("ctx %p is still alive\n", ctx);
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..631481a730e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 	if (!f)
 		return -EINVAL;
 
-	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+	r = drm_sched_job_init(&job->base, entity, owner);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 13977ea6a097..913705d4dfd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1925,8 +1925,7 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 			return;
 		}
 	} else {
-		drm_sched_entity_destroy(adev->mman.entity.sched,
-					 &adev->mman.entity);
+		drm_sched_entity_destroy(&adev->mman.entity);
 	}
 
 	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 80b5c453f8c1..8e2c96da275e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -305,8 +305,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
 	int i, j;
 
-	drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
-				 &adev->uvd.entity);
+	drm_sched_entity_destroy(&adev->uvd.entity);
 
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 86182c966ed6..b6ab4f5350c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -221,7 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
 
-	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.entity);
 
 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 		(void **)&adev->vce.cpu_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 098dd1ba751a..74b4a28a41d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2642,7 +2642,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->root.base.bo = NULL;
 
 error_free_sched_entity:
-	drm_sched_entity_destroy(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	return r;
 }
@@ -2779,7 +2779,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
 
-	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(&vm->entity);
 
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 36414ba56b22..207532c05eb8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 				gpu->lastctx = NULL;
 			mutex_unlock(&gpu->lock);
 
-			drm_sched_entity_destroy(&gpu->sched,
-						 &ctx->sched_entity[i]);
+			drm_sched_entity_destroy(&ctx->sched_entity[i]);
 		}
 	}
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..590e44b0d963 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -118,8 +118,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 {
 	int ret;
 
-	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
-				 sched_entity, submit->cmdbuf.ctx);
+	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+				 submit->cmdbuf.ctx);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index dac71e3b4514..a3b55c542025 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -273,11 +273,12 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
 {
+	struct drm_gpu_scheduler *sched;
 	long ret = timeout;
 
+	sched = entity->sched;
 	if (!drm_sched_entity_is_initialized(sched, entity))
 		return ret;
 	/**
@@ -312,10 +313,11 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
 {
+	struct drm_gpu_scheduler *sched;
 
+	sched = entity->sched;
 	drm_sched_entity_set_rq(entity, NULL);
 
 	/* Consumption of existing IBs wasn't completed. Forcefully
@@ -373,11 +375,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			      struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
 {
-	drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-	drm_sched_entity_fini(sched, entity);
+	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+	drm_sched_entity_fini(entity);
 }
 EXPORT_SYMBOL(drm_sched_entity_destroy);
 
@@ -740,10 +741,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
  * Returns 0 for success, negative error code otherwise.
  */
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner)
 {
+	struct drm_gpu_scheduler *sched = entity->sched;
+
 	job->sched = sched;
 	job->entity = entity;
 	job->s_priority = entity->rq - sched->sched_rq;
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1dceba2b42fd..2a85fa68ffea 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
 static void
 v3d_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct v3d_dev *v3d = to_v3d_dev(dev);
 	struct v3d_file_priv *v3d_priv = file->driver_priv;
 	enum v3d_queue q;
 
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_entity_destroy(&v3d->queue[q].sched,
-					 &v3d_priv->sched_entity[q]);
+		drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
 	}
 
 	kfree(v3d_priv);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b513f9189caf..9029590267aa 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -552,7 +552,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
-					 &v3d->queue[V3D_BIN].sched,
 					 &v3d_priv->sched_entity[V3D_BIN],
 					 v3d_priv);
 		if (ret)
@@ -567,7 +566,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	}
 
 	ret = drm_sched_job_init(&exec->render.base,
-				 &v3d->queue[V3D_RENDER].sched,
 				 &v3d_priv->sched_entity[V3D_RENDER],
 				 v3d_priv);
 	if (ret)
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 2205e89722f6..728346abcc81 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -286,12 +286,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
 			  struct drm_sched_rq **rq_list,
 			  unsigned int num_rq_list,
 			  atomic_t *guilty);
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity);
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
-			      struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -302,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
 int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_gpu_scheduler *sched,
 		       struct drm_sched_entity *entity,
 		       void *owner);
 void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
entity has a scheduler field and we don't need the sched argument
in any of the functions where entity is provided.

Signed-off-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c   | 13 +++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c   |  3 +--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c   |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c    |  4 ++--
 drivers/gpu/drm/etnaviv/etnaviv_drv.c     |  3 +--
 drivers/gpu/drm/etnaviv/etnaviv_sched.c   |  4 ++--
 drivers/gpu/drm/scheduler/gpu_scheduler.c | 20 +++++++++++---------
 drivers/gpu/drm/v3d/v3d_drv.c             |  4 +---
 drivers/gpu/drm/v3d/v3d_gem.c             |  2 --
 include/drm/gpu_scheduler.h               | 10 +++-------
 13 files changed, 30 insertions(+), 42 deletions(-)
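The caller-visible effect of the series, using the amdgpu_job.c hunk above as the example: the scheduler argument drops out, and drm_sched_job_init() recovers it internally from entity->sched.

	/* before: callers had to pass the scheduler alongside the entity */
	r = drm_sched_job_init(&job->base, entity->sched, entity, owner);

	/* after: the entity alone identifies the scheduler */
	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;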