Message ID | 20231027165859.395638-7-robdclark@gmail.com (mailing list archive)
---|---
State | New, archived
Series | drm/msm/gem: drm_exec conversion
On 27.10.23 18:58, Rob Clark wrote:
> From: Rob Clark <robdclark@chromium.org>
>
> In cases where the # is known ahead of time, it is silly to do the table
> resize dance.

Ah, yes that was my initial implementation as well, but I ditched that
because nobody actually used it.

One comment below.

>
> Signed-off-by: Rob Clark <robdclark@chromium.org>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  |  2 +-
>  drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c |  4 ++--
>  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c |  4 ++--
>  drivers/gpu/drm/drm_exec.c              | 15 ++++++++++++---
>  drivers/gpu/drm/nouveau/nouveau_exec.c  |  2 +-
>  drivers/gpu/drm/nouveau/nouveau_uvmm.c  |  2 +-
>  include/drm/drm_exec.h                  |  2 +-
>  8 files changed, 22 insertions(+), 13 deletions(-)
>
> [snip: amdgpu, nouveau and drm_exec.h hunks -- existing callers just gain a trailing 0 argument]
>
> diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
> index 5d2809de4517..27d11c20d148 100644
> --- a/drivers/gpu/drm/drm_exec.c
> +++ b/drivers/gpu/drm/drm_exec.c
> @@ -69,16 +69,25 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
>   * drm_exec_init - initialize a drm_exec object
>   * @exec: the drm_exec object to initialize
>   * @flags: controls locking behavior, see DRM_EXEC_* defines
> + * @nr: the initial # of objects
>   *
>   * Initialize the object and make sure that we can track locked objects.
> + *
> + * If nr is non-zero then it is used as the initial objects table size.
> + * In either case, the table will grow (be re-allocated) on demand.
>   */
> -void drm_exec_init(struct drm_exec *exec, uint32_t flags)
> +void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
>  {
> +	size_t sz = PAGE_SIZE;
> +
> +	if (nr)
> +		sz = (size_t)nr * sizeof(void *);
> +
>  	exec->flags = flags;
> -	exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
> +	exec->objects = kmalloc(sz, GFP_KERNEL);

Please use k*v*malloc() here since we can't predict how large that will be.

With that fixed the patch is Reviewed-by: Christian König
<christian.koenig@amd.com>.

Regards,
Christian.

>
>  	/* If allocation here fails, just delay that till the first use */
> -	exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
> +	exec->max_objects = exec->objects ? sz / sizeof(void *) : 0;
>  	exec->num_objects = 0;
>  	exec->contended = DRM_EXEC_DUMMY;
>  	exec->prelocked = NULL;
On Mon, Oct 30, 2023 at 1:05 AM Christian König
<christian.koenig@amd.com> wrote:
>
> On 27.10.23 18:58, Rob Clark wrote:
> > From: Rob Clark <robdclark@chromium.org>
> >
> > In cases where the # is known ahead of time, it is silly to do the table
> > resize dance.
>
> Ah, yes that was my initial implementation as well, but I ditched that
> because nobody actually used it.
>
> One comment below.
>
> [snip: quoted patch trimmed to the drm_exec.c hunk under discussion]
>
> > -void drm_exec_init(struct drm_exec *exec, uint32_t flags)
> > +void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
> >  {
> > +	size_t sz = PAGE_SIZE;
> > +
> > +	if (nr)
> > +		sz = (size_t)nr * sizeof(void *);
> > +
> >  	exec->flags = flags;
> > -	exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
> > +	exec->objects = kmalloc(sz, GFP_KERNEL);
>
> Please use k*v*malloc() here since we can't predict how large that will be.

or __GFP_NOWARN?  If userspace (or kasan) is cheeky and asks for ~0
objects, we should probably just fail?

BR,
-R

> With that fixed the patch is Reviewed-by: Christian König
> <christian.koenig@amd.com>.
>
> Regards,
> Christian.
On 30.10.23 14:38, Rob Clark wrote:
> On Mon, Oct 30, 2023 at 1:05 AM Christian König
> <christian.koenig@amd.com> wrote:
>> On 27.10.23 18:58, Rob Clark wrote:
>>> From: Rob Clark <robdclark@chromium.org>
>>>
>>> In cases where the # is known ahead of time, it is silly to do the table
>>> resize dance.
>> Ah, yes that was my initial implementation as well, but I ditched that
>> because nobody actually used it.
>>
>> One comment below.
>>
>> [snip: quoted patch trimmed to the drm_exec.c hunk under discussion]
>>
>>> -void drm_exec_init(struct drm_exec *exec, uint32_t flags)
>>> +void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
>>>  {
>>> +	size_t sz = PAGE_SIZE;
>>> +
>>> +	if (nr)
>>> +		sz = (size_t)nr * sizeof(void *);
>>> +
>>>  	exec->flags = flags;
>>> -	exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
>>> +	exec->objects = kmalloc(sz, GFP_KERNEL);
>> Please use k*v*malloc() here since we can't predict how large that will be.
> or __GFP_NOWARN?  If userspace (or kasan) is cheeky and asks for ~0
> objects, we should probably just fail?

Oh, good point! If this value is controlled by userspace we must be much
more careful.

Instead of __GFP_NOWARN or any other workaround we should use
kvmalloc_array() here.

Maybe turn the code upside down, in other words something like this here:

	if (!nr)
		nr = PAGE_SIZE / sizeof(void *);

	exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL);
	exec->max_objects = exec->objects ? nr : 0;

Regards,
Christian.

> BR,
> -R
>
>> With that fixed the patch is Reviewed-by: Christian König
>> <christian.koenig@amd.com>.
>>
>> Regards,
>> Christian.
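For reference, a minimal sketch of drm_exec_init() with this suggestion folded into the hunk from the patch -- it simply combines the two snippets above and is not the final committed code. kvmalloc_array() checks the nr * sizeof(void *) multiplication for overflow and can fall back to vmalloc() for large tables, so an absurd userspace-supplied count just fails the allocation; the teardown in drm_exec_fini() and the later table growth would then also need to move to the matching kv* helpers.

void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
{
	/* Keep the old default of one page worth of pointers. */
	if (!nr)
		nr = PAGE_SIZE / sizeof(void *);

	exec->flags = flags;

	/* Overflow-checked allocation, may fall back to vmalloc(). */
	exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL);

	/* If allocation here fails, just delay that till the first use */
	exec->max_objects = exec->objects ? nr : 0;
	exec->num_objects = 0;
	exec->contended = DRM_EXEC_DUMMY;
	exec->prelocked = NULL;
}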
On Mon, Oct 30, 2023 at 9:01 AM Christian König
<ckoenig.leichtzumerken@gmail.com> wrote:
>
> On 30.10.23 14:38, Rob Clark wrote:
> > On Mon, Oct 30, 2023 at 1:05 AM Christian König
> > <christian.koenig@amd.com> wrote:
> >> On 27.10.23 18:58, Rob Clark wrote:
> >>> From: Rob Clark <robdclark@chromium.org>
> >>>
> >>> In cases where the # is known ahead of time, it is silly to do the table
> >>> resize dance.
> >>
> >> Ah, yes that was my initial implementation as well, but I ditched that
> >> because nobody actually used it.
> >>
> >> One comment below.
> >>
> >> [snip: quoted patch trimmed to the drm_exec.c hunk under discussion]
> >>
> >>>  	exec->flags = flags;
> >>> -	exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
> >>> +	exec->objects = kmalloc(sz, GFP_KERNEL);
> >> Please use k*v*malloc() here since we can't predict how large that will be.
> > or __GFP_NOWARN?  If userspace (or kasan) is cheeky and asks for ~0
> > objects, we should probably just fail?
>
> Oh, good point! If this value is controlled by userspace we must be much
> more careful.
>
> Instead of __GFP_NOWARN or any other workaround we should use
> kvmalloc_array() here.
>
> Maybe turn the code upside down, in other words something like this here:
>
>     if (!nr)
>         nr = PAGE_SIZE / sizeof(void *);
>
>     exec->objects = kvmalloc_array(nr, sizeof(void *), GFP_KERNEL);
>     exec->max_objects = exec->objects ? nr : 0;

oh, good point

BR,
-R

> Regards,
> Christian.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index efdb1c48f431..d27ca8f61929 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -65,7 +65,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
 	}
 
 	amdgpu_sync_create(&p->sync);
-	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 720011019741..796fa6f1420b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -70,7 +70,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	int r;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 		if (likely(!r))
@@ -110,7 +110,7 @@ int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	struct drm_exec exec;
 	int r;
 
-	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
+	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 		if (likely(!r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ca4d2d430e28..16f1715148ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -203,7 +203,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct drm_exec exec;
 	long r;
 
-	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES);
+	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
 		drm_exec_retry_on_contention(&exec);
@@ -739,7 +739,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 
 	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-		      DRM_EXEC_IGNORE_DUPLICATES);
+		      DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(&exec) {
 		if (gobj) {
 			r = drm_exec_lock_obj(&exec, gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index b6015157763a..3c351941701e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1105,7 +1105,7 @@ int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
 
 	amdgpu_sync_create(&sync);
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_lock_obj(&exec,
 				      &ctx_data->meta_data_obj->tbo.base);
@@ -1176,7 +1176,7 @@ int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
 	struct drm_exec exec;
 	long r;
 
-	drm_exec_init(&exec, 0);
+	drm_exec_init(&exec, 0, 0);
 	drm_exec_until_all_locked(&exec) {
 		r = drm_exec_lock_obj(&exec,
 				      &ctx_data->meta_data_obj->tbo.base);
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 5d2809de4517..27d11c20d148 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -69,16 +69,25 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
  * drm_exec_init - initialize a drm_exec object
  * @exec: the drm_exec object to initialize
  * @flags: controls locking behavior, see DRM_EXEC_* defines
+ * @nr: the initial # of objects
  *
  * Initialize the object and make sure that we can track locked objects.
+ *
+ * If nr is non-zero then it is used as the initial objects table size.
+ * In either case, the table will grow (be re-allocated) on demand.
  */
-void drm_exec_init(struct drm_exec *exec, uint32_t flags)
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr)
 {
+	size_t sz = PAGE_SIZE;
+
+	if (nr)
+		sz = (size_t)nr * sizeof(void *);
+
 	exec->flags = flags;
-	exec->objects = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	exec->objects = kmalloc(sz, GFP_KERNEL);
 
 	/* If allocation here fails, just delay that till the first use */
-	exec->max_objects = exec->objects ? PAGE_SIZE / sizeof(void *) : 0;
+	exec->max_objects = exec->objects ? sz / sizeof(void *) : 0;
 	exec->num_objects = 0;
 	exec->contended = DRM_EXEC_DUMMY;
 	exec->prelocked = NULL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 19024ce21fbb..f5930cc0b3fb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -103,7 +103,7 @@ nouveau_exec_job_submit(struct nouveau_job *job)
 
 	nouveau_uvmm_lock(uvmm);
 	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-			    DRM_EXEC_IGNORE_DUPLICATES);
+			    DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(exec) {
 		struct drm_gpuva *va;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index aae780e4a4aa..3a9331a1c830 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1288,7 +1288,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
 	}
 
 	drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
-			    DRM_EXEC_IGNORE_DUPLICATES);
+			    DRM_EXEC_IGNORE_DUPLICATES, 0);
 	drm_exec_until_all_locked(exec) {
 		list_for_each_op(op, &bind_job->ops) {
 			struct drm_gpuva_op *va_op;
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
index b5bf0b6da791..f1a66c048721 100644
--- a/include/drm/drm_exec.h
+++ b/include/drm/drm_exec.h
@@ -135,7 +135,7 @@ static inline bool drm_exec_is_contended(struct drm_exec *exec)
 	return !!exec->contended;
 }
 
-void drm_exec_init(struct drm_exec *exec, uint32_t flags);
+void drm_exec_init(struct drm_exec *exec, uint32_t flags, unsigned nr);
 void drm_exec_fini(struct drm_exec *exec);
 bool drm_exec_cleanup(struct drm_exec *exec);
 int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
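As a usage illustration of the new third argument (a hypothetical caller, not part of this patch; the function and variable names below are made up): a submit path that already knows how many GEM objects it is going to lock -- presumably what the msm conversion in this series does -- can pass that count so the pointer table is allocated at its final size up front and never hits the resize path.

static int example_lock_bos(struct drm_gem_object **bos, unsigned int nr_bos)
{
	struct drm_exec exec;
	unsigned int i;
	int ret = 0;

	/* Size the objects table for nr_bos entries right away. */
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, nr_bos);
	drm_exec_until_all_locked(&exec) {
		for (i = 0; i < nr_bos; i++) {
			ret = drm_exec_lock_obj(&exec, bos[i]);
			drm_exec_retry_on_contention(&exec);
			if (ret)
				goto out;
		}
	}

	/* All objects are locked at this point; do the actual work. */
out:
	drm_exec_fini(&exec);
	return ret;
}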