This will be used by the heap logic to allow for real non-blocking
allocations when growing the heap.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
---
 drivers/gpu/drm/panthor/panthor_fw.c    | 6 +++---
 drivers/gpu/drm/panthor/panthor_gem.c   | 7 ++++++-
 drivers/gpu/drm/panthor/panthor_gem.h   | 4 ++--
 drivers/gpu/drm/panthor/panthor_sched.c | 6 +++---
 4 files changed, 14 insertions(+), 9 deletions(-)

@@ -446,7 +446,7 @@ panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
int ret;

mem = panthor_kernel_bo_create(ptdev, ptdev->fw->vm, SZ_8K,
- DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_BO_NO_MMAP, 0,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
PANTHOR_VM_KERNEL_AUTO_VA);
@@ -479,7 +479,7 @@ struct panthor_kernel_bo *
panthor_fw_alloc_suspend_buf_mem(struct panthor_device *ptdev, size_t size)
{
return panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev), size,
- DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_BO_NO_MMAP, 0,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
PANTHOR_VM_KERNEL_AUTO_VA);
}
@@ -600,7 +600,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,

section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
section_size,
- DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_BO_NO_MMAP, 0,
vm_map_flags, va);
if (IS_ERR(section->mem))
return PTR_ERR(section->mem);
@@ -75,7 +75,8 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
*/
struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
- size_t size, u32 bo_flags, u32 vm_map_flags,
+ size_t size, u32 bo_flags,
+ u32 alloc_on_fault_granularity, u32 vm_map_flags,
u64 gpu_va)
{
struct drm_gem_shmem_object *obj;
@@ -100,6 +101,10 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
kbo->obj = &obj->base;
bo->flags = bo_flags;

+ if (bo_flags & DRM_PANTHOR_BO_ALLOC_ON_FAULT)
+ drm_gem_shmem_sparse_init(&bo->base, &bo->sparse,
+ alloc_on_fault_granularity);
+
/* The system and GPU MMU page size might differ, which becomes a
* problem for FW sections that need to be mapped at explicit address
* since our PAGE_SIZE alignment might cover a VA range that's
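A note on the (diff-truncated) comment above: the problem it describes is that
with, say, 64K CPU pages and a 4K GPU MMU page size, rounding a section up to
PAGE_SIZE maps more GPU VA than the section needs, which can collide with the
explicit VA reserved for the next FW section. A minimal sketch of the idea,
assuming a panthor_vm_page_size() helper that returns the GPU MMU page size:

	/* Align on the GPU MMU page size rather than the CPU PAGE_SIZE so
	 * we never map more GPU VA than the section actually needs.
	 */
	size = ALIGN(size, panthor_vm_page_size(vm));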
@@ -139,8 +139,8 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo)

struct panthor_kernel_bo *
panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
- size_t size, u32 bo_flags, u32 vm_map_flags,
- u64 gpu_va);
+ size_t size, u32 bo_flags, u32 alloc_on_fault_granularity,
+ u32 vm_map_flags, u64 gpu_va);

void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo);
@@ -3329,7 +3329,7 @@ group_create_queue(struct panthor_group *group,

queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm,
args->ringbuf_size,
- DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_BO_NO_MMAP, 0,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
PANTHOR_VM_KERNEL_AUTO_VA);
@@ -3359,7 +3359,7 @@ group_create_queue(struct panthor_group *group,
panthor_kernel_bo_create(group->ptdev, group->vm,
queue->profiling.slot_count *
sizeof(struct panthor_job_profiling_data),
- DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_BO_NO_MMAP, 0,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
PANTHOR_VM_KERNEL_AUTO_VA);
@@ -3490,7 +3490,7 @@ int panthor_group_create(struct panthor_file *pfile,
group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm,
group_args->queues.count *
sizeof(struct panthor_syncobj_64b),
- DRM_PANTHOR_BO_NO_MMAP,
+ DRM_PANTHOR_BO_NO_MMAP, 0,
DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC |
DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED,
PANTHOR_VM_KERNEL_AUTO_VA);
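As the commit message notes, the new argument exists so the heap logic can
create chunk BOs that are populated on GPU fault instead of up front, making
heap growth genuinely non-blocking. A rough sketch of such a caller (the heap
change itself is a later patch; chunk->bo, heap->chunk_size and the granularity
value of 64 are made up for illustration, and the granularity's exact unit is
whatever drm_gem_shmem_sparse_init() expects):

	/* Chunk BO whose backing pages are meant to be allocated when the
	 * GPU faults on them, rather than at creation time, so growing the
	 * heap doesn't block on up-front page allocation.
	 */
	chunk->bo = panthor_kernel_bo_create(ptdev, vm, heap->chunk_size,
					     DRM_PANTHOR_BO_NO_MMAP |
					     DRM_PANTHOR_BO_ALLOC_ON_FAULT,
					     64,
					     DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC,
					     PANTHOR_VM_KERNEL_AUTO_VA);
	if (IS_ERR(chunk->bo))
		return PTR_ERR(chunk->bo);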