@@ -1842,6 +1842,8 @@ int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
+int amdgpu_gem_find_bo_by_cpu_mapping_ioctl(struct drm_device *dev, void *data,
+					    struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
@@ -216,6 +216,81 @@ error_unlock:
	return r;
}
+/* Look up an existing GEM handle for @obj in @filp's handle table.
+ * Returns the handle, or 0 if @obj has no handle yet.  No extra
+ * reference is taken here; a handle that is found already holds one.
+ */
+static int amdgpu_gem_get_handle_from_object(struct drm_file *filp,
+					     struct drm_gem_object *obj)
+{
+	int i;
+	struct drm_gem_object *tmp;
+
+	spin_lock(&filp->table_lock);
+	idr_for_each_entry(&filp->object_idr, tmp, i) {
+		if (obj == tmp) {
+			spin_unlock(&filp->table_lock);
+			return i;
+		}
+	}
+	spin_unlock(&filp->table_lock);
+	return 0;
+}
+
+int amdgpu_gem_find_bo_by_cpu_mapping_ioctl(struct drm_device *dev, void *data,
+					    struct drm_file *filp)
+{
+	struct drm_amdgpu_gem_find_bo *args = data;
+	struct drm_gem_object *gobj;
+	struct amdgpu_bo *bo;
+	struct ttm_buffer_object *tbo;
+	struct vm_area_struct *vma;
+	uint32_t handle;
+	int r;
+
+	/* Both the address and the size must be page aligned. */
+	if (offset_in_page(args->addr | args->size))
+		return -EINVAL;
+
+	down_read(&current->mm->mmap_sem);
+	vma = find_vma(current->mm, args->addr);
+	/* The mapping must belong to this DRM file and must fully
+	 * contain the requested range.
+	 */
+	if (!vma || vma->vm_file != filp->filp ||
+	    (args->size > (vma->vm_end - args->addr))) {
+		args->handle = 0;
+		up_read(&current->mm->mmap_sem);
+		return -EINVAL;
+	}
+
+	/* While mmap_sem is held the vma, and with it the BO backing
+	 * the mapping, cannot go away, so no extra reference is taken.
+	 */
+	tbo = vma->vm_private_data;
+	bo = container_of(tbo, struct amdgpu_bo, tbo);
+	gobj = &bo->gem_base;
+
+	/* Reuse an existing handle if the BO already has one, otherwise
+	 * create a new handle, which takes its own reference.
+	 */
+	handle = amdgpu_gem_get_handle_from_object(filp, gobj);
+	if (handle == 0) {
+		r = drm_gem_handle_create(filp, gobj, &handle);
+		if (r) {
+			DRM_ERROR("create gem handle failed\n");
+			up_read(&current->mm->mmap_sem);
+			return r;
+		}
+	}
+
+	args->handle = handle;
+	args->offset = args->addr - vma->vm_start;
+
+	up_read(&current->mm->mmap_sem);
+	return 0;
+}
+
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
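
For illustration, here is a minimal userspace sketch of how the new ioctl might be driven. This is not part of the patch: `fd`, `cpu_ptr`, and `find_bo_for_mapping` are hypothetical, and the address is assumed to lie inside an earlier mmap() of a BO on the same fd (the ioctl rejects mappings whose vm_file differs from the DRM file).

/* Hypothetical usage sketch, not part of the patch.  Assumes <fd> is an
 * open amdgpu DRM fd and <cpu_ptr> lies in a page-aligned mapping that
 * was created by mmap()ing a BO of that same fd. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

static int find_bo_for_mapping(int fd, void *cpu_ptr, uint64_t size,
			       uint32_t *handle, uint64_t *offset)
{
	struct drm_amdgpu_gem_find_bo args;

	memset(&args, 0, sizeof(args));
	args.addr = (uintptr_t)cpu_ptr;	/* page aligned, or -EINVAL */
	args.size = size;		/* page aligned, or -EINVAL */

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_FIND_BO, &args))
		return -1;

	*handle = args.handle;	/* GEM handle of the backing BO */
	*offset = args.offset;	/* byte offset of cpu_ptr in the mapping */
	return 0;
}

Note that the lookup path reuses a pre-existing handle rather than creating a fresh one, so a caller that closes the returned handle may invalidate it for other code in the same process that is still using that handle.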
@@ -709,5 +709,6 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_FIND_BO, amdgpu_gem_find_bo_by_cpu_mapping_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
@@ -47,6 +47,7 @@
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
#define DRM_AMDGPU_WAIT_FENCES 0x12
+#define DRM_AMDGPU_GEM_FIND_BO 0x13
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -61,6 +62,7 @@
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
+#define DRM_IOCTL_AMDGPU_GEM_FIND_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_FIND_BO, struct drm_amdgpu_gem_find_bo)
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -199,6 +201,18 @@ struct drm_amdgpu_gem_userptr {
	uint32_t		handle;
};
+struct drm_amdgpu_gem_find_bo {
+	/* CPU address inside a mapping of this DRM file, page aligned */
+	uint64_t		addr;
+	/* size of the queried range in bytes, page aligned */
+	uint64_t		size;
+	uint32_t		flags;
+	/* resulting GEM handle */
+	uint32_t		handle;
+	/* offset of addr within the BO's mapping */
+	uint64_t		offset;
+};
+
/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
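
To make the uapi semantics concrete, a hypothetical two-page example (again not part of the patch; `fd` and `base` are assumed, with `base` the start of an 8 KiB BO mapping on `fd`, and 4 KiB pages): since the kernel computes offset as addr - vma->vm_start, both queries resolve to the same BO, and the second call reuses the handle created by the first.

/* Hypothetical illustration, not part of the patch: <fd> is an amdgpu
 * DRM fd, <base> the start of an 8 KiB BO mapping created on it. */
#include <assert.h>
#include <stdint.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

static void demo_find_bo_offsets(int fd, uint64_t base)
{
	struct drm_amdgpu_gem_find_bo a = { .addr = base, .size = 4096 };
	struct drm_amdgpu_gem_find_bo b = { .addr = base + 4096, .size = 4096 };

	assert(drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_FIND_BO, &a) == 0);
	assert(drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_FIND_BO, &b) == 0);

	/* Both pages belong to the same vma, hence the same BO and the
	 * same GEM handle, one page apart within the mapping. */
	assert(a.handle == b.handle);
	assert(a.offset == 0 && b.offset == 4096);
}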