@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
@@ -105,7 +106,41 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *
i915_gem_object_unpin_map(obj);
}
-static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+/**
+ * i915_gem_dmabuf_update_vma - Set up VMA information for exported LMEM
+ * objects
+ * @obj: valid LMEM object
+ * @vma: valid vma
+ *
+ * NOTE: on success, the final i915_gem_object_put() will be done by the VMA
+ * vm_close() callback.
+ */
+static int i915_gem_dmabuf_update_vma(struct drm_i915_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct i915_mmap_offset *mmo;
+ int err;
+
+ i915_gem_object_get(obj);
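+ /* Exported LMEM objects are always mapped write-combined from userspace */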
+ mmo = i915_gem_mmap_offset_attach(obj, I915_MMAP_TYPE_WC, NULL);
+ if (IS_ERR(mmo)) {
+ err = PTR_ERR(mmo);
+ goto out;
+ }
+
+ err = i915_gem_update_vma_info(obj, mmo, vma);
+ if (err)
+ goto out;
+
+ return 0;
+
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+ struct vm_area_struct *vma)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
int ret;
@@ -113,16 +148,20 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
if (obj->base.size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (!obj->base.filp)
- return -ENODEV;
+ /* shmem-backed objects: hand off to the shmem file's mmap */
+ if (obj->base.filp) {
+ ret = call_mmap(obj->base.filp, vma);
+ if (ret)
+ return ret;
- ret = call_mmap(obj->base.filp, vma);
- if (ret)
- return ret;
+ vma_set_file(vma, obj->base.filp);
+ return 0;
+ }
- vma_set_file(vma, obj->base.filp);
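+ /* Discrete memory (LMEM) has no shmem filp; map it via a WC mmap offset */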
+ if (i915_gem_object_is_lmem(obj))
+ return i915_gem_dmabuf_update_vma(obj, vma);
- return 0;
+ return -ENODEV;
}
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
@@ -254,10 +293,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
*/
return &i915_gem_object_get(obj)->base;
}
-
- /* not our device, but still a i915 object? */
- if (i915_gem_object_is_lmem(obj))
- return ERR_PTR(-ENOTSUPP);
}
/* need to attach */
@@ -620,10 +620,10 @@ insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
return mmo;
}
-static struct i915_mmap_offset *
-mmap_offset_attach(struct drm_i915_gem_object *obj,
- enum i915_mmap_type mmap_type,
- struct drm_file *file)
+struct i915_mmap_offset *
+i915_gem_mmap_offset_attach(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type,
+ struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_mmap_offset *mmo;
@@ -696,7 +696,7 @@ __assign_mmap_offset(struct drm_file *file,
goto out;
}
- mmo = mmap_offset_attach(obj, mmap_type, file);
+ mmo = i915_gem_mmap_offset_attach(obj, mmap_type, file);
if (IS_ERR(mmo)) {
err = PTR_ERR(mmo);
goto out;
@@ -867,56 +867,22 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
return file;
}
-/*
- * This overcomes the limitation in drm_gem_mmap's assignment of a
- * drm_gem_object as the vma->vm_private_data. Since we need to
- * be able to resolve multiple mmap offsets which could be tied
- * to a single gem object.
- */
-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
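+/*
+ * The caller must hold a reference to @obj: on success it is released by the
+ * VMA vm_close() callback, on failure the caller is responsible for dropping
+ * it.
+ */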
+int i915_gem_update_vma_info(struct drm_i915_gem_object *obj,
+ struct i915_mmap_offset *mmo,
+ struct vm_area_struct *vma)
{
- struct drm_vma_offset_node *node;
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
- struct drm_i915_gem_object *obj = NULL;
- struct i915_mmap_offset *mmo = NULL;
struct file *anon;
- if (drm_dev_is_unplugged(dev))
- return -ENODEV;
-
- rcu_read_lock();
- drm_vma_offset_lock_lookup(dev->vma_offset_manager);
- node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
- vma->vm_pgoff,
- vma_pages(vma));
- if (node && drm_vma_node_is_allowed(node, priv)) {
- /*
- * Skip 0-refcnted objects as it is in the process of being
- * destroyed and will be invalid when the vma manager lock
- * is released.
- */
- mmo = container_of(node, struct i915_mmap_offset, vma_node);
- obj = i915_gem_object_get_rcu(mmo->obj);
- }
- drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
- rcu_read_unlock();
- if (!obj)
- return node ? -EACCES : -EINVAL;
-
if (i915_gem_object_is_readonly(obj)) {
- if (vma->vm_flags & VM_WRITE) {
- i915_gem_object_put(obj);
+ if (vma->vm_flags & VM_WRITE)
return -EINVAL;
- }
+
vma->vm_flags &= ~VM_MAYWRITE;
}
- anon = mmap_singleton(to_i915(dev));
- if (IS_ERR(anon)) {
- i915_gem_object_put(obj);
+ anon = mmap_singleton(to_i915(obj->base.dev));
+ if (IS_ERR(anon))
return PTR_ERR(anon);
- }
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = mmo;
@@ -962,6 +928,50 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return 0;
}
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_vma_offset_node *node;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_i915_gem_object *obj = NULL;
+ struct i915_mmap_offset *mmo = NULL;
+ int err;
+
+ if (drm_dev_is_unplugged(dev))
+ return -ENODEV;
+
+ rcu_read_lock();
+ drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+ node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (node && drm_vma_node_is_allowed(node, priv)) {
+ /*
+ * Skip 0-refcnted objects as it is in the process of being
+ * destroyed and will be invalid when the vma manager lock
+ * is released.
+ */
+ mmo = container_of(node, struct i915_mmap_offset, vma_node);
+ obj = i915_gem_object_get_rcu(mmo->obj);
+ }
+ drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ rcu_read_unlock();
+ if (!obj)
+ return node ? -EACCES : -EINVAL;
+
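+ /* Drop the reference taken during the offset lookup if setup failed */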
+ err = i915_gem_update_vma_info(obj, mmo, vma);
+ if (err)
+ i915_gem_object_put(obj);
+
+ return err;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_mman.c"
#endif
@@ -10,6 +10,8 @@
#include <linux/mm_types.h>
#include <linux/types.h>
+#include "gem/i915_gem_object_types.h"
+
struct drm_device;
struct drm_file;
struct drm_i915_gem_object;
@@ -31,4 +33,11 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
+struct i915_mmap_offset *
+i915_gem_mmap_offset_attach(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type,
+ struct drm_file *file);
+int i915_gem_update_vma_info(struct drm_i915_gem_object *obj,
+ struct i915_mmap_offset *mmo,
+ struct vm_area_struct *vma);
#endif
@@ -583,7 +583,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
if (IS_ERR(obj))
return false;
- mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+ mmo = i915_gem_mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
i915_gem_object_put(obj);
return PTR_ERR_OR_ZERO(mmo) == expected;
@@ -686,7 +686,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
- mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+ mmo = i915_gem_mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
if (IS_ERR(mmo)) {
pr_err("Unable to insert object into reclaimed hole\n");
err = PTR_ERR(mmo);
@@ -860,7 +860,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
if (err)
return err;
- mmo = mmap_offset_attach(obj, type, NULL);
+ mmo = i915_gem_mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
@@ -996,7 +996,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
if (!can_mmap(obj, type) || !can_access(obj))
return 0;
- mmo = mmap_offset_attach(obj, type, NULL);
+ mmo = i915_gem_mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
@@ -1109,7 +1109,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
if (err)
return err;
- mmo = mmap_offset_attach(obj, type, NULL);
+ mmo = i915_gem_mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
@@ -1285,7 +1285,7 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
if (!can_mmap(obj, type))
return 0;
- mmo = mmap_offset_attach(obj, type, NULL);
+ mmo = i915_gem_mmap_offset_attach(obj, type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);