[v2,28/37] drm/i915: Allow i915 to manage the vma offset nodes instead of drm core

Message ID 20190627205633.1143-29-matthew.auld@intel.com (mailing list archive)
State New, archived
Series Introduce memory region concept (including device local memory)

Commit Message

Matthew Auld June 27, 2019, 8:56 p.m. UTC
From: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>

This enables us to store extra data within vma->vm_private_data and assign
the pagefault ops for each mmap instance.

We replace the core drm_gem_mmap implementation to overcome the
limitation of having only a single offset node per gem object, allowing
us to have multiple offsets per object. This enables each mapping
instance to use a unique fault handler per object.

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 179 ++++++++++++++++--
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  32 ++++
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  17 +-
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  17 ++
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  12 +-
 drivers/gpu/drm/i915/gt/intel_reset.c         |  19 +-
 drivers/gpu/drm/i915/i915_drv.c               |   9 +-
 drivers/gpu/drm/i915/i915_drv.h               |   1 +
 drivers/gpu/drm/i915/i915_vma.c               |  21 +-
 9 files changed, 268 insertions(+), 39 deletions(-)

Comments

Chris Wilson June 28, 2019, 12:05 a.m. UTC | #1
Quoting Matthew Auld (2019-06-27 21:56:24)
> +static void i915_gem_vm_open(struct vm_area_struct *vma)
> +{
> +       struct i915_mmap_offset *priv = vma->vm_private_data;
> +       struct drm_i915_gem_object *obj = priv->obj;
> +
> +       drm_gem_object_get(&obj->base);

Please use the right getters, i915_gem_object_get and
i915_gem_object_put.
-Chris
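
For illustration, the hooks with the i915 wrappers would look something
like this (a sketch of the requested change, not necessarily the final
revision):

static void i915_gem_vm_open(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *priv = vma->vm_private_data;

	/* take the object reference via the i915 wrapper */
	i915_gem_object_get(priv->obj);
}

static void i915_gem_vm_close(struct vm_area_struct *vma)
{
	struct i915_mmap_offset *priv = vma->vm_private_data;

	i915_gem_object_put(priv->obj);
	kref_put(&priv->ref, i915_mmap_offset_object_release);
}
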
Chris Wilson June 28, 2019, 12:08 a.m. UTC | #2
Quoting Matthew Auld (2019-06-27 21:56:24)
> +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +       struct drm_vma_offset_node *node;
> +       struct drm_file *priv = filp->private_data;
> +       struct drm_device *dev = priv->minor->dev;
> +       struct i915_mmap_offset *mmo;
> +       struct drm_gem_object *obj = NULL;
> +
> +       if (drm_dev_is_unplugged(dev))
> +               return -ENODEV;
> +
> +       drm_vma_offset_lock_lookup(dev->vma_offset_manager);
> +       node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
> +                                                 vma->vm_pgoff,
> +                                                 vma_pages(vma));
> +       if (likely(node)) {
> +               mmo = container_of(node, struct i915_mmap_offset,
> +                                  vma_node);
> +
> +               /* Take a ref for our mmap_offset and gem objects. The reference is cleaned

/*
 * Take

> +                * up when the vma is closed.
> +                *
> +                * Skip 0-refcnted objects as they are in the process of being destroyed
> +                * and will be invalid when the vma manager lock is released.
> +                */
> +               if (kref_get_unless_zero(&mmo->ref)) {
> +                       obj = &mmo->obj->base;
> +                       if (!kref_get_unless_zero(&obj->refcount))
> +                               obj = NULL;
> +               }
> +       }
> +       drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
> +
> +       if (!obj)
> +               return -EINVAL;

Please check the error paths for reference leaks.
-Chris
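
For illustration, there look to be two leaks here: if the object kref
is already zero, the mmo reference just taken is never dropped; and the
later -EACCES/-EINVAL unwinds put only the gem object, not the mmo. One
possible shape for the unwind (a sketch only, assuming mmo is
initialised to NULL and only left set once its kref has been taken, and
an int ret local; note i915_mmap_offset_object_release re-takes the
offset manager lock, so it must run outside the lookup lock):

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj) {
		/* object already dying: drop the mmo ref we took above */
		if (mmo)
			kref_put(&mmo->ref, i915_mmap_offset_object_release);
		return -EINVAL;
	}

	if (!drm_vma_node_is_allowed(node, priv)) {
		ret = -EACCES;
		goto err_refs;
	}

	if (node->readonly && (vma->vm_flags & VM_WRITE)) {
		ret = -EINVAL;
		goto err_refs;
	}

	/* ... set up vm_flags, vm_page_prot and vm_ops as before ... */
	return 0;

err_refs:
	drm_gem_object_put_unlocked(obj);
	kref_put(&mmo->ref, i915_mmap_offset_object_release);
	return ret;
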
Chris Wilson June 28, 2019, 12:09 a.m. UTC | #3
Quoting Matthew Auld (2019-06-27 21:56:24)
> +       if (node->readonly) {

Note that we can now drop the readonly field from the node as we only
added it for our benefit. Now that we've extended the vma impl, we can use
our obj readonly flag directly.
-Chris
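
For illustration, the readonly check in i915_gem_mmap() could then test
the object directly — a sketch, assuming i915_gem_object_is_readonly()
reverts to reading a flag on the object rather than walking the offset
nodes:

	if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put_unlocked(obj);
			return -EINVAL;
		}

		/* deny future mprotect(PROT_WRITE) on the mapping */
		vma->vm_flags &= ~VM_MAYWRITE;
	}
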
Chris Wilson June 28, 2019, 12:10 a.m. UTC | #4
Quoting Matthew Auld (2019-06-27 21:56:24)
> +       vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;

Strictly speaking, it's not actually VM_IO as we do not wrap and expose
mmio registers to userspace.
-Chris
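
i.e., a sketch of the suggested flags, with VM_IO dropped since no
register BARs are exposed through this path:

	/* not an mmio mapping, so no VM_IO */
	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
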

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 391621ee3cbb..7b46f44d9c20 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -219,7 +219,8 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
 	struct vm_area_struct *area = vmf->vma;
-	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+	struct i915_mmap_offset *priv = area->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
@@ -371,13 +372,15 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
+	struct i915_mmap_offset *mmo;
 
 	GEM_BUG_ON(!obj->userfault_count);
 
 	obj->userfault_count = 0;
 	list_del(&obj->userfault_link);
-	drm_vma_node_unmap(&obj->base.vma_node,
-			   obj->base.dev->anon_inode->i_mapping);
+	list_for_each_entry(mmo, &obj->mmap_offsets, offset)
+		drm_vma_node_unmap(&mmo->vma_node,
+				   obj->base.dev->anon_inode->i_mapping);
 
 	for_each_ggtt_vma(vma, obj)
 		i915_vma_unset_userfault(vma);
@@ -431,14 +434,31 @@  void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+static void init_mmap_offset(struct drm_i915_gem_object *obj,
+			     struct i915_mmap_offset *mmo)
+{
+	mutex_lock(&obj->mmo_lock);
+	kref_init(&mmo->ref);
+	list_add(&mmo->offset, &obj->mmap_offsets);
+	mutex_unlock(&obj->mmo_lock);
+}
+
+static int create_mmap_offset(struct drm_i915_gem_object *obj,
+			      struct i915_mmap_offset *mmo)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_device *dev = obj->base.dev;
 	int err;
 
-	err = drm_gem_create_mmap_offset(&obj->base);
-	if (likely(!err))
+	drm_vma_node_reset(&mmo->vma_node);
+	if (mmo->file)
+		drm_vma_node_allow(&mmo->vma_node, mmo->file);
+	err = drm_vma_offset_add(dev->vma_offset_manager, &mmo->vma_node,
+				 obj->base.size / PAGE_SIZE);
+	if (likely(!err)) {
+		init_mmap_offset(obj, mmo);
 		return 0;
+	}
 
 	/* Attempt to reap some mmap space from dead objects */
 	do {
@@ -449,32 +469,49 @@  static int create_mmap_offset(struct drm_i915_gem_object *obj)
 			break;
 
 		i915_gem_drain_freed_objects(i915);
-		err = drm_gem_create_mmap_offset(&obj->base);
-		if (!err)
+		err = drm_vma_offset_add(dev->vma_offset_manager, &mmo->vma_node,
+					 obj->base.size / PAGE_SIZE);
+		if (!err) {
+			init_mmap_offset(obj, mmo);
 			break;
+		}
 
 	} while (flush_delayed_work(&i915->gem.retire_work));
 
 	return err;
 }
 
-int
-i915_gem_mmap_gtt(struct drm_file *file,
-		  struct drm_device *dev,
-		  u32 handle,
-		  u64 *offset)
+static int
+__assign_gem_object_mmap_data(struct drm_file *file,
+			      u32 handle,
+			      enum i915_mmap_type mmap_type,
+			      u64 *offset)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_mmap_offset *mmo;
 	int ret;
 
 	obj = i915_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
-	ret = create_mmap_offset(obj);
-	if (ret == 0)
-		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+	mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
+	if (!mmo) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mmo->file = file;
+	ret = create_mmap_offset(obj, mmo);
+	if (ret) {
+		kfree(mmo);
+		goto err;
+	}
 
+	mmo->mmap_type = mmap_type;
+	mmo->obj = obj;
+	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
+err:
 	i915_gem_object_put(obj);
 	return ret;
 }
@@ -498,9 +535,115 @@  int
 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file)
 {
-	struct drm_i915_gem_mmap_gtt *args = data;
+	struct drm_i915_gem_mmap_offset *args = data;
 
-	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+	return __assign_gem_object_mmap_data(file, args->handle,
+					     I915_MMAP_TYPE_GTT,
+					     &args->offset);
+}
+
+void i915_mmap_offset_object_release(struct kref *ref)
+{
+	struct i915_mmap_offset *mmo = container_of(ref,
+						    struct i915_mmap_offset,
+						    ref);
+	struct drm_i915_gem_object *obj = mmo->obj;
+	struct drm_device *dev = obj->base.dev;
+
+	if (mmo->file)
+		drm_vma_node_revoke(&mmo->vma_node, mmo->file);
+	drm_vma_offset_remove(dev->vma_offset_manager, &mmo->vma_node);
+	list_del(&mmo->offset);
+
+	kfree(mmo);
+}
+
+static void i915_gem_vm_open(struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *priv = vma->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
+
+	drm_gem_object_get(&obj->base);
+}
+
+static void i915_gem_vm_close(struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *priv = vma->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
+
+	drm_gem_object_put_unlocked(&obj->base);
+	kref_put(&priv->ref, i915_mmap_offset_object_release);
+}
+
+static const struct vm_operations_struct i915_gem_gtt_vm_ops = {
+	.fault = i915_gem_fault,
+	.open = i915_gem_vm_open,
+	.close = i915_gem_vm_close,
+};
+
+/* This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data, since we need to be
+ * able to resolve multiple mmap offsets which could be tied to a
+ * single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_vma_offset_node *node;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct i915_mmap_offset *mmo;
+	struct drm_gem_object *obj = NULL;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (likely(node)) {
+		mmo = container_of(node, struct i915_mmap_offset,
+				   vma_node);
+
+		/* Take a ref for our mmap_offset and gem objects. The reference is cleaned
+		 * up when the vma is closed.
+		 *
+		 * Skip 0-refcnted objects as they are in the process of being destroyed
+		 * and will be invalid when the vma manager lock is released.
+		 */
+		if (kref_get_unless_zero(&mmo->ref)) {
+			obj = &mmo->obj->base;
+			if (!kref_get_unless_zero(&obj->refcount))
+				obj = NULL;
+		}
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+	if (!obj)
+		return -EINVAL;
+
+	if (!drm_vma_node_is_allowed(node, priv)) {
+		drm_gem_object_put_unlocked(obj);
+		return -EACCES;
+	}
+
+	if (node->readonly) {
+		if (vma->vm_flags & VM_WRITE) {
+			drm_gem_object_put_unlocked(obj);
+			return -EINVAL;
+		}
+
+		vma->vm_flags &= ~VM_MAYWRITE;
+	}
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+	vma->vm_private_data = mmo;
+
+	vma->vm_ops = &i915_gem_gtt_vm_ops;
+
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 43194fbcbc2e..343162bc8181 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -55,6 +55,27 @@  frontbuffer_retire(struct i915_active_request *active,
 	intel_fb_obj_flush(obj, ORIGIN_CS);
 }
 
+static void i915_gem_object_funcs_free(struct drm_gem_object *obj)
+{
+	/* Unused for now. Mandatory callback */
+}
+
+static void i915_gem_object_funcs_close(struct drm_gem_object *gem_obj, struct drm_file *file)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+	struct i915_mmap_offset *mmo, *on;
+
+	mutex_lock(&obj->mmo_lock);
+	list_for_each_entry_safe(mmo, on, &obj->mmap_offsets, offset)
+		kref_put(&mmo->ref, i915_mmap_offset_object_release);
+	mutex_unlock(&obj->mmo_lock);
+}
+
+static const struct drm_gem_object_funcs i915_gem_object_funcs = {
+	.free = i915_gem_object_funcs_free,
+	.close = i915_gem_object_funcs_close,
+};
+
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops)
 {
@@ -66,9 +87,13 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->lut_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
 
+	mutex_init(&obj->mmo_lock);
+	INIT_LIST_HEAD(&obj->mmap_offsets);
+
 	init_rcu_head(&obj->rcu);
 
 	obj->ops = ops;
+	obj->base.funcs = &i915_gem_object_funcs;
 
 	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
 	i915_active_request_init(&obj->frontbuffer_write,
@@ -155,6 +180,7 @@  static void __i915_gem_free_objects(struct drm_i915_private *i915,
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	llist_for_each_entry_safe(obj, on, freed, freed) {
 		struct i915_vma *vma, *vn;
+		struct i915_mmap_offset *mmo, *on;
 
 		trace_i915_gem_object_destroy(obj);
 
@@ -184,6 +210,7 @@  static void __i915_gem_free_objects(struct drm_i915_private *i915,
 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 		}
 
+		i915_gem_object_release_mmap(obj);
 		mutex_unlock(&i915->drm.struct_mutex);
 
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
@@ -203,6 +230,11 @@  static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 		drm_gem_object_release(&obj->base);
 
+		mutex_lock(&obj->mmo_lock);
+		list_for_each_entry_safe(mmo, on, &obj->mmap_offsets, offset)
+			kref_put(&mmo->ref, i915_mmap_offset_object_release);
+		mutex_unlock(&obj->mmo_lock);
+
 		bitmap_free(obj->bit_17);
 		i915_gem_object_free(obj);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 20754c15412a..42b46bb46580 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -125,13 +125,23 @@  void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
-	obj->base.vma_node.readonly = true;
+	struct i915_mmap_offset *mmo;
+
+	list_for_each_entry(mmo, &obj->mmap_offsets, offset)
+		mmo->vma_node.readonly = true;
 }
 
 static inline bool
 i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
 {
-	return obj->base.vma_node.readonly;
+	struct i915_mmap_offset *mmo;
+
+	list_for_each_entry(mmo, &obj->mmap_offsets, offset) {
+		if (mmo->vma_node.readonly)
+			return true;
+	}
+
+	return false;
 }
 
 static inline bool
@@ -419,6 +429,9 @@  int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
+
+void i915_mmap_offset_object_release(struct kref *ref);
+
 #define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
 
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 8cdee185251a..86f358da8085 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -61,6 +61,19 @@  struct drm_i915_gem_object_ops {
 	void (*release)(struct drm_i915_gem_object *obj);
 };
 
+enum i915_mmap_type {
+	I915_MMAP_TYPE_GTT = 0,
+};
+
+struct i915_mmap_offset {
+	struct drm_vma_offset_node vma_node;
+	struct drm_i915_gem_object *obj;
+	struct drm_file *file;
+	enum i915_mmap_type mmap_type;
+	struct kref ref;
+	struct list_head offset;
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
@@ -132,6 +145,10 @@  struct drm_i915_gem_object {
 	unsigned int userfault_count;
 	struct list_head userfault_link;
 
+	/* Protects access to mmap offsets */
+	struct mutex mmo_lock;
+	struct list_head mmap_offsets;
+
 	struct list_head batch_pool_link;
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 6949df0f963f..ea90ba9fd34c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -366,15 +366,20 @@  static bool assert_mmap_offset(struct drm_i915_private *i915,
 			       int expected)
 {
 	struct drm_i915_gem_object *obj;
+	/* refcounted in create_mmap_offset */
+	struct i915_mmap_offset *mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
 	int err;
 
 	obj = i915_gem_object_create_internal(i915, size);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = create_mmap_offset(obj);
+	err = create_mmap_offset(obj, mmo);
 	i915_gem_object_put(obj);
 
+	if (err)
+		kfree(mmo);
+
 	return err == expected;
 }
 
@@ -405,6 +410,8 @@  static int igt_mmap_offset_exhaustion(void *arg)
 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node resv, *hole;
+	/* refcounted in create_mmap_offset */
+	struct i915_mmap_offset *mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
 	u64 hole_start, hole_end;
 	int loop, err;
 
@@ -446,9 +453,10 @@  static int igt_mmap_offset_exhaustion(void *arg)
 		goto out;
 	}
 
-	err = create_mmap_offset(obj);
+	err = create_mmap_offset(obj, mmo);
 	if (err) {
 		pr_err("Unable to insert object into reclaimed hole\n");
+		kfree(mmo);
 		goto err_obj;
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index adfdb908587f..1ca98b0c4f82 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -688,6 +688,7 @@  static void revoke_mmaps(struct drm_i915_private *i915)
 
 	for (i = 0; i < i915->ggtt.num_fences; i++) {
 		struct drm_vma_offset_node *node;
+		struct i915_mmap_offset *mmo;
 		struct i915_vma *vma;
 		u64 vma_offset;
 
@@ -701,10 +702,20 @@  static void revoke_mmaps(struct drm_i915_private *i915)
 		GEM_BUG_ON(vma->fence != &i915->ggtt.fence_regs[i]);
 		node = &vma->obj->base.vma_node;
 		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
-		unmap_mapping_range(i915->drm.anon_inode->i_mapping,
-				    drm_vma_node_offset_addr(node) + vma_offset,
-				    vma->size,
-				    1);
+
+		mutex_lock(&vma->obj->mmo_lock);
+		list_for_each_entry(mmo, &vma->obj->mmap_offsets, offset) {
+			node = &mmo->vma_node;
+			if (!drm_mm_node_allocated(&node->vm_node) ||
+			    mmo->mmap_type != I915_MMAP_TYPE_GTT)
+				continue;
+
+			unmap_mapping_range(i915->drm.anon_inode->i_mapping,
+					    drm_vma_node_offset_addr(node) + vma_offset,
+					    vma->size,
+					    1);
+		}
+		mutex_unlock(&vma->obj->mmo_lock);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 34edc0302691..0f1f3b7f3029 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -3124,18 +3124,12 @@  const struct dev_pm_ops i915_pm_ops = {
 	.runtime_resume = intel_runtime_resume,
 };
 
-static const struct vm_operations_struct i915_gem_vm_ops = {
-	.fault = i915_gem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
 static const struct file_operations i915_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_gem_mmap,
+	.mmap = i915_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 	.compat_ioctl = i915_compat_ioctl,
@@ -3224,7 +3218,6 @@  static struct drm_driver driver = {
 
 	.gem_close_object = i915_gem_close_object,
 	.gem_free_object_unlocked = i915_gem_free_object,
-	.gem_vm_ops = &i915_gem_vm_ops,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4d24f9dc1193..dc2bf48165f0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2557,6 +2557,7 @@  int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index c20a3022cd80..e0e07818efe0 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -856,7 +856,8 @@  static void __i915_vma_iounmap(struct i915_vma *vma)
 
 void i915_vma_revoke_mmap(struct i915_vma *vma)
 {
-	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+	struct drm_vma_offset_node *node;
+	struct i915_mmap_offset *mmo;
 	u64 vma_offset;
 
 	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -868,10 +869,20 @@  void i915_vma_revoke_mmap(struct i915_vma *vma)
 	GEM_BUG_ON(!vma->obj->userfault_count);
 
 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
-	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
-			    drm_vma_node_offset_addr(node) + vma_offset,
-			    vma->size,
-			    1);
+
+	mutex_lock(&vma->obj->mmo_lock);
+	list_for_each_entry(mmo, &vma->obj->mmap_offsets, offset) {
+		node = &mmo->vma_node;
+		if (!drm_mm_node_allocated(&node->vm_node) ||
+		    mmo->mmap_type != I915_MMAP_TYPE_GTT)
+			continue;
+
+		unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+				    drm_vma_node_offset_addr(node) + vma_offset,
+				    vma->size,
+				    1);
+	}
+	mutex_unlock(&vma->obj->mmo_lock);
 
 	i915_vma_unset_userfault(vma);
 	if (!--vma->obj->userfault_count)