[1/4] drm/i915: Allow i915 to manage the vma offset nodes instead of drm core

Message ID 20191114190910.20380-1-abdiel.janulgue@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Abdiel Janulgue Nov. 14, 2019, 7:09 p.m. UTC
Have i915 replace the core drm_gem_mmap implementation to overcome its
limitation of having only a single mmap offset node per gem object.
This change allows us to have multiple mmap offsets per object and
enables each mmap instance to use a unique fault handler per user vma.
The userspace-facing flow is unchanged (see the sketch after the
changelog).

This allows i915 to store extra data within vma->vm_private_data and to
assign the pagefault ops for each mmap instance, allowing objects to use
multiple fault handlers depending on their backing storage.
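
For illustration, here is a condensed sketch (not part of the patch) of
the dispatch this enables. It reuses the i915_mmap_offset structure and
the I915_MMAP_TYPE_GTT value introduced below; example_fault is a
hypothetical wrapper, and non-GTT types only arrive with later patches
in the series:

  /*
   * Sketch only: vma->vm_private_data now carries a per-mapping
   * i915_mmap_offset rather than a bare drm_gem_object, so the fault
   * path can recover both the object (mmo->obj) and the mmap type.
   */
  static vm_fault_t example_fault(struct vm_fault *vmf)
  {
  	struct i915_mmap_offset *mmo = vmf->vma->vm_private_data;

  	switch (mmo->mmap_type) {
  	case I915_MMAP_TYPE_GTT:
  		return i915_gem_fault(vmf); /* the real handler, below */
  	default: /* further mmap types are added later in the series */
  		return VM_FAULT_SIGBUS;
  	}
  }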

v2:
 - Fix race condition exposed by gem_mmap_gtt@close-race. Simplify
   lifetime management of the mmap offset objects by ensuring they are
   owned by the parent gem object instead of refcounting.
 - Track the mmo used by fencing to avoid locking when revoking mmaps
   during GPU reset.
 - Rebase.
v3:
 - Simplify mmo tracking
v4:
 - use vma->mmo in __i915_gem_object_release_mmap_gtt
v5:
 - Revoke CPU mmaps on i915_gem_object_unbind() since, unlike GTT
   mmaps, they don't have bound i915_vma objects. Rebase.
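
As noted above, the uAPI is untouched: userspace still asks for a fake
offset with the mmap_gtt ioctl and mmaps the drm fd with it. A minimal
sketch of that flow (error handling elided; assumes an i915 device at
/dev/dri/card0):

  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <drm/i915_drm.h>

  int main(void)
  {
  	int fd = open("/dev/dri/card0", O_RDWR);
  	struct drm_i915_gem_create create = { .size = 4096 };
  	struct drm_i915_gem_mmap_gtt map = { 0 };
  	void *ptr;

  	/* create a gem object, then ask for its fake mmap offset */
  	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
  	map.handle = create.handle;
  	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map);

  	/* the offset now resolves to a per-mapping mmo node */
  	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
  		   MAP_SHARED, fd, map.offset);

  	munmap(ptr, 4096);
  	close(fd);
  	return 0;
  }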

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_domain.c    |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 235 +++++++++++++++---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  13 +
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   8 +-
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  19 ++
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  12 +-
 drivers/gpu/drm/i915/gt/intel_reset.c         |   7 +-
 drivers/gpu/drm/i915/i915_drv.c               |  10 +-
 drivers/gpu/drm/i915/i915_drv.h               |   3 +-
 drivers/gpu/drm/i915/i915_gem.c               |   5 +-
 drivers/gpu/drm/i915/i915_vma.c               |  15 +-
 drivers/gpu/drm/i915/i915_vma.h               |   3 +
 12 files changed, 278 insertions(+), 54 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index e2af63af67ad..d6c56272e785 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -255,7 +255,7 @@  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			}
 
 			if (obj->userfault_count)
-				__i915_gem_object_release_mmap(obj);
+				__i915_gem_object_release_mmap_gtt(obj);
 
 			/*
 			 * As we no longer need a fence for GTT access,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index d60973603cc1..e2df6379c7a7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -219,7 +219,8 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
 	struct vm_area_struct *area = vmf->vma;
-	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
+	struct i915_mmap_offset *priv = area->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
@@ -312,6 +313,9 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
 	mutex_unlock(&i915->ggtt.vm.mutex);
 
+	/* Track the mmo associated with the fenced vma */
+	vma->mmo = priv;
+
 	if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
 		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
@@ -358,7 +362,7 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	}
 }
 
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 
@@ -366,20 +370,16 @@  void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 
 	obj->userfault_count = 0;
 	list_del(&obj->userfault_link);
-	drm_vma_node_unmap(&obj->base.vma_node,
-			   obj->base.dev->anon_inode->i_mapping);
 
-	for_each_ggtt_vma(vma, obj)
+	for_each_ggtt_vma(vma, obj) {
+		if (vma->mmo)
+			drm_vma_node_unmap(&vma->mmo->vma_node,
+					   obj->base.dev->anon_inode->i_mapping);
 		i915_vma_unset_userfault(vma);
+	}
 }
 
 /**
- * i915_gem_object_release_mmap - remove physical page mappings
- * @obj: obj in question
- *
- * Preserve the reservation of the mmapping with the DRM core code, but
- * relinquish ownership of the pages back to the system.
- *
  * It is vital that we remove the page mapping if we have mapped a tiled
  * object through the GTT and then lose the fence register due to
  * resource pressure. Similarly if the object has been moved out of the
@@ -387,7 +387,7 @@  void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
  * mapping will then trigger a page fault on the next user access, allowing
  * fixup by i915_gem_fault().
  */
-void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+static void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	intel_wakeref_t wakeref;
@@ -406,7 +406,7 @@  void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	if (!obj->userfault_count)
 		goto out;
 
-	__i915_gem_object_release_mmap(obj);
+	__i915_gem_object_release_mmap_gtt(obj);
 
 	/* Ensure that the CPU's PTE are revoked and there are not outstanding
 	 * memory transactions from userspace before we return. The TLB
@@ -422,15 +422,63 @@  void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
-static int create_mmap_offset(struct drm_i915_gem_object *obj)
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
+{
+	struct i915_mmap_offset *mmo;
+
+	mutex_lock(&obj->mmo_lock);
+	list_for_each_entry(mmo, &obj->mmap_offsets, offset) {
+		/* vma_node_unmap for GTT mmaps handled already in
+		 * __i915_gem_object_release_mmap_gtt
+		 */
+		if (mmo->mmap_type != I915_MMAP_TYPE_GTT)
+			drm_vma_node_unmap(&mmo->vma_node,
+					   obj->base.dev->anon_inode->i_mapping);
+	}
+	mutex_unlock(&obj->mmo_lock);
+}
+
+/**
+ * i915_gem_object_release_mmap - remove physical page mappings
+ * @obj: obj in question
+ *
+ * Preserve the reservation of the mmapping with the DRM core code, but
+ * relinquish ownership of the pages back to the system.
+ */
+void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_release_mmap_gtt(obj);
+	i915_gem_object_release_mmap_offset(obj);
+}
+
+static void init_mmap_offset(struct drm_i915_gem_object *obj,
+			     struct i915_mmap_offset *mmo)
+{
+	mutex_lock(&obj->mmo_lock);
+	list_add(&mmo->offset, &obj->mmap_offsets);
+	mutex_unlock(&obj->mmo_lock);
+
+	mmo->obj = obj;
+	mmo->dev = obj->base.dev;
+}
+
+static int create_mmap_offset(struct drm_i915_gem_object *obj,
+			      struct i915_mmap_offset *mmo)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct intel_gt *gt = &i915->gt;
+	struct drm_device *dev = obj->base.dev;
 	int err;
 
-	err = drm_gem_create_mmap_offset(&obj->base);
-	if (likely(!err))
+	drm_vma_node_reset(&mmo->vma_node);
+	if (mmo->file)
+		drm_vma_node_allow(&mmo->vma_node, mmo->file);
+	err = drm_vma_offset_add(dev->vma_offset_manager, &mmo->vma_node,
+				 obj->base.size / PAGE_SIZE);
+	if (likely(!err)) {
+		init_mmap_offset(obj, mmo);
 		return 0;
+	}
 
 	/* Attempt to reap some mmap space from dead objects */
 	err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT);
@@ -438,34 +486,50 @@  static int create_mmap_offset(struct drm_i915_gem_object *obj)
 		return err;
 
 	i915_gem_drain_freed_objects(i915);
-	return drm_gem_create_mmap_offset(&obj->base);
+	err = drm_vma_offset_add(dev->vma_offset_manager, &mmo->vma_node,
+				 obj->base.size / PAGE_SIZE);
+	if (err)
+		return err;
+
+	init_mmap_offset(obj, mmo);
+	return 0;
 }
 
-int
-i915_gem_mmap_gtt(struct drm_file *file,
-		  struct drm_device *dev,
-		  u32 handle,
-		  u64 *offset)
+static int
+__assign_gem_object_mmap_data(struct drm_file *file,
+			      u32 handle,
+			      enum i915_mmap_type mmap_type,
+			      u64 *offset)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_mmap_offset *mmo;
 	int ret;
 
-	if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
-		return -ENODEV;
-
 	obj = i915_gem_object_lookup(file, handle);
 	if (!obj)
 		return -ENOENT;
 
-	if (i915_gem_object_never_bind_ggtt(obj)) {
+	if (mmap_type == I915_MMAP_TYPE_GTT &&
+	    i915_gem_object_never_bind_ggtt(obj)) {
 		ret = -ENODEV;
 		goto out;
 	}
 
-	ret = create_mmap_offset(obj);
-	if (ret == 0)
-		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
+	mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
+	if (!mmo) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
+	mmo->file = file;
+	ret = create_mmap_offset(obj, mmo);
+	if (ret) {
+		kfree(mmo);
+		goto out;
+	}
+
+	mmo->mmap_type = mmap_type;
+	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
 out:
 	i915_gem_object_put(obj);
 	return ret;
@@ -492,7 +556,118 @@  i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_mmap_gtt *args = data;
 
-	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+	return __assign_gem_object_mmap_data(file, args->handle,
+					     I915_MMAP_TYPE_GTT,
+					     &args->offset);
+}
+
+void i915_mmap_offset_destroy(struct i915_mmap_offset *mmo, struct mutex *mutex)
+{
+	if (mmo->file)
+		drm_vma_node_revoke(&mmo->vma_node, mmo->file);
+	drm_vma_offset_remove(mmo->dev->vma_offset_manager, &mmo->vma_node);
+
+	mutex_lock(mutex);
+	list_del(&mmo->offset);
+	mutex_unlock(mutex);
+
+	kfree(mmo);
+}
+
+static void i915_gem_vm_open(struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *mmo = vma->vm_private_data;
+	struct drm_i915_gem_object *obj = mmo->obj;
+
+	GEM_BUG_ON(!obj);
+	i915_gem_object_get(obj);
+}
+
+static void i915_gem_vm_close(struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *mmo = vma->vm_private_data;
+	struct drm_i915_gem_object *obj = mmo->obj;
+
+	GEM_BUG_ON(!obj);
+	i915_gem_object_put(obj);
+}
+
+static const struct vm_operations_struct i915_gem_gtt_vm_ops = {
+	.fault = i915_gem_fault,
+	.open = i915_gem_vm_open,
+	.close = i915_gem_vm_close,
+};
+
+/* This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data, since we need to be
+ * able to resolve multiple mmap offsets which could be tied to a
+ * single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_vma_offset_node *node;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct i915_mmap_offset *mmo = NULL;
+	struct drm_gem_object *obj = NULL;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (likely(node)) {
+		mmo = container_of(node, struct i915_mmap_offset,
+				   vma_node);
+		/*
+		 * In our dependency chain, the drm_vma_offset_node
+		 * depends on the validity of the mmo, which depends on
+		 * the gem object. However the only reference we have
+		 * at this point is the mmo (as the parent of the node).
+		 * Try to check if the gem object was at least cleared.
+		 */
+		if (!mmo || !mmo->obj) {
+			drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+			return -EINVAL;
+		}
+		/*
+		 * Skip 0-refcnted objects as they are in the process of
+		 * being destroyed and will be invalid when the vma manager
+		 * lock is released.
+		 */
+		obj = &mmo->obj->base;
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+	if (!obj)
+		return -EINVAL;
+
+	if (!drm_vma_node_is_allowed(node, priv)) {
+		drm_gem_object_put_unlocked(obj);
+		return -EACCES;
+	}
+
+	if (to_intel_bo(obj)->readonly) {
+		if (vma->vm_flags & VM_WRITE) {
+			drm_gem_object_put_unlocked(obj);
+			return -EINVAL;
+		}
+		vma->vm_flags &= ~VM_MAYWRITE;
+	}
+
+	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+	vma->vm_private_data = mmo;
+
+	vma->vm_ops = &i915_gem_gtt_vm_ops;
+
+	return 0;
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index db103d3c8760..30c44c8fd557 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -61,6 +61,9 @@  void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	INIT_LIST_HEAD(&obj->lut_list);
 
+	mutex_init(&obj->mmo_lock);
+	INIT_LIST_HEAD(&obj->mmap_offsets);
+
 	init_rcu_head(&obj->rcu);
 
 	obj->ops = ops;
@@ -158,6 +161,8 @@  static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	llist_for_each_entry_safe(obj, on, freed, freed) {
+		struct i915_mmap_offset *mmo, *mn;
+
 		trace_i915_gem_object_destroy(obj);
 
 		if (!list_empty(&obj->vma.list)) {
@@ -183,6 +188,14 @@  static void __i915_gem_free_objects(struct drm_i915_private *i915,
 			spin_unlock(&obj->vma.lock);
 		}
 
+		i915_gem_object_release_mmap(obj);
+
+		list_for_each_entry_safe(mmo, mn, &obj->mmap_offsets, offset) {
+			mmo->obj = NULL;
+			i915_mmap_offset_destroy(mmo, &obj->mmo_lock);
+		}
+
+		GEM_BUG_ON(!list_empty(&obj->mmap_offsets));
 		GEM_BUG_ON(atomic_read(&obj->bind_count));
 		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e5750d506cc9..314cba57b185 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -132,13 +132,13 @@  void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
 static inline void
 i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
 {
-	obj->base.vma_node.readonly = true;
+	obj->readonly = true;
 }
 
 static inline bool
 i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
 {
-	return obj->base.vma_node.readonly;
+	return obj->readonly;
 }
 
 static inline bool
@@ -387,8 +387,9 @@  static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }
 
-void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
 
 void
 i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
@@ -473,5 +474,6 @@  int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
+void i915_mmap_offset_destroy(struct i915_mmap_offset *mmo, struct mutex *mutex);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 15f8297dc34e..632d6b804cfb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -63,6 +63,20 @@  struct drm_i915_gem_object_ops {
 	void (*release)(struct drm_i915_gem_object *obj);
 };
 
+enum i915_mmap_type {
+	I915_MMAP_TYPE_GTT = 0,
+};
+
+struct i915_mmap_offset {
+	struct drm_device *dev;
+	struct drm_vma_offset_node vma_node;
+	struct drm_i915_gem_object *obj;
+	struct drm_file *file;
+	enum i915_mmap_type mmap_type;
+
+	struct list_head offset;
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
@@ -118,6 +132,11 @@  struct drm_i915_gem_object {
 	unsigned int userfault_count;
 	struct list_head userfault_link;
 
+	/* Protects access to mmap offsets */
+	struct mutex mmo_lock;
+	struct list_head mmap_offsets;
+	bool readonly:1;
+
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
 	unsigned long flags;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 9f1a69027a04..7761d2e0971d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -564,15 +564,20 @@  static bool assert_mmap_offset(struct drm_i915_private *i915,
 			       int expected)
 {
 	struct drm_i915_gem_object *obj;
+	/* Ownership transferred to parent gem object in create_mmap_offset */
+	struct i915_mmap_offset *mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
 	int err;
 
 	obj = i915_gem_object_create_internal(i915, size);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = create_mmap_offset(obj);
+	err = create_mmap_offset(obj, mmo);
+	if (err)
+		kfree(mmo);
 	i915_gem_object_put(obj);
 
+
 	return err == expected;
 }
 
@@ -608,6 +613,8 @@  static int igt_mmap_offset_exhaustion(void *arg)
 	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *hole, *next;
+	/* Ownership transferred to parent gem object in create_mmap_offset */
+	struct i915_mmap_offset *mmo = kzalloc(sizeof(*mmo), GFP_KERNEL);
 	int loop, err;
 
 	/* Disable background reaper */
@@ -672,9 +679,10 @@  static int igt_mmap_offset_exhaustion(void *arg)
 		goto out;
 	}
 
-	err = create_mmap_offset(obj);
+	err = create_mmap_offset(obj, mmo);
 	if (err) {
 		pr_err("Unable to insert object into reclaimed hole\n");
+		kfree(mmo);
 		goto err_obj;
 	}
 
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index b7007cd78c6f..777eaa483676 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -678,8 +678,13 @@  static void revoke_mmaps(struct intel_gt *gt)
 			continue;
 
 		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
-		node = &vma->obj->base.vma_node;
+
+		if (!vma->mmo)
+			continue;
+
+		node = &vma->mmo->vma_node;
 		vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+
 		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
 				    drm_vma_node_offset_addr(node) + vma_offset,
 				    vma->size,
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 8d7049792d62..5fe071640893 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2662,18 +2662,12 @@  const struct dev_pm_ops i915_pm_ops = {
 	.runtime_resume = intel_runtime_resume,
 };
 
-static const struct vm_operations_struct i915_gem_vm_ops = {
-	.fault = i915_gem_fault,
-	.open = drm_gem_vm_open,
-	.close = drm_gem_vm_close,
-};
-
 static const struct file_operations i915_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = drm_ioctl,
-	.mmap = drm_gem_mmap,
+	.mmap = i915_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,
 	.compat_ioctl = i915_compat_ioctl,
@@ -2762,7 +2756,6 @@  static struct drm_driver driver = {
 
 	.gem_close_object = i915_gem_close_object,
 	.gem_free_object_unlocked = i915_gem_free_object,
-	.gem_vm_ops = &i915_gem_vm_ops,
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -2773,7 +2766,6 @@  static struct drm_driver driver = {
 	.get_scanout_position = i915_get_crtc_scanoutpos,
 
 	.dumb_create = i915_gem_dumb_create,
-	.dumb_map_offset = i915_gem_mmap_gtt,
 	.ioctls = i915_ioctls,
 	.num_ioctls = ARRAY_SIZE(i915_ioctls),
 	.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1779f600fcfb..968c53bfb57b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1861,8 +1861,6 @@  i915_mutex_lock_interruptible(struct drm_device *dev)
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
-int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
-		      u32 handle, u64 *offset);
 int i915_gem_mmap_gtt_version(void);
 
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
@@ -1886,6 +1884,7 @@  void i915_gem_driver_release(struct drm_i915_private *dev_priv);
 void i915_gem_suspend(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
 void i915_gem_resume(struct drm_i915_private *dev_priv);
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 vm_fault_t i915_gem_fault(struct vm_fault *vmf);
 
 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 43c532756c7c..a1da9b6d031b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -145,6 +145,9 @@  int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	list_splice(&still_in_list, &obj->vma.list);
 	spin_unlock(&obj->vma.lock);
 
+	if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE)
+		i915_gem_object_release_mmap_offset(obj);
+
 	return ret;
 }
 
@@ -854,7 +857,7 @@  void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 
 	list_for_each_entry_safe(obj, on,
 				 &i915->ggtt.userfault_list, userfault_link)
-		__i915_gem_object_release_mmap(obj);
+		__i915_gem_object_release_mmap_gtt(obj);
 
 	/*
 	 * The fence will be lost when the device powers down. If any were
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index e5512f26e20a..8c1aab9b9627 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1054,7 +1054,7 @@  static void __i915_vma_iounmap(struct i915_vma *vma)
 
 void i915_vma_revoke_mmap(struct i915_vma *vma)
 {
-	struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
+	struct drm_vma_offset_node *node;
 	u64 vma_offset;
 
 	lockdep_assert_held(&vma->vm->mutex);
@@ -1066,10 +1066,15 @@  void i915_vma_revoke_mmap(struct i915_vma *vma)
 	GEM_BUG_ON(!vma->obj->userfault_count);
 
 	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
-	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
-			    drm_vma_node_offset_addr(node) + vma_offset,
-			    vma->size,
-			    1);
+
+	if (vma->mmo) {
+		node = &vma->mmo->vma_node;
+
+		unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
+				    drm_vma_node_offset_addr(node) + vma_offset,
+				    vma->size,
+				    1);
+	}
 
 	i915_vma_unset_userfault(vma);
 	if (!--vma->obj->userfault_count)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 465932813bc5..f09f4f513c41 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -63,6 +63,9 @@  struct i915_vma {
 	u64 display_alignment;
 	struct i915_page_sizes page_sizes;
 
+	/* mmap-offset associated with fencing for this vma */
+	struct i915_mmap_offset	*mmo;
+
 	u32 fence_size;
 	u32 fence_alignment;