
[RFC,2/2] drm/i915/gem: Introduce VM_WAIT, a futex-lite operation

Message ID 20200118212903.3606443-2-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show
Series [RFC,1/2] drm/i915/gem: Convert vm idr to xarray

Commit Message

Chris Wilson Jan. 18, 2020, 9:29 p.m. UTC
Currently, we only allow waiting on the forward progress of an individual
GEM object, or of a GEM execbuf fence. The primary purpose of the fence
is to provide a scheduling primitive to order the execution flow of
batches (cf. VkSemaphore).

Userspace instead uses values in memory to implement client fences, and
has to combine busywaiting on the value with a dma_fence wait for when it
needs to sleep. It has no intermediate step where it can wait on the
memory value itself to change, which is required for scenarios where the
dma_fence may incur too much execution latency.

The CPU equivalent is the futex syscall, used to set up a waiter/waker
pair based on a memory location. This is used to implement an efficient
sleep for pthread_mutex_t, where the fast uncontended path can be handled
entirely in userspace.
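
For reference, the uncontended fast path of such a futex-backed lock looks
roughly like this (a minimal userspace sketch, not the glibc
implementation):

#include <stdatomic.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

/* Illustrative futex-backed lock; error handling omitted */
static void lock(_Atomic int *word)
{
	int expected = 0;

	/* Fast path: an uncontended acquire never enters the kernel */
	while (!atomic_compare_exchange_strong(word, &expected, 1)) {
		/* Slow path: sleep in the kernel until *word leaves 1 */
		syscall(SYS_futex, word, FUTEX_WAIT, 1, NULL, NULL, 0);
		expected = 0;
	}
}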

This patch implements a similar idea, where we take a virtual address in
the client's ppGTT and install an interrupt handler to wake up the
current task when the memory location passes the user-supplied filter.
It also allows the user to emit their own MI_USER_INTERRUPT within their
batches after updating the value on the GPU to have sub-batch precision
on the wakeup.
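
With this patch, a client that has emitted a fence value to a location in
its ppGTT can sleep on it along these lines (a sketch against the uapi
added below; drmIoctl() from libdrm and the vm_id/iova plumbing are
assumed):

#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Sleep until the u32 at fence_addr in the ppGTT reaches target */
static int wait_for_seqno(int fd, __u32 vm_id, __u64 fence_addr,
			  __u32 target, __s64 timeout_ns)
{
	struct drm_i915_gem_vm_wait wait = {
		.vm_id = vm_id,         /* from DRM_IOCTL_I915_GEM_VM_CREATE */
		.iova = fence_addr,     /* naturally aligned for the mask width */
		.op = I915_VM_WAIT_GTE, /* wake when (*iova & mask) >= (value & mask) */
		.value = target,
		.mask = I915_VM_WAIT_U32,
		.timeout = timeout_ns,  /* relative ns; negative waits forever */
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_WAIT, &wait);
}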

Opens:

- on attaching the waiter, we enable interrupts on all engines,
irrespective of which are active for the VM.
 * we can optimise when to enable interrupts while the VM is active
 * we can extend the interface for the user to select which engines may
   wake us

- we could return an fd wrapping the comparison operation on the memory
address if we want to pass the waiter between processes or reuse the
waiter (with poll() + read(), like timerfd).

References: b2c97bc78919 ("anv/query: Busy-wait for available query entries")
References: https://gitlab.freedesktop.org/mesa/mesa/merge_requests/3279#note_377240
Testcase: igt/gem_vm_wait
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Jason Ekstrand <jason@jlekstrand.net>
Cc: Kristian H. Kristensen <hoegsberg@google.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/Makefile                |   1 +
 drivers/gpu/drm/i915/gem/i915_gem_ioctls.h   |   2 +
 drivers/gpu/drm/i915/gem/i915_gem_vm.c       | 386 +++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c  |  43 +++
 drivers/gpu/drm/i915/gt/intel_engine.h       |   5 +
 drivers/gpu/drm/i915/gt/intel_engine_types.h |   1 +
 drivers/gpu/drm/i915/i915_drv.c              |   1 +
 include/uapi/drm/i915_drm.h                  |  36 ++
 8 files changed, 475 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_vm.c

Comments

Chris Wilson Jan. 18, 2020, 10:17 p.m. UTC | #1
Quoting Chris Wilson (2020-01-18 21:29:03)
> [snip]
>
> - we could return an fd wrapping the comparison operation on the memory
> address if we want to pass the waiter around different processes or
> reuse the waiter (with poll() + read() like timerfd).

The other thing we could do is wrap up the comparator into a dma_fence
so we can use it for scheduling as well.
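
Something along these lines, reusing the ioctl's comparator (a rough
sketch, with lifetime and signaling glossed over; all names hypothetical):

struct iova_fence {
	struct dma_fence base;
	spinlock_t lock;
	struct iova_wake wake;
};

static bool iova_fence_signaled(struct dma_fence *f)
{
	struct iova_fence *iova = container_of(f, typeof(*iova), base);

	return iova_compare(&iova->wake);
}

static bool iova_fence_enable_signaling(struct dma_fence *f)
{
	/* hook the engine breadcrumb waitqueues, as the ioctl does */
	return !iova_fence_signaled(f);
}

static const char *iova_fence_name(struct dma_fence *f)
{
	return "i915";
}

static const struct dma_fence_ops iova_fence_ops = {
	.get_driver_name = iova_fence_name,
	.get_timeline_name = iova_fence_name,
	.enable_signaling = iova_fence_enable_signaling,
	.signaled = iova_fence_signaled,
};
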
-Chris

Patch

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 3c88d7d8c764..5e1441cf12d8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -139,6 +139,7 @@  gem-y += \
 	gem/i915_gem_throttle.o \
 	gem/i915_gem_tiling.o \
 	gem/i915_gem_userptr.o \
+	gem/i915_gem_vm.o \
 	gem/i915_gem_wait.o \
 	gem/i915_gemfs.o
 i915-y += \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
index 87d8b27f426d..92f265c84290 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ioctls.h
@@ -48,5 +48,7 @@  int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
+int i915_gem_vm_wait_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_vm.c b/drivers/gpu/drm/i915/gem/i915_gem_vm.c
new file mode 100644
index 000000000000..d9a1de3ec4a9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_vm.c
@@ -0,0 +1,386 @@ 
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/wait.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_utils.h>
+
+#include "i915_drv.h"
+#include "i915_gem_ioctls.h"
+#include "i915_user_extensions.h"
+
+struct iova_wake {
+	struct task_struct *tsk;
+	void *ptr;
+	u64 value;
+	u64 mask;
+	u16 width;
+	u16 op;
+	bool cache_coherent;
+};
+
+static bool iova_compare(const struct iova_wake *wake)
+{
+	u64 value = wake->value & wake->mask;
+	u64 target = 0;
+
+	GEM_BUG_ON(wake->width > sizeof(target));
+
+	if (!wake->cache_coherent)
+		drm_clflush_virt_range(wake->ptr, wake->width);
+	switch (wake->width) {
+	case 1: memcpy(&target, wake->ptr, 1); break;
+	case 2: memcpy(&target, wake->ptr, 2); break;
+	case 4: memcpy(&target, wake->ptr, 4); break;
+	case 8: memcpy(&target, wake->ptr, 8); break;
+	}
+	target &= wake->mask;
+
+	switch (wake->op) {
+	case I915_VM_WAIT_EQ:
+		return value == target;
+	case I915_VM_WAIT_NEQ:
+		return value != target;
+
+	case I915_VM_WAIT_GT:
+		return target > value;
+	case I915_VM_WAIT_GTE:
+		return target >= value;
+
+	case I915_VM_WAIT_LT:
+		return target < value;
+	case I915_VM_WAIT_LTE:
+		return target <= value;
+
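+	/* AFTER/BEFORE compare signed deltas, so seqno wraparound is benign */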
+	case I915_VM_WAIT_AFTER:
+		switch (wake->width) {
+		case 1:  return (s8)(target - value) > 0;
+		case 2:  return (s16)(target - value) > 0;
+		case 4:  return (s32)(target - value) > 0;
+		default: return (s64)(target - value) > 0;
+		}
+
+	case I915_VM_WAIT_BEFORE:
+		switch (wake->width) {
+		case 1:  return (s8)(target - value) < 0;
+		case 2:  return (s16)(target - value) < 0;
+		case 4:  return (s32)(target - value) < 0;
+		default: return (s64)(target - value) < 0;
+		}
+
+	default:
+		return true;
+	}
+}
+
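+/*
+ * Waitqueue callback, invoked via the breadcrumb irq worker on each
+ * user interrupt: wake the sleeping task only once the filter passes.
+ */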
+static int iova_wake(wait_queue_entry_t *curr,
+		     unsigned int mode, int wake_flags,
+		     void *key)
+{
+	struct iova_wake *wake = curr->private;
+
+	if (!iova_compare(wake))
+		return 0;
+
+	return wake_up_process(wake->tsk);
+}
+
+static int iova_wake_map(struct i915_vma *vma,
+			 struct drm_i915_gem_vm_wait *arg,
+			 struct iova_wake *wake)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	u64 offset = arg->iova - vma->node.start;
+
+	wake->tsk = current;
+	wake->value = arg->value;
+	wake->mask = arg->mask;
+	wake->op = arg->op;
+
+	if (i915_gem_object_has_struct_page(obj)) {
+		struct page *page;
+
+		page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+		wake->ptr = kmap(page) + offset_in_page(offset);
+
+		wake->cache_coherent =
+			obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ;
+	} else {
+		void *ptr;
+
+		ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+		if (IS_ERR(ptr))
+			return PTR_ERR(ptr);
+
+		wake->ptr = ptr + offset;
+		wake->cache_coherent = true;
+	}
+
+	return 0;
+}
+
+static void iova_wake_unmap(struct i915_vma *vma,
+			    struct drm_i915_gem_vm_wait *arg,
+			    struct iova_wake *wake)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (i915_gem_object_has_struct_page(obj)) {
+		u64 offset = arg->iova - vma->node.start;
+
+		kunmap(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+	} else {
+		i915_gem_object_unpin_map(obj);
+	}
+}
+
+static struct i915_vma *
+find_vma_for_iova(struct i915_address_space *vm, u64 iova, unsigned int width)
+{
+	struct drm_mm_node *node;
+	struct i915_vma *vma;
+
+	if (mutex_lock_interruptible(&vm->mutex))
+		return ERR_PTR(-EINTR);
+
+	node = __drm_mm_interval_first(&vm->mm, iova, iova + width);
+	if (!node || node->color == I915_COLOR_UNEVICTABLE) {
+		vma = ERR_PTR(-ENOENT);
+		goto out_unlock;
+	}
+
+	if (node->start > iova || iova + width > node->start + node->size) {
+		vma = ERR_PTR(-ENOENT);
+		goto out_unlock;
+	}
+
+	vma = container_of(node, typeof(*vma), node);
+	i915_active_acquire(&vma->active);
+
+out_unlock:
+	mutex_unlock(&vm->mutex);
+	return vma;
+}
+
+struct engine_wait {
+	struct wait_queue_entry wq_entry;
+	struct intel_engine_cs *engine;
+	struct engine_wait *next;
+};
+
+static int
+add_engine_wait(struct engine_wait **head,
+		struct intel_engine_cs *engine,
+		struct iova_wake *wake)
+{
+	struct engine_wait *wait;
+
+	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+	if (!wait)
+		return -ENOMEM;
+
+	wait->engine = engine;
+	wait->wq_entry.flags = 0;
+	wait->wq_entry.private = wake;
+	wait->wq_entry.func = iova_wake;
+	intel_engine_add_wait(engine, &wait->wq_entry);
+
+	wait->next = *head;
+	*head = wait;
+
+	return 0;
+}
+
+static int add_gt_wait(struct engine_wait **head,
+		       struct intel_gt *gt,
+		       struct iova_wake *wake)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err;
+
+	for_each_engine(engine, gt, id) {
+		err = add_engine_wait(head, engine, wake);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static void remove_waits(struct engine_wait *wait)
+{
+	while (wait) {
+		struct engine_wait *next = wait->next;
+
+		intel_engine_remove_wait(wait->engine, &wait->wq_entry);
+		kfree(wait);
+
+		wait = next;
+	}
+}
+
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+	/* nsecs_to_jiffies64() does not guard against overflow */
+	if (NSEC_PER_SEC % HZ &&
+	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+		return MAX_JIFFY_OFFSET;
+
+	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
+
+static unsigned long to_wait_timeout(const struct drm_i915_gem_vm_wait *arg)
+{
+	if (arg->flags & I915_VM_WAIT_ABSTIME)
+		return drm_timeout_abs_to_jiffies(arg->timeout);
+
+	if (arg->timeout < 0)
+		return MAX_SCHEDULE_TIMEOUT;
+
+	if (arg->timeout == 0)
+		return 0;
+
+	return nsecs_to_jiffies_timeout(arg->timeout);
+}
+
+int i915_gem_vm_wait_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file)
+{
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+	struct drm_i915_gem_vm_wait *arg = data;
+	struct engine_wait *wait = NULL;
+	struct i915_address_space *vm;
+	struct iova_wake wake;
+	unsigned long timeout;
+	struct i915_vma *vma;
+	ktime_t start;
+	int err;
+
+	if (arg->flags & ~I915_VM_WAIT_ABSTIME)
+		return -EINVAL;
+
+	switch (arg->op) {
+	case I915_VM_WAIT_EQ:
+	case I915_VM_WAIT_NEQ:
+	case I915_VM_WAIT_GT:
+	case I915_VM_WAIT_GTE:
+	case I915_VM_WAIT_LT:
+	case I915_VM_WAIT_LTE:
+	case I915_VM_WAIT_AFTER:
+	case I915_VM_WAIT_BEFORE:
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
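+	/* Derive the access width in bytes from the highest set mask bit */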
+	wake.width = fls64(arg->mask);
+	if (!wake.width)
+		return -EINVAL;
+
+	/* Restrict the iova to be "naturally" aligned */
+	wake.width = DIV_ROUND_UP(roundup_pow_of_two(wake.width), 8);
+	if (!IS_ALIGNED(arg->iova, wake.width))
+		return -EINVAL;
+
+	/* Natural alignment also means the iova cannot cross a page boundary */
+	GEM_BUG_ON(arg->iova >> PAGE_SHIFT !=
+		   (arg->iova + wake.width - 1) >> PAGE_SHIFT);
+
+	rcu_read_lock();
+	vm = xa_load(&file_priv->vm_xa, arg->vm_id);
+	if (vm && !kref_get_unless_zero(&vm->ref))
+		vm = NULL;
+	rcu_read_unlock();
+	if (!vm)
+		return -ENOENT;
+
+	vma = find_vma_for_iova(vm, arg->iova, wake.width);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_vm;
+	}
+
+	err = iova_wake_map(vma, arg, &wake);
+	if (err)
+		goto out_vma;
+
+	err = i915_user_extensions(u64_to_user_ptr(arg->extensions),
+				   NULL, 0, &wake);
+	if (err)
+		goto out_wake;
+
+	if (iova_compare(&wake))
+		goto out_wake;
+
+	timeout = to_wait_timeout(arg);
+	if (!timeout) {
+		err = -ETIME;
+		goto out_wake;
+	}
+
+	err = add_gt_wait(&wait, vm->gt, &wake);
+	if (err)
+		goto out_wait;
+
+	start = ktime_get();
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (iova_compare(&wake))
+			break;
+
+		if (signal_pending(wake.tsk)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		if (!timeout) {
+			err = -ETIME;
+			break;
+		}
+
+		timeout = io_schedule_timeout(timeout);
+	}
+	__set_current_state(TASK_RUNNING);
+
+	if (!(arg->flags & I915_VM_WAIT_ABSTIME) && arg->timeout > 0) {
+		arg->timeout -= ktime_to_ns(ktime_sub(ktime_get(), start));
+		if (arg->timeout < 0)
+			arg->timeout = 0;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regression from the timespec->ktime conversion.
+		 */
+		if (err == -ETIME && !nsecs_to_jiffies(arg->timeout))
+			arg->timeout = 0;
+
+		/* Asked to wait beyond the jiffy/scheduler precision? */
+		if (err == -ETIME && arg->timeout)
+			err = -EAGAIN;
+	}
+
+out_wait:
+	remove_waits(wait);
+out_wake:
+	iova_wake_unmap(vma, arg, &wake);
+out_vma:
+	i915_active_release(&vma->active);
+out_vm:
+	i915_vm_put(vm);
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 0ba524a414c6..cb6ad8d66917 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -210,6 +210,8 @@  static void signal_irq_work(struct irq_work *work)
 
 		i915_request_put(rq);
 	}
+
+	wake_up_all(&b->wq);
 }
 
 static bool __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
@@ -254,6 +256,7 @@  void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
 	INIT_LIST_HEAD(&b->signalers);
 
 	init_irq_work(&b->irq_work, signal_irq_work);
+	init_waitqueue_head(&b->wq);
 }
 
 void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
@@ -349,6 +352,46 @@  void i915_request_cancel_breadcrumb(struct i915_request *rq)
 	spin_unlock(&b->irq_lock);
 }
 
+static void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	spin_lock_irq(&b->irq_lock);
+	if (!b->irq_enabled++)
+		irq_enable(engine);
+	GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
+	spin_unlock_irq(&b->irq_lock);
+}
+
+static void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	spin_lock_irq(&b->irq_lock);
+	GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
+	if (!--b->irq_enabled)
+		irq_disable(engine);
+	spin_unlock_irq(&b->irq_lock);
+}
+
+void intel_engine_add_wait(struct intel_engine_cs *engine,
+			   struct wait_queue_entry *wait)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	intel_engine_pin_breadcrumbs_irq(engine);
+	add_wait_queue(&b->wq, wait);
+}
+
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+			      struct wait_queue_entry *wait)
+{
+	struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+	remove_wait_queue(&b->wq, wait);
+	intel_engine_unpin_breadcrumbs_irq(engine);
+}
+
 void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
 				    struct drm_printer *p)
 {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 5df003061e44..dc00772dcba5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -210,6 +210,11 @@  void intel_engine_init_execlists(struct intel_engine_cs *engine);
 void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
 void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
 
+void intel_engine_add_wait(struct intel_engine_cs *engine,
+			   struct wait_queue_entry *wait);
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+			      struct wait_queue_entry *wait);
+
 void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
 
 static inline void
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 77e68c7643de..415b12a6aef0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -349,6 +349,7 @@  struct intel_engine_cs {
 		struct list_head signalers;
 
 		struct irq_work irq_work; /* for use from inside irq_lock */
+		struct wait_queue_head wq;
 
 		unsigned int irq_enabled;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f7385abdd74b..7ab4039cc1e5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2747,6 +2747,7 @@  static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_VM_WAIT, i915_gem_vm_wait_ioctl, DRM_RENDER_ALLOW),
 };
 
 static struct drm_driver driver = {
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 829c0a48577f..421df6aa4520 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -359,6 +359,7 @@  typedef struct _drm_i915_sarea {
 #define DRM_I915_QUERY			0x39
 #define DRM_I915_GEM_VM_CREATE		0x3a
 #define DRM_I915_GEM_VM_DESTROY		0x3b
+#define DRM_I915_GEM_VM_WAIT		0x3c
 /* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -422,6 +423,7 @@  typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
 #define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
 #define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_WAIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_WAIT, struct drm_i915_gem_vm_wait)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -1824,6 +1826,40 @@  struct drm_i915_gem_vm_control {
 	__u32 vm_id;
 };
 
+/*
+ * (*IOVA & MASK) OP (VALUE & MASK)
+ *
+ * OP:
+ * - EQ, NEQ
+ * - GT, GTE
+ * - LT, LTE
+ * - BEFORE, AFTER
+ *
+ */
+struct drm_i915_gem_vm_wait {
+	__u64 extensions;
+	__u64 iova;
+	__u32 vm_id;
+	__u16 op;
+#define I915_VM_WAIT_EQ		0
+#define I915_VM_WAIT_NEQ	1
+#define I915_VM_WAIT_GT		2
+#define I915_VM_WAIT_GTE	3
+#define I915_VM_WAIT_LT		4
+#define I915_VM_WAIT_LTE	5
+#define I915_VM_WAIT_BEFORE	6
+#define I915_VM_WAIT_AFTER	7
+	__u16 flags;
+#define I915_VM_WAIT_ABSTIME 0x1
+	__u64 value;
+	__u64 mask;
+#define I915_VM_WAIT_U8		0xffu
+#define I915_VM_WAIT_U16	0xffffu
+#define I915_VM_WAIT_U32	0xfffffffful
+#define I915_VM_WAIT_U64	0xffffffffffffffffull
+	__s64 timeout;
+};
+
 struct drm_i915_reg_read {
 	/*
 	 * Register offset.