
[2/2] drm/i915: add syncobj timeline support

Message ID 20190523114620.19335-3-lionel.g.landwerlin@intel.com (mailing list archive)
State New, archived
Series drm/i915: timeline semaphore support

Commit Message

Lionel Landwerlin May 23, 2019, 11:46 a.m. UTC
Introduces a new parameter to execbuf so that we can specify syncobj
handles as well as timeline points.

This is needed for the submission side of the Vulkan timeline
semaphore (VK_KHR_timeline_semaphore extension).
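
For context, here is a rough sketch of how userspace might drive the
proposed uapi (the fd, context id, buffer handle, syncobj handles and
points below are placeholders, and the batch/relocation setup is
elided). Support would first be queried via
I915_PARAM_HAS_EXEC_FENCE_ARRAY2.

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Hypothetical helper: wait on one timeline point and signal another in
 * a single execbuf call, using the drm_i915_gem_exec_fence2 array and
 * I915_EXEC_FENCE_ARRAY2 flag added by this patch. */
static int submit_with_timeline_points(int fd, uint32_t ctx_id,
				       uint32_t batch_handle,
				       uint32_t wait_syncobj, uint64_t wait_point,
				       uint32_t signal_syncobj, uint64_t signal_point)
{
	struct drm_i915_gem_exec_object2 obj = {
		.handle = batch_handle,
	};
	struct drm_i915_gem_exec_fence2 fences[] = {
		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT,   .value = wait_point },
		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL, .value = signal_point },
	};
	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr   = (uintptr_t)&obj,
		.buffer_count  = 1,
		/* the cliprects fields are reused to carry the fence array */
		.cliprects_ptr = (uintptr_t)fences,
		.num_cliprects = 2,
		.flags         = I915_EXEC_RENDER | I915_EXEC_FENCE_ARRAY2,
		.rsvd1         = ctx_id,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}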

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c            |   4 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 190 ++++++++++++++++-----
 include/uapi/drm/i915_drm.h                |  38 ++++-
 3 files changed, 182 insertions(+), 50 deletions(-)

Comments

Chris Wilson May 23, 2019, 11:52 a.m. UTC | #1
Quoting Lionel Landwerlin (2019-05-23 12:46:20)
> -               syncobj = drm_syncobj_find(file, fence.handle);
> -               if (!syncobj) {
> -                       DRM_DEBUG("Invalid syncobj handle provided\n");
> -                       err = -ENOENT;
> -                       goto err;
> +                       if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
> +                               err = -EINVAL;
> +                               goto err;
> +                       }
> +
> +                       if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
> +                               err = drm_syncobj_find_fence(
> +                                       file, user_fence.handle, user_fence.value,
> +                                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
> +                                       &syncobj, &fence);

Is this still a synchronous wait? That would be an unfortunate change in
behaviour and antithesis to having a scheduler.
-Chris
Lionel Landwerlin May 23, 2019, 1:46 p.m. UTC | #2
On 23/05/2019 12:52, Chris Wilson wrote:
> Quoting Lionel Landwerlin (2019-05-23 12:46:20)
>> -               syncobj = drm_syncobj_find(file, fence.handle);
>> -               if (!syncobj) {
>> -                       DRM_DEBUG("Invalid syncobj handle provided\n");
>> -                       err = -ENOENT;
>> -                       goto err;
>> +                       if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
>> +                               err = -EINVAL;
>> +                               goto err;
>> +                       }
>> +
>> +                       if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
>> +                               err = drm_syncobj_find_fence(
>> +                                       file, user_fence.handle, user_fence.value,
>> +                                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
>> +                                       &syncobj, &fence);
> Is this still a synchronous wait? That would be an unfortunate change in
> behaviour and antithesis to having a scheduler.
> -Chris
>
Not sure what you mean by synchronous wait.

We have to be able to call the submit before all the fences we depend on 
have materialized.
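
To illustrate with the Vulkan side of things: with
VK_KHR_timeline_semaphore, a wait-only submission like the sketch below
(placeholder variables, not tied to this patch) is valid even though
nothing has yet submitted the work that will signal the waited point.

#include <vulkan/vulkan.h>

/* Queue a command buffer that waits on timeline point wait_point; the
 * signal for that point may be submitted later, possibly from another
 * thread or even another process. */
static void queue_wait_before_signal(VkQueue queue, VkSemaphore timeline,
				     VkCommandBuffer cmd, uint64_t wait_point)
{
	VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
	VkTimelineSemaphoreSubmitInfoKHR timeline_info = {
		.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR,
		.waitSemaphoreValueCount = 1,
		.pWaitSemaphoreValues = &wait_point,
	};
	VkSubmitInfo submit = {
		.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
		.pNext = &timeline_info,
		.waitSemaphoreCount = 1,
		.pWaitSemaphores = &timeline,
		.pWaitDstStageMask = &wait_stage,
		.commandBufferCount = 1,
		.pCommandBuffers = &cmd,
	};

	/* No fence exists yet for wait_point when this reaches the kernel. */
	vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);
}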


-Lionel
Chris Wilson May 23, 2019, 1:59 p.m. UTC | #3
Quoting Lionel Landwerlin (2019-05-23 14:46:42)
> On 23/05/2019 12:52, Chris Wilson wrote:
> > Quoting Lionel Landwerlin (2019-05-23 12:46:20)
> >> -               syncobj = drm_syncobj_find(file, fence.handle);
> >> -               if (!syncobj) {
> >> -                       DRM_DEBUG("Invalid syncobj handle provided\n");
> >> -                       err = -ENOENT;
> >> -                       goto err;
> >> +                       if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
> >> +                               err = -EINVAL;
> >> +                               goto err;
> >> +                       }
> >> +
> >> +                       if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
> >> +                               err = drm_syncobj_find_fence(
> >> +                                       file, user_fence.handle, user_fence.value,
> >> +                                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
> >> +                                       &syncobj, &fence);
> > Is this still a synchronous wait? That would be an unfortunate change in
> > behaviour and antithesis to having a scheduler.
> > -Chris
> >
> Not sure what you mean by synchronous wait.

drm_syncobj_find_fence() has an open-coded wait_event loop. That is
synchronous and inconsistent with using a scheduler; where one only needs
to return a proxy fence that will be populated when the syncpt is known,
and be signaled as a result of that syncpt.
-Chris
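
For reference, the blocking behaviour looks roughly like the sketch
below (heavily simplified, not the kernel's actual code; the real
drm_syncobj_find_fence() uses a wait queue rather than polling, but the
effect on the submitting ioctl thread is the same):

#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>
#include <linux/sched.h>
#include <drm/drm_syncobj.h>

/* Simplified illustration of a WAIT_FOR_SUBMIT lookup: the execbuf
 * thread sleeps until a fence for the requested timeline point has been
 * attached to the syncobj, instead of handing the scheduler a proxy
 * fence to resolve later. */
static struct dma_fence *
lookup_point_blocking(struct drm_syncobj *syncobj, u64 point)
{
	struct dma_fence *fence;

	for (;;) {
		fence = drm_syncobj_fence_get(syncobj);
		if (fence && !dma_fence_chain_find_seqno(&fence, point))
			return fence; /* may be NULL if the point already signaled */

		dma_fence_put(fence); /* NULL-safe */
		/* block the submitting thread until userspace catches up
		 * (timeout and signal handling elided) */
		schedule_timeout_interruptible(1);
	}
}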
Lionel Landwerlin May 23, 2019, 2:15 p.m. UTC | #4
On 23/05/2019 14:59, Chris Wilson wrote:
> Quoting Lionel Landwerlin (2019-05-23 14:46:42)
>> On 23/05/2019 12:52, Chris Wilson wrote:
>>> Quoting Lionel Landwerlin (2019-05-23 12:46:20)
>>>> -               syncobj = drm_syncobj_find(file, fence.handle);
>>>> -               if (!syncobj) {
>>>> -                       DRM_DEBUG("Invalid syncobj handle provided\n");
>>>> -                       err = -ENOENT;
>>>> -                       goto err;
>>>> +                       if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
>>>> +                               err = -EINVAL;
>>>> +                               goto err;
>>>> +                       }
>>>> +
>>>> +                       if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
>>>> +                               err = drm_syncobj_find_fence(
>>>> +                                       file, user_fence.handle, user_fence.value,
>>>> +                                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
>>>> +                                       &syncobj, &fence);
>>> Is this still a synchronous wait? That would be an unfortunate change in
>>> behaviour and antithesis to having a scheduler.
>>> -Chris
>>>
>> Not sure what you mean by synchronous wait.
> drm_syncobj_find_fence() has an open-coded wait_event loop. That is
> synchronous and inconsistent with using a scheduler; where one only needs
> to return a proxy fence that will be populated when the syncpt is known,
> and be signaled as a result of that syncpt.
> -Chris
>

Right,


I see this changes the behavior for existing drm_syncobjs, which was not 
intended.


For timeline drm-syncobjs, we're aware this will block and might fail if 
the fence doesn't materialize.

The Vulkan spec doesn't give a specific amount of time after which the
submit fails, apart from it being non-zero.


Will fix, thanks a lot for pointing this out.


-Lionel
Lionel Landwerlin June 3, 2019, 4:29 p.m. UTC | #5
On 23/05/2019 16:59, Chris Wilson wrote:
> Quoting Lionel Landwerlin (2019-05-23 14:46:42)
>> On 23/05/2019 12:52, Chris Wilson wrote:
>>> Quoting Lionel Landwerlin (2019-05-23 12:46:20)
>>>> -               syncobj = drm_syncobj_find(file, fence.handle);
>>>> -               if (!syncobj) {
>>>> -                       DRM_DEBUG("Invalid syncobj handle provided\n");
>>>> -                       err = -ENOENT;
>>>> -                       goto err;
>>>> +                       if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
>>>> +                               err = -EINVAL;
>>>> +                               goto err;
>>>> +                       }
>>>> +
>>>> +                       if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
>>>> +                               err = drm_syncobj_find_fence(
>>>> +                                       file, user_fence.handle, user_fence.value,
>>>> +                                       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
>>>> +                                       &syncobj, &fence);
>>> Is this still a synchronous wait? That would be an unfortunate change in
>>> behaviour and antithesis to having a scheduler.
>>> -Chris
>>>
>> Not sure what you mean by synchronous wait.
> drm_syncobj_find_fence() has an open-coded wait_event loop. That is
> synchronous and inconsistent with using a scheduler; where one only needs
> to return a proxy fence that will be populated when the syncpt is known,
> and be signaled as a result of that syncpt.
> -Chris
>
Just to confirm, are you fine with the submission path of i915 doing the 
wait?

We have support for doing it in Anv, so that's an option if you prefer.
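
Roughly, doing it on the userspace side would mean blocking on the
generic syncobj timeline wait ioctl until the point's fence has
materialized, and only then issuing the execbuf. A sketch (not Anv's
actual code; handle, point and deadline are placeholders):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Block until a fence has been attached for (syncobj_handle, point),
 * without waiting for it to actually signal. timeout_abs_ns is an
 * absolute CLOCK_MONOTONIC deadline; the ioctl fails with ETIME if it
 * expires first. */
static int wait_for_point_to_materialize(int fd, uint32_t syncobj_handle,
					 uint64_t point, int64_t timeout_abs_ns)
{
	struct drm_syncobj_timeline_wait wait = {
		.handles = (uintptr_t)&syncobj_handle,
		.points = (uintptr_t)&point,
		.timeout_nsec = timeout_abs_ns,
		.count_handles = 1,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
	};

	return ioctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);
}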

I don't have a preference :)


Thanks,


-Lionel

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 83d2eb9e74cb..d62ddf2fa5b5 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -444,6 +444,7 @@  static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_EXEC_BATCH_FIRST:
 	case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
 	case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
+	case I915_PARAM_HAS_EXEC_FENCE_ARRAY2:
 		/* For the time being all of these are always true;
 		 * if some supported hardware does not have one of these
 		 * features this value needs to be provided from
@@ -3175,7 +3176,8 @@  static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_GEM | DRIVER_PRIME |
-	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
+	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
+	    DRIVER_SYNCOBJ_TIMELINE,
 	.release = i915_driver_release,
 	.open = i915_driver_open,
 	.lastclose = i915_driver_lastclose,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8b85c91c3ea4..f681533c085a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -229,6 +229,13 @@  enum {
  * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
  */
 
+struct i915_drm_dma_fences {
+	struct drm_syncobj *syncobj; /* Use with ptr_mask_bits() */
+	struct dma_fence *dma_fence;
+	u64 value;
+	struct dma_fence_chain *chain_fence;
+};
+
 struct i915_execbuffer {
 	struct drm_i915_private *i915; /** i915 backpointer */
 	struct drm_file *file; /** per-file lookup tables and limits */
@@ -1932,7 +1939,7 @@  static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 		return false;
 
 	/* Kernel clipping was a DRI1 misfeature */
-	if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
+	if (!(exec->flags & (I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_ARRAY2))) {
 		if (exec->num_cliprects || exec->cliprects_ptr)
 			return false;
 	}
@@ -2182,25 +2189,30 @@  eb_select_engine(struct i915_execbuffer *eb,
 }
 
 static void
-__free_fence_array(struct drm_syncobj **fences, unsigned int n)
+__free_fence_array(struct i915_drm_dma_fences *fences, unsigned int n)
 {
-	while (n--)
-		drm_syncobj_put(ptr_mask_bits(fences[n], 2));
+	while (n--) {
+		drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));
+		dma_fence_put(fences[n].dma_fence);
+		kfree(fences[n].chain_fence);
+	}
 	kvfree(fences);
 }
 
-static struct drm_syncobj **
+static struct i915_drm_dma_fences *
 get_fence_array(struct drm_i915_gem_execbuffer2 *args,
 		struct drm_file *file)
 {
 	const unsigned long nfences = args->num_cliprects;
 	struct drm_i915_gem_exec_fence __user *user;
-	struct drm_syncobj **fences;
+	struct drm_i915_gem_exec_fence2 __user *user2;
+	struct i915_drm_dma_fences *fences;
 	unsigned long n;
 	int err;
 
-	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
-		return NULL;
+	if ((args->flags & (I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_ARRAY2)) ==
+	    (I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_ARRAY2))
+		return ERR_PTR(-EINVAL);
 
 	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
 	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
@@ -2209,40 +2221,121 @@  get_fence_array(struct drm_i915_gem_execbuffer2 *args,
 			    SIZE_MAX / sizeof(*fences)))
 		return ERR_PTR(-EINVAL);
 
-	user = u64_to_user_ptr(args->cliprects_ptr);
-	if (!access_ok(user, nfences * sizeof(*user)))
-		return ERR_PTR(-EFAULT);
+	if (args->flags & I915_EXEC_FENCE_ARRAY2) {
+		user2 = u64_to_user_ptr(args->cliprects_ptr);
+		if (!access_ok(user2, nfences * sizeof(*user2)))
+			return ERR_PTR(-EFAULT);
+	} else {
+		user = u64_to_user_ptr(args->cliprects_ptr);
+		if (!access_ok(user, nfences * sizeof(*user)))
+			return ERR_PTR(-EFAULT);
+	}
 
 	fences = kvmalloc_array(nfences, sizeof(*fences),
 				__GFP_NOWARN | GFP_KERNEL);
 	if (!fences)
 		return ERR_PTR(-ENOMEM);
 
-	for (n = 0; n < nfences; n++) {
-		struct drm_i915_gem_exec_fence fence;
-		struct drm_syncobj *syncobj;
+	BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
+		     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
 
-		if (__copy_from_user(&fence, user++, sizeof(fence))) {
-			err = -EFAULT;
-			goto err;
-		}
+	if (args->flags & I915_EXEC_FENCE_ARRAY2) {
+		for (n = 0; n < nfences; n++) {
+			struct drm_i915_gem_exec_fence2 user_fence;
+			struct drm_syncobj *syncobj;
+			struct dma_fence *fence = NULL;
 
-		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
-			err = -EINVAL;
-			goto err;
-		}
+			if (__copy_from_user(&user_fence, user2++, sizeof(user_fence))) {
+				err = -EFAULT;
+				goto err;
+			}
 
-		syncobj = drm_syncobj_find(file, fence.handle);
-		if (!syncobj) {
-			DRM_DEBUG("Invalid syncobj handle provided\n");
-			err = -ENOENT;
-			goto err;
+			if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
+				err = -EINVAL;
+				goto err;
+			}
+
+			if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
+				err = drm_syncobj_find_fence(
+					file, user_fence.handle, user_fence.value,
+					DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+					&syncobj, &fence);
+				if (err) {
+					DRM_DEBUG("Invalid syncobj handle or timeline value provided\n");
+					goto err;
+				}
+			} else {
+				syncobj = drm_syncobj_find(
+					file, user_fence.handle);
+				if (!syncobj) {
+					err = -ENOENT;
+					DRM_DEBUG("Invalid syncobj handle provided\n");
+					goto err;
+				}
+			}
+
+			if (user_fence.value != 0 && user_fence.flags & I915_EXEC_FENCE_SIGNAL) {
+				fences[n].chain_fence =
+					kmalloc(sizeof(*fences[n].chain_fence),
+						GFP_KERNEL);
+				if (!fences[n].chain_fence) {
+					dma_fence_put(fence);
+					drm_syncobj_put(syncobj);
+					err = -ENOMEM;
+					DRM_DEBUG("Unable to alloc chain_fence\n");
+					goto err;
+				}
+			} else {
+				fences[n].chain_fence = NULL;
+			}
+
+			fences[n].syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
+			fences[n].dma_fence = fence;
+			fences[n].value = user_fence.value;
 		}
+	} else {
+		for (n = 0; n < nfences; n++) {
+			struct drm_i915_gem_exec_fence user_fence;
+			struct drm_syncobj *syncobj;
+			struct dma_fence *fence;
+
+			if (__copy_from_user(&user_fence, user++, sizeof(user_fence))) {
+				err = -EFAULT;
+				goto err;
+			}
+
+			if (user_fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
+				err = -EINVAL;
+				goto err;
+			}
 
-		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
-			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
+			/* If we're just signaling a syncobj, no need to get
+			 * the fence.
+			 */
+			if (user_fence.flags & I915_EXEC_FENCE_WAIT) {
+				err = drm_syncobj_find_fence(
+					file, user_fence.handle, 0, 0, &syncobj, &fence);
+				if (err) {
+					DRM_DEBUG("Invalid syncobj handle provided\n");
+					goto err;
+				}
+			} else {
+				syncobj = drm_syncobj_find(file, user_fence.handle);
+				if (!syncobj) {
+					DRM_DEBUG("Invalid syncobj handle provided\n");
+					goto err;
+				}
+				fence = NULL;
+			}
+
+			BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
+				     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
 
-		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
+			fences[n].syncobj = ptr_pack_bits(syncobj, user_fence.flags, 2);
+			fences[n].dma_fence = fence;
+			fences[n].value = 0;
+			fences[n].chain_fence = NULL;
+		}
 	}
 
 	return fences;
@@ -2254,7 +2347,7 @@  get_fence_array(struct drm_i915_gem_execbuffer2 *args,
 
 static void
 put_fence_array(struct drm_i915_gem_execbuffer2 *args,
-		struct drm_syncobj **fences)
+		struct i915_drm_dma_fences *fences)
 {
 	if (fences)
 		__free_fence_array(fences, args->num_cliprects);
@@ -2262,7 +2355,7 @@  put_fence_array(struct drm_i915_gem_execbuffer2 *args,
 
 static int
 await_fence_array(struct i915_execbuffer *eb,
-		  struct drm_syncobj **fences)
+		  struct i915_drm_dma_fences *fences)
 {
 	const unsigned int nfences = eb->args->num_cliprects;
 	unsigned int n;
@@ -2270,19 +2363,14 @@  await_fence_array(struct i915_execbuffer *eb,
 
 	for (n = 0; n < nfences; n++) {
 		struct drm_syncobj *syncobj;
-		struct dma_fence *fence;
 		unsigned int flags;
 
-		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
+		syncobj = ptr_unpack_bits(fences[n].syncobj, &flags, 2);
 		if (!(flags & I915_EXEC_FENCE_WAIT))
 			continue;
 
-		fence = drm_syncobj_fence_get(syncobj);
-		if (!fence)
-			return -EINVAL;
-
-		err = i915_request_await_dma_fence(eb->request, fence);
-		dma_fence_put(fence);
+		err = i915_request_await_dma_fence(eb->request,
+						   fences[n].dma_fence);
 		if (err < 0)
 			return err;
 	}
@@ -2292,7 +2380,7 @@  await_fence_array(struct i915_execbuffer *eb,
 
 static void
 signal_fence_array(struct i915_execbuffer *eb,
-		   struct drm_syncobj **fences)
+		   struct i915_drm_dma_fences *fences)
 {
 	const unsigned int nfences = eb->args->num_cliprects;
 	struct dma_fence * const fence = &eb->request->fence;
@@ -2302,11 +2390,21 @@  signal_fence_array(struct i915_execbuffer *eb,
 		struct drm_syncobj *syncobj;
 		unsigned int flags;
 
-		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
+		syncobj = ptr_unpack_bits(fences[n].syncobj, &flags, 2);
 		if (!(flags & I915_EXEC_FENCE_SIGNAL))
 			continue;
 
-		drm_syncobj_replace_fence(syncobj, fence);
+		if (fences[n].chain_fence) {
+			drm_syncobj_add_point(syncobj, fences[n].chain_fence,
+					      fence, fences[n].value);
+			/*
+			 * The chain's ownership is transferred to the
+			 * timeline.
+			 */
+			fences[n].chain_fence = NULL;
+		} else {
+			drm_syncobj_replace_fence(syncobj, fence);
+		}
 	}
 }
 
@@ -2315,7 +2413,7 @@  i915_gem_do_execbuffer(struct drm_device *dev,
 		       struct drm_file *file,
 		       struct drm_i915_gem_execbuffer2 *args,
 		       struct drm_i915_gem_exec_object2 *exec,
-		       struct drm_syncobj **fences)
+		       struct i915_drm_dma_fences *fences)
 {
 	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
@@ -2705,7 +2803,7 @@  i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_execbuffer2 *args = data;
 	struct drm_i915_gem_exec_object2 *exec2_list;
-	struct drm_syncobj **fences = NULL;
+	struct i915_drm_dma_fences *fences = NULL;
 	const size_t count = args->buffer_count;
 	int err;
 
@@ -2733,7 +2831,7 @@  i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
 		return -EFAULT;
 	}
 
-	if (args->flags & I915_EXEC_FENCE_ARRAY) {
+	if (args->flags & (I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_ARRAY2)) {
 		fences = get_fence_array(args, file);
 		if (IS_ERR(fences)) {
 			kvfree(exec2_list);
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 328d05e77d9f..eaf7b89360fd 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -610,6 +610,12 @@  typedef struct drm_i915_irq_wait {
  * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
  */
 #define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * drm_i915_gem_exec_fence2 structures.  See I915_EXEC_FENCE_ARRAY2.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE_ARRAY2  54
+
 /* Must be kept compact -- no holes and well documented */
 
 typedef struct drm_i915_getparam {
@@ -1006,6 +1012,24 @@  struct drm_i915_gem_exec_fence {
 	__u32 flags;
 };
 
+struct drm_i915_gem_exec_fence2 {
+	/**
+	 * User's handle for a drm_syncobj to wait on or signal.
+	 */
+	__u32 handle;
+
+	/**
+	 * Same flags as drm_i915_gem_exec_fence.
+	 */
+	__u32 flags;
+
+	/**
+	 * A point for a timeline drm_syncobj to wait on or signal. Must be 0
+	 * for a binary drm_syncobj.
+	 */
+	__u64 value;
+};
+
 struct drm_i915_gem_execbuffer2 {
 	/**
 	 * List of gem_exec_object2 structs
@@ -1022,8 +1046,10 @@  struct drm_i915_gem_execbuffer2 {
 	__u32 num_cliprects;
 	/**
 	 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
-	 * is not set.  If I915_EXEC_FENCE_ARRAY is set, then this is a
-	 * struct drm_i915_gem_exec_fence *fences.
+	 * & I915_EXEC_FENCE_ARRAY2 are not set. If I915_EXEC_FENCE_ARRAY is
+	 * set, then this is a struct drm_i915_gem_exec_fence *fences. If
+	 * I915_EXEC_FENCE_ARRAY2 is set, then this is a struct
+	 * drm_i915_gem_exec_fence2 *fences.
 	 */
 	__u64 cliprects_ptr;
 #define I915_EXEC_RING_MASK              (0x3f)
@@ -1141,7 +1167,13 @@  struct drm_i915_gem_execbuffer2 {
  */
 #define I915_EXEC_FENCE_SUBMIT		(1 << 20)
 
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
+/* Setting I915_FENCE_ARRAY2 implies that num_cliprects and cliprects_ptr
+ * define an array of i915_gem_exec_fence2 structures which specify a set of
+ * dma fences to wait upon or signal.
+ */
+#define I915_EXEC_FENCE_ARRAY2   (1<<22)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY2<<1))
 
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \