
[3/4] drm/i915: Allow userspace to hint that the relocations were known

Message ID 1352560547-1423-3-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson Nov. 10, 2012, 3:15 p.m. UTC
Userspace is able to hint to the kernel that its command stream and
auxiliary state buffers already hold the correct presumed addresses and
so the relocation process may be skipped if the kernel does not need to
move any buffers in preparation for the execbuffer. Thus for the common
case where the allotment of buffers is static between batches, we can
avoid the overhead of individually checking the relocation entries.

Note that this requires userspace to supply the domain tracking and
requests for workarounds itself that would otherwise be computed based
upon the relocation entries.

Using copywinwin10 as an example that is dependent upon emitting a lot
of relocations (2 per operation), we see improvements of:

c2d/gm45: 618000.0/sec to 632000.0/sec.
i3-330m: 748000.0/sec to 830000.0/sec.

(measured relative to a baseline with neither optimisation applied).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_dma.c            |    3 ++
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   50 ++++++++++++++++++----------
 include/uapi/drm/i915_drm.h                |   11 ++++++
 3 files changed, 46 insertions(+), 18 deletions(-)
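
For illustration, a minimal userspace sketch of the fast path this
enables. The open DRM fd, buffer handle and batch length are made up;
only the getparam value, the NO_RELOC flag, the presumed offset and
the rsvd1 domain packing come from this patch:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static bool has_exec_no_reloc(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_EXEC_NO_RELOC,
		.value = &value,
	};

	return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value;
}

static int exec_no_reloc(int fd, uint32_t batch, uint64_t presumed_offset)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch;
	/* Presumed address from the previous execbuffer. If the kernel
	 * had to move the buffer, it reports the new offset back and
	 * falls back to processing the relocation entries. */
	obj.offset = presumed_offset;
	/* With NO_RELOC, userspace supplies the domain tracking itself:
	 * low 32 bits of rsvd1 are the read domains, high 32 bits the
	 * write domain. A batch is only read by the GPU. */
	obj.rsvd1 = I915_GEM_DOMAIN_COMMAND;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = 4096;	/* illustrative one-page batch */
	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}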

Comments

Daniel Vetter Nov. 10, 2012, 5:16 p.m. UTC | #1
On Sat, Nov 10, 2012 at 03:15:46PM +0000, Chris Wilson wrote:
> Userspace is able to hint to the kernel that its command stream and
> auxiliary state buffers already hold the correct presumed addresses and
> so the relocation process may be skipped if the kernel does not need to
> move any buffers in preparation for the execbuffer. Thus for the common
> case where the allotment of buffers is static between batches, we can
> avoid the overhead of individually checking the relocation entries.
> 
> Note that this requires userspace to supply the domain tracking and
> requests for workarounds itself that would otherwise be computed based
> upon the relocation entries.
> 
> Using copywinwin10 as an example that is dependent upon emitting a lot
> of relocations (2 per operation), we see improvements of:
> 
> c2d/gm45: 618000.0/sec to 632000.0/sec.
> i3-330m: 748000.0/sec to 830000.0/sec.
> 
> (measured relative to a baseline with neither optimisation applied).
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Nice stuff! A few requests from your bastard maintainer though for this
patch series:

- An i-g-t testcase for the reloc.handle checking, both for the
  out-of-bounds case with the LUT and the "no such object" case with
  the old lookup. At least I couldn't find anything like that on a
  quick check.

- Also an i-g-t test to check whether the gen6 "global gtt mapping for
  pipe control writes" w/a works would be great, since we've fumbled that.
  Again for both the old and new ways of doing things.

- I think we should start checking for not-yet-defined flag values
  everywhere in execbuf and return -EINVAL - both to make the ioctl more
  robust against broken userspace (we've already had such an OOPS-causing
  issue with cpu domains in relocations), and to ensure that we can keep
  on extending things (see the sketch below). Obviously this needs to
  come with an i-g-t check. I know that every extension will then break
  old i-g-t versions of those tests, but imo the more rigid abi checking
  is worth that tradeoff.

The above i-g-t stuff could be a nice exercise for reviewers ... </hint>
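
For the flag checking, the shape I have in mind is roughly this (the
mask name is made up, purely to illustrate):

/* Made-up name: every execbuffer2 flag bit above the last one this
 * kernel knows about; adding a new flag extends the mask naturally. */
#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_NO_RELOC << 1))

	if (args->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return -EINVAL;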

On the patch itself:

- A small comment on EXEC_OBJECT_NEEDS_GTT explaining that we need this
  for a gen6 w/a and that it's valid only with the NO_RELOC mode.

- Can we abolish the pending_read/write_domains and just go with a
  per-object GPU_WRITE flag? Afaik that's all we need with the
  flushing_list gone. To avoid a massive rewrite of the code I'm thinking
  of just keeping around a pending_gpu_write bool (since reads are
  implicit) and then using that to fill out generic gpu domains in
  i915_gem_execbuffer_move_to_active. E.g. set all gpu read domains if
  there is no write, otherwise just set the render domain in both
  (sketch below).
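
Concretely, something like this in i915_gem_execbuffer_move_to_active
(just a sketch, with pending_gpu_write being the bool proposed above):

	/* Reads are implicit for every object in the execbuf; only
	 * writes need to be tracked explicitly. */
	if (obj->pending_gpu_write) {
		obj->base.read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.write_domain = I915_GEM_DOMAIN_RENDER;
	} else {
		obj->base.read_domains = I915_GEM_GPU_DOMAINS;
		obj->base.write_domain = 0;
	}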

While reading through the code I've also noticed that we could replace the
obj->write_domain check in there (for updating the write_seqno) with a
pending_gpu_write check - the former was required due to the delayed
flushing caused by the flushing_list.
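
I.e. roughly (again just a sketch, reusing the proposed bool; seqno is
the request seqno for this batch, as in the surrounding code):

	if (obj->pending_gpu_write) {
		obj->dirty = 1;
		obj->last_write_seqno = seqno;
	}
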
-Daniel

Chris Wilson Nov. 10, 2012, 6:08 p.m. UTC | #2
On Sat, 10 Nov 2012 18:16:16 +0100, Daniel Vetter <daniel@ffwll.ch> wrote:
> - Can we abolish the pending_read/write_domains and just go with a
>   per-object GPU_WRITE flag? Afaik that's all we need with the
>   flushing_list gone. To avoid a massive rewrite of the code I'm thinking
>   of just keeping around a pending_gpu_write bool (since reads are
>   implicit) and then using that to fill out generic gpu domains in
>   i915_gem_execbuffer_move_to_active. E.g. set all gpu read domains if
>   there is no write, otherwise just set the render domain in both.

Just a quick comment. I started with just a WRITE flag, then realised I
needed to mark the read domains as well (for GPU activity tracking). So
I concluded that just passing along the domains was easy enough for
userspace and better for future-proofing. That is definitely one of the
places where we need to be careful with designing the API - more ideas
are welcome.
-Chris
Daniel Vetter Nov. 10, 2012, 8:17 p.m. UTC | #3
On Sat, Nov 10, 2012 at 7:08 PM, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> On Sat, 10 Nov 2012 18:16:16 +0100, Daniel Vetter <daniel@ffwll.ch> wrote:
>> - Can we abolish the pending_read/write_domains and just go with a
>>   per-object GPU_WRITE flag? Afaik that's all we need with the
>>   flushing_list gone. To avoid a massive rewrite of the code I'm thinking
>>   of just keeping around a pending_gpu_write bool (since reads are
>>   implicit) and then using that to fill out generic gpu domains in
>>   i915_gem_execbuffer_move_to_active. E.g. set all gpu read domains if
>>   there is no write, otherwise just set the render domain in both.
>
> Just a quick comment. I started with just a WRITE flag, then realised I
> needed to mark the read domains as well (for GPU activity tracking). So
> I concluded that just passing along the domains was easy enough for
> userspace and better for future-proofing. That is definitely one of the
> places where we need to be careful with designing the API - more ideas
> are welcome.

Hm, I'd thought that for the gpu activity tracking we could get away
with unconditionally assuming that any passed-in object is in all
read domains. For cache flushing it doesn't matter what we set anyway,
since we unconditionally invalidate/flush all caches before/after each
batch. And for activity tracking we unconditionally put all objects in
a given execbuf at the front of the active list (with updated
ring/seqno). The only important thing is to keep the write tracking so
we don't break our various read/read optimizations. Since we already
allow in-batch write domain changes (and hence more than one write
domain set) we could even do a simple

obj->base.read_domains = GPU_DOMAINS;
if (obj->pending_gpu_write) obj->base.write_domain = GPU_DOMAINS;

For ring-sync/semaphores we already do an unconditional call to
i915_gem_object_sync. So what am I missing?
-Daniel
Chris Wilson Nov. 11, 2012, 12:10 p.m. UTC | #4
On Sat, 10 Nov 2012 21:17:05 +0100, Daniel Vetter <daniel@ffwll.ch> wrote:
> Hm, I'd thought that for the gpu activity tracking we could get away
> with unconditionally assuming that any passed-in object is in all
> read domains. For cache flushing it doesn't matter what we set anyway,
> since we unconditionally invalidate/flush all caches before/after each
> batch. And for activity tracking we unconditionally put all objects in
> a given execbuf at the front of the active list (with updated
> ring/seqno). The only important thing is to keep the write tracking
> so we don't break our various read/read optimizations.

One overlooked aspect is that the domain tracking helps with debugging
and identifying what the buffers are used for. It has paid dividends
many times when reading error-states, and certainly will prove to be
useful again in future.
-Chris
Daniel Vetter Nov. 11, 2012, 1:09 p.m. UTC | #5
On Sun, Nov 11, 2012 at 1:10 PM, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> On Sat, 10 Nov 2012 21:17:05 +0100, Daniel Vetter <daniel@ffwll.ch> wrote:
>> Hm, I'd thought that for the gpu activity tracking we could get away
>> with unconditionally assuming that any passed-in object is in all
>> read domains. For cache flushing it doesn't matter what we set anyway,
>> since we unconditionally invalidate/flush all caches before/after each
>> batch. And for activity tracking we unconditionally put all objects in
>> a given execbuf at the front of the active list (with updated
>> ring/seqno). The only important thing is to keep the write tracking
>> so we don't break our various read/read optimizations.
>
> One overlooked aspect is that the domain tracking helps with debugging
> and identifying what the buffers are used for. It has paid dividends
> many times when reading error-states, and certainly will prove to be
> useful again in future.

Hm, good point. Otoh why don't we allow userspace to simply pass in an
arbitrary debug tag in that u64, carry the latest one around in the bo
and dump that everywhere (in addition to the domains)?
-Daniel

Patch

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4beb7bc..a0c4b4f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1017,6 +1017,9 @@  static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_SECURE_BATCHES:
 		value = capable(CAP_SYS_ADMIN);
 		break;
+	case I915_PARAM_HAS_EXEC_NO_RELOC:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1738ebd..30beea6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -412,7 +412,8 @@  i915_gem_execbuffer_relocate(struct drm_device *dev,
 
 static int
 i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-				   struct intel_ring_buffer *ring)
+				   struct intel_ring_buffer *ring,
+				   bool *need_reloc)
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
@@ -452,7 +453,18 @@  i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 		obj->has_aliasing_ppgtt_mapping = 1;
 	}
 
-	entry->offset = obj->gtt_offset;
+	if (entry->offset != obj->gtt_offset) {
+		entry->offset = obj->gtt_offset;
+		*need_reloc = true;
+	}
+
+	obj->base.pending_read_domains = entry->rsvd1 & 0xffffffff;
+	obj->base.pending_write_domain = (entry->rsvd1 >> 32) & 0xffffffff;
+
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
+	    !obj->has_global_gtt_mapping)
+		i915_gem_gtt_bind_object(obj, obj->cache_level);
+
 	return 0;
 }
 
@@ -478,7 +490,8 @@  i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
-			    struct list_head *objects)
+			    struct list_head *objects,
+			    bool *need_relocs)
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head ordered_objects;
@@ -502,8 +515,6 @@  i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		else
 			list_move_tail(&obj->exec_list, &ordered_objects);
 
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
 		obj->pending_fenced_gpu_access = false;
 	}
 	list_splice(&ordered_objects, objects);
@@ -541,7 +552,7 @@  i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    (need_fence && !obj->map_and_fenceable))
 				ret = i915_gem_object_unbind(obj);
 			else
-				ret = i915_gem_execbuffer_reserve_object(obj, ring);
+				ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -551,7 +562,7 @@  i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			if (obj->gtt_space)
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_object(obj, ring);
+			ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -571,16 +582,18 @@  err:		/* Decrement pin count for bound objects */
 
 static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
 				  struct intel_ring_buffer *ring,
 				  struct eb_objects *eb,
-				  struct drm_i915_gem_exec_object2 *exec,
-				  int count)
+				  struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_gem_relocation_entry *reloc;
 	struct drm_i915_gem_object *obj;
+	bool need_relocs;
 	int *reloc_offset;
 	int i, total, ret;
+	int count = args->buffer_count;
 
 	/* We may process another execbuffer during the unlock... */
 	while (!list_empty(&eb->objects)) {
@@ -635,7 +648,8 @@  i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects);
+	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
 	if (ret)
 		goto err;
 
@@ -848,10 +862,9 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct intel_ring_buffer *ring;
 	u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u32 exec_start, exec_len;
-	u32 seqno;
-	u32 mask;
-	u32 flags;
+	u32 seqno, mask, flags;
 	int ret, mode, i;
+	bool need_relocs;
 
 	if (!i915_gem_check_execbuffer(args)) {
 		DRM_DEBUG("execbuf with invalid offset/length\n");
@@ -993,17 +1006,18 @@  i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			       exec_list);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects);
+	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
+	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	ret = i915_gem_execbuffer_relocate(dev, eb);
+	if (need_relocs)
+		ret = i915_gem_execbuffer_relocate(dev, eb);
 	if (ret) {
 		if (ret == -EFAULT) {
-			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
-								eb, exec,
-								args->buffer_count);
+			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+								eb, exec);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
 		if (ret)
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 067b98e..7657d3e 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -309,6 +309,7 @@  typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE	 22
 #define I915_PARAM_HAS_SECURE_BATCHES	 23
+#define I915_PARAM_HAS_EXEC_NO_RELOC	 24
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -629,7 +630,10 @@  struct drm_i915_gem_exec_object2 {
 	__u64 offset;
 
 #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+#define EXEC_OBJECT_NEEDS_GTT	(1<<1)
 	__u64 flags;
+
+	/** Low 32-bits for read-domains, high 32-bits for write-domains. */
 	__u64 rsvd1;
 	__u64 rsvd2;
 };
@@ -679,6 +683,13 @@  struct drm_i915_gem_execbuffer2 {
  */
 #define I915_EXEC_SECURE		(1<<9)
 
+/** Provide a hint to the kernel that the command stream and auxiliary
+ * state buffers already hold the correct presumed addresses and so the
+ * relocation process may be skipped if no buffers need to be moved in
+ * preparation for the execbuffer.
+ */
+#define I915_EXEC_NO_RELOC		(1<<10)
+
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK