diff mbox

drm/i915: Reserve space improvements

Message ID 1435595818-26467-1-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

John Harrison June 29, 2015, 4:36 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

An earlier patch was added to reserve space in the ring buffer for the
commands issued during 'add_request()'. The initial version was
pessimistic in the way it handled buffer wrapping and would cause
premature wraps and thus waste ring space.

This patch updates the code to better handle the wrap case. It no
longer enforces that the space being asked for and the reserved space
are a single contiguous block. Instead, it allows the reserve to be on
the far end of a wrap operation. It still guarantees that the space is
available so when the wrap occurs, no wait will happen. Thus the wrap
cannot fail which is the whole point of the exercise.

Also fixed a merge failure with some comments from the original patch.

v2: Incorporated suggestion by David Gordon to move the wrap code
inside the prepare function and thus allow a single combined
wait_for_space() call rather than doing one before the wrap and
another after. This also makes the prepare code much simpler and
easier to follow.

For: VIZ-5115
CC: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c        | 72 +++++++++++++-------------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 89 ++++++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  4 +-
 3 files changed, 88 insertions(+), 77 deletions(-)

Comments

Shuang He June 30, 2015, 7:26 a.m. UTC | #1
Tested-By: Intel Graphics QA PRTS (Patch Regression Test System Contact: shuang.he@intel.com)
Task id: 6668
-------------------------------------Summary-------------------------------------
Platform          Delta          drm-intel-nightly          Series Applied
ILK                                  302/302              302/302
SNB                                  312/316              312/316
IVB                                  343/343              343/343
BYT                 -2              287/287              285/287
-------------------------------------Detailed-------------------------------------
Platform  Test                                drm-intel-nightly          Series Applied
*BYT  igt@gem_partial_pwrite_pread@reads      PASS(1)      FAIL(1)
*BYT  igt@gem_partial_pwrite_pread@reads-display      PASS(1)      FAIL(1)
Note: You need to pay more attention to lines starting with '*'
Tomas Elf June 30, 2015, 11:33 a.m. UTC | #2
On 29/06/2015 17:36, John.C.Harrison@Intel.com wrote:
> From: John Harrison <John.C.Harrison@Intel.com>
>
> An earlier patch was added to reserve space in the ring buffer for the
> commands issued during 'add_request()'. The initial version was
> pessimistic in the way it handled buffer wrapping and would cause
> premature wraps and thus waste ring space.
>
> This patch updates the code to better handle the wrap case. It no
> longer enforces that the space being asked for and the reserved space
> are a single contiguous block. Instead, it allows the reserve to be on
> the far end of a wrap operation. It still guarantees that the space is
> available so when the wrap occurs, no wait will happen. Thus the wrap
> cannot fail which is the whole point of the exercise.
>
> Also fixed a merge failure with some comments from the original patch.
>
> v2: Incorporated suggestion by David Gordon to move the wrap code
> inside the prepare function and thus allow a single combined
> wait_for_space() call rather than doing one before the wrap and
> another after. This also makes the prepare code much simpler and
> easier to follow.
>
> For: VIZ-5115
> CC: Daniel Vetter <daniel@ffwll.ch>
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> ---
>   drivers/gpu/drm/i915/intel_lrc.c        | 72 +++++++++++++-------------
>   drivers/gpu/drm/i915/intel_ringbuffer.c | 89 ++++++++++++++++++---------------
>   drivers/gpu/drm/i915/intel_ringbuffer.h |  4 +-
>   3 files changed, 88 insertions(+), 77 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index b36e064..a41936b 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -663,12 +663,12 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
>   	unsigned space;
>   	int ret;
>
> -	/* The whole point of reserving space is to not wait! */
> -	WARN_ON(ringbuf->reserved_in_use);
> -
>   	if (intel_ring_space(ringbuf) >= bytes)
>   		return 0;
>
> +	/* The whole point of reserving space is to not wait! */
> +	WARN_ON(ringbuf->reserved_in_use);
> +
>   	list_for_each_entry(target, &ring->request_list, list) {
>   		/*
>   		 * The request queue is per-engine, so can contain requests
> @@ -718,22 +718,11 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>   	execlists_context_queue(request);
>   }
>
> -static int logical_ring_wrap_buffer(struct drm_i915_gem_request *req)
> +static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
>   {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
>   	uint32_t __iomem *virt;
>   	int rem = ringbuf->size - ringbuf->tail;
>
> -	/* Can't wrap if space has already been reserved! */
> -	WARN_ON(ringbuf->reserved_in_use);
> -
> -	if (ringbuf->space < rem) {
> -		int ret = logical_ring_wait_for_space(req, rem);
> -
> -		if (ret)
> -			return ret;
> -	}
> -
>   	virt = ringbuf->virtual_start + ringbuf->tail;
>   	rem /= 4;
>   	while (rem--)
> @@ -741,40 +730,49 @@ static int logical_ring_wrap_buffer(struct drm_i915_gem_request *req)
>
>   	ringbuf->tail = 0;
>   	intel_ring_update_space(ringbuf);
> -
> -	return 0;
>   }
>
>   static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
>   {
>   	struct intel_ringbuffer *ringbuf = req->ringbuf;
> -	int ret;
> -
> -	/*
> -	 * Add on the reserved size to the request to make sure that after
> -	 * the intended commands have been emitted, there is guaranteed to
> -	 * still be enough free space to send them to the hardware.
> -	 */
> -	if (!ringbuf->reserved_in_use)
> -		bytes += ringbuf->reserved_size;
> -
> -	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
> -		ret = logical_ring_wrap_buffer(req);
> -		if (unlikely(ret))
> -			return ret;
> +	int remain = ringbuf->effective_size - ringbuf->tail;
> +	int ret, total_bytes, wait_bytes = 0;
> +	bool need_wrap = false;
>
> -		if(ringbuf->reserved_size) {
> -			uint32_t size = ringbuf->reserved_size;
> +	if (ringbuf->reserved_in_use)
> +		total_bytes = bytes;
> +	else
> +		total_bytes = bytes + ringbuf->reserved_size;
>
> -			intel_ring_reserved_space_cancel(ringbuf);
> -			intel_ring_reserved_space_reserve(ringbuf, size);
> +	if (unlikely(bytes > remain)) {
> +		/*
> +		 * Not enough space for the basic request. So need to flush
> +		 * out the remainder and then wait for base + reserved.
> +		 */
> +		wait_bytes = remain + total_bytes;
> +		need_wrap = true;
> +	} else {
> +		if (unlikely(total_bytes > remain)) {
> +			/*
> +			 * The base request will fit but the reserved space
> +			 * falls off the end. So only need to to wait for the
> +			 * reserved size after flushing out the remainder.
> +			 */
> +			wait_bytes = remain + ringbuf->reserved_size;
> +			need_wrap = true;
> +		} else if (total_bytes > ringbuf->space) {
> +			/* No wrapping required, just waiting. */
> +			wait_bytes = total_bytes;
>   		}
>   	}
>
> -	if (unlikely(ringbuf->space < bytes)) {
> -		ret = logical_ring_wait_for_space(req, bytes);
> +	if (wait_bytes) {
> +		ret = logical_ring_wait_for_space(req, wait_bytes);

Here we're potentially waiting for an amount of space that is 
optimistically computed (ringbuf->effective_size - ringbuf->tail). If we 
want to stay consistent — that is, stay pessimistic — we should use 
ringbuf->size - ringbuf->tail so that we're waiting for the biggest 
possible size here.

>   		if (unlikely(ret))
>   			return ret;
> +
> +		if (need_wrap)
> +			__wrap_ring_buffer(ringbuf);
>   	}
>
>   	return 0;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index af7c12e..9b10019 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2121,12 +2121,12 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
>   	unsigned space;
>   	int ret;
>
> -	/* The whole point of reserving space is to not wait! */
> -	WARN_ON(ringbuf->reserved_in_use);
> -
>   	if (intel_ring_space(ringbuf) >= n)
>   		return 0;
>
> +	/* The whole point of reserving space is to not wait! */
> +	WARN_ON(ringbuf->reserved_in_use);
> +
>   	list_for_each_entry(request, &ring->request_list, list) {
>   		space = __intel_ring_space(request->postfix, ringbuf->tail,
>   					   ringbuf->size);
> @@ -2145,21 +2145,11 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
>   	return 0;
>   }
>
> -static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
> +static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
>   {
>   	uint32_t __iomem *virt;
> -	struct intel_ringbuffer *ringbuf = ring->buffer;
>   	int rem = ringbuf->size - ringbuf->tail;
>
> -	/* Can't wrap if space has already been reserved! */
> -	WARN_ON(ringbuf->reserved_in_use);
> -
> -	if (ringbuf->space < rem) {
> -		int ret = ring_wait_for_space(ring, rem);
> -		if (ret)
> -			return ret;
> -	}
> -
>   	virt = ringbuf->virtual_start + ringbuf->tail;
>   	rem /= 4;
>   	while (rem--)
> @@ -2167,8 +2157,6 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
>
>   	ringbuf->tail = 0;
>   	intel_ring_update_space(ringbuf);
> -
> -	return 0;
>   }
>
>   int intel_ring_idle(struct intel_engine_cs *ring)
> @@ -2238,9 +2226,21 @@ void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
>   void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
>   {
>   	WARN_ON(!ringbuf->reserved_in_use);
> -	WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
> -	     "request reserved size too small: %d vs %d!\n",
> -	     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
> +	if (ringbuf->tail > ringbuf->reserved_tail) {
> +		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
> +		     "request reserved size too small: %d vs %d!\n",
> +		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
> +	} else {
> +		/*
> +		 * The ring was wrapped while the reserved space was in use.
> +		 * That means that some unknown amount of the ring tail was
> +		 * no-op filled and skipped. Thus simply adding the ring size
> +		 * to the tail and doing the above space check will not work.
> +		 * Rather than attempt to track how much tail was skipped,
> +		 * it is much simpler to say that also skipping the sanity
> +		 * check every once in a while is not a big issue.
> +		 */
> +	}
>
>   	ringbuf->reserved_size   = 0;
>   	ringbuf->reserved_in_use = false;
> @@ -2249,33 +2249,44 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
>   static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
>   {
>   	struct intel_ringbuffer *ringbuf = ring->buffer;
> -	int ret;
> -
> -	/*
> -	 * Add on the reserved size to the request to make sure that after
> -	 * the intended commands have been emitted, there is guaranteed to
> -	 * still be enough free space to send them to the hardware.
> -	 */
> -	if (!ringbuf->reserved_in_use)
> -		bytes += ringbuf->reserved_size;
> -
> -	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
> -		ret = intel_wrap_ring_buffer(ring);
> -		if (unlikely(ret))
> -			return ret;
> +	int remain = ringbuf->effective_size - ringbuf->tail;
> +	int ret, total_bytes, wait_bytes = 0;
> +	bool need_wrap = false;
>
> -		if(ringbuf->reserved_size) {
> -			uint32_t size = ringbuf->reserved_size;
> +	if (ringbuf->reserved_in_use)
> +		total_bytes = bytes;
> +	else
> +		total_bytes = bytes + ringbuf->reserved_size;
>
> -			intel_ring_reserved_space_cancel(ringbuf);
> -			intel_ring_reserved_space_reserve(ringbuf, size);
> +	if (unlikely(bytes > remain)) {
> +		/*
> +		 * Not enough space for the basic request. So need to flush
> +		 * out the remainder and then wait for base + reserved.
> +		 */
> +		wait_bytes = remain + total_bytes;
> +		need_wrap = true;
> +	} else {
> +		if (unlikely(total_bytes > remain)) {
> +			/*
> +			 * The base request will fit but the reserved space
> +			 * falls off the end. So only need to wait for the
> +			 * reserved size after flushing out the remainder.
> +			 */
> +			wait_bytes = remain + ringbuf->reserved_size;
> +			need_wrap = true;
> +		} else if (total_bytes > ringbuf->space) {
> +			/* No wrapping required, just waiting. */
> +			wait_bytes = total_bytes;
>   		}
>   	}
>
> -	if (unlikely(ringbuf->space < bytes)) {
> -		ret = ring_wait_for_space(ring, bytes);
> +	if (wait_bytes) {
> +		ret = ring_wait_for_space(ring, wait_bytes);

Same issue here as for the LRC implementation.

Thanks,
Tomas

>   		if (unlikely(ret))
>   			return ret;
> +
> +		if (need_wrap)
> +			__wrap_ring_buffer(ringbuf);
>   	}
>
>   	return 0;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 0e2bbc6..304cac4 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -473,7 +473,6 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
>    * will always have sufficient room to do its stuff. The request creation
>    * code calls this automatically.
>    */
> -int intel_ring_reserve_space(struct drm_i915_gem_request *request);
>   void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
>   /* Cancel the reservation, e.g. because the request is being discarded. */
>   void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
> @@ -482,4 +481,7 @@ void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
>   /* Finish with the reserved space - for use by i915_add_request() only. */
>   void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
>
> +/* Legacy ringbuffer specific portion of reservation code: */
> +int intel_ring_reserve_space(struct drm_i915_gem_request *request);
> +
>   #endif /* _INTEL_RINGBUFFER_H_ */
>
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b36e064..a41936b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -663,12 +663,12 @@  static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 	unsigned space;
 	int ret;
 
-	/* The whole point of reserving space is to not wait! */
-	WARN_ON(ringbuf->reserved_in_use);
-
 	if (intel_ring_space(ringbuf) >= bytes)
 		return 0;
 
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
 	list_for_each_entry(target, &ring->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
@@ -718,22 +718,11 @@  intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	execlists_context_queue(request);
 }
 
-static int logical_ring_wrap_buffer(struct drm_i915_gem_request *req)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	uint32_t __iomem *virt;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	/* Can't wrap if space has already been reserved! */
-	WARN_ON(ringbuf->reserved_in_use);
-
-	if (ringbuf->space < rem) {
-		int ret = logical_ring_wait_for_space(req, rem);
-
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -741,40 +730,49 @@  static int logical_ring_wrap_buffer(struct drm_i915_gem_request *req)
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
 static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	int ret;
-
-	/*
-	 * Add on the reserved size to the request to make sure that after
-	 * the intended commands have been emitted, there is guaranteed to
-	 * still be enough free space to send them to the hardware.
-	 */
-	if (!ringbuf->reserved_in_use)
-		bytes += ringbuf->reserved_size;
-
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = logical_ring_wrap_buffer(req);
-		if (unlikely(ret))
-			return ret;
+	int remain = ringbuf->effective_size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
 
-		if(ringbuf->reserved_size) {
-			uint32_t size = ringbuf->reserved_size;
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
 
-			intel_ring_reserved_space_cancel(ringbuf);
-			intel_ring_reserved_space_reserve(ringbuf, size);
+	if (unlikely(bytes > remain)) {
+		/*
+		 * Not enough space for the basic request. So need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
 		}
 	}
 
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = logical_ring_wait_for_space(req, bytes);
+	if (wait_bytes) {
+		ret = logical_ring_wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index af7c12e..9b10019 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2121,12 +2121,12 @@  static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	unsigned space;
 	int ret;
 
-	/* The whole point of reserving space is to not wait! */
-	WARN_ON(ringbuf->reserved_in_use);
-
 	if (intel_ring_space(ringbuf) >= n)
 		return 0;
 
+	/* The whole point of reserving space is to not wait! */
+	WARN_ON(ringbuf->reserved_in_use);
+
 	list_for_each_entry(request, &ring->request_list, list) {
 		space = __intel_ring_space(request->postfix, ringbuf->tail,
 					   ringbuf->size);
@@ -2145,21 +2145,11 @@  static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	return 0;
 }
 
-static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
+static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
 {
 	uint32_t __iomem *virt;
-	struct intel_ringbuffer *ringbuf = ring->buffer;
 	int rem = ringbuf->size - ringbuf->tail;
 
-	/* Can't wrap if space has already been reserved! */
-	WARN_ON(ringbuf->reserved_in_use);
-
-	if (ringbuf->space < rem) {
-		int ret = ring_wait_for_space(ring, rem);
-		if (ret)
-			return ret;
-	}
-
 	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
@@ -2167,8 +2157,6 @@  static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 
 	ringbuf->tail = 0;
 	intel_ring_update_space(ringbuf);
-
-	return 0;
 }
 
 int intel_ring_idle(struct intel_engine_cs *ring)
@@ -2238,9 +2226,21 @@  void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
 void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 {
 	WARN_ON(!ringbuf->reserved_in_use);
-	WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
-	     "request reserved size too small: %d vs %d!\n",
-	     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+	if (ringbuf->tail > ringbuf->reserved_tail) {
+		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
+		     "request reserved size too small: %d vs %d!\n",
+		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
+	} else {
+		/*
+		 * The ring was wrapped while the reserved space was in use.
+		 * That means that some unknown amount of the ring tail was
+		 * no-op filled and skipped. Thus simply adding the ring size
+		 * to the tail and doing the above space check will not work.
+		 * Rather than attempt to track how much tail was skipped,
+		 * it is much simpler to say that also skipping the sanity
+		 * check every once in a while is not a big issue.
+		 */
+	}
 
 	ringbuf->reserved_size   = 0;
 	ringbuf->reserved_in_use = false;
@@ -2249,33 +2249,44 @@  void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
 static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 {
 	struct intel_ringbuffer *ringbuf = ring->buffer;
-	int ret;
-
-	/*
-	 * Add on the reserved size to the request to make sure that after
-	 * the intended commands have been emitted, there is guaranteed to
-	 * still be enough free space to send them to the hardware.
-	 */
-	if (!ringbuf->reserved_in_use)
-		bytes += ringbuf->reserved_size;
-
-	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
-		ret = intel_wrap_ring_buffer(ring);
-		if (unlikely(ret))
-			return ret;
+	int remain = ringbuf->effective_size - ringbuf->tail;
+	int ret, total_bytes, wait_bytes = 0;
+	bool need_wrap = false;
 
-		if(ringbuf->reserved_size) {
-			uint32_t size = ringbuf->reserved_size;
+	if (ringbuf->reserved_in_use)
+		total_bytes = bytes;
+	else
+		total_bytes = bytes + ringbuf->reserved_size;
 
-			intel_ring_reserved_space_cancel(ringbuf);
-			intel_ring_reserved_space_reserve(ringbuf, size);
+	if (unlikely(bytes > remain)) {
+		/*
+		 * Not enough space for the basic request. So need to flush
+		 * out the remainder and then wait for base + reserved.
+		 */
+		wait_bytes = remain + total_bytes;
+		need_wrap = true;
+	} else {
+		if (unlikely(total_bytes > remain)) {
+			/*
+			 * The base request will fit but the reserved space
+			 * falls off the end. So only need to wait for the
+			 * reserved size after flushing out the remainder.
+			 */
+			wait_bytes = remain + ringbuf->reserved_size;
+			need_wrap = true;
+		} else if (total_bytes > ringbuf->space) {
+			/* No wrapping required, just waiting. */
+			wait_bytes = total_bytes;
 		}
 	}
 
-	if (unlikely(ringbuf->space < bytes)) {
-		ret = ring_wait_for_space(ring, bytes);
+	if (wait_bytes) {
+		ret = ring_wait_for_space(ring, wait_bytes);
 		if (unlikely(ret))
 			return ret;
+
+		if (need_wrap)
+			__wrap_ring_buffer(ringbuf);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0e2bbc6..304cac4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -473,7 +473,6 @@  static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
  * will always have sufficient room to do its stuff. The request creation
  * code calls this automatically.
  */
-int intel_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
 /* Cancel the reservation, e.g. because the request is being discarded. */
 void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
@@ -482,4 +481,7 @@  void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf);
 /* Finish with the reserved space - for use by i915_add_request() only. */
 void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf);
 
+/* Legacy ringbuffer specific portion of reservation code: */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
+
 #endif /* _INTEL_RINGBUFFER_H_ */