
[0784/1094] drm/i915: Flush request queue when waiting for ring space

Message ID: 1413889294-31328-785-git-send-email-dheerajx.s.jamwal@intel.com

Commit Message

Dheeraj Jamwal Oct. 21, 2014, 10:56 a.m. UTC
From: Chris Wilson <chris@chris-wilson.co.uk>

During the review of

commit 1f70999f9052f5a1b0ce1a55aff3808f2ec9fe42
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   Mon Jan 27 22:43:07 2014 +0000

    drm/i915: Prevent recursion by retiring requests when the ring is full

Ville raised the point that our interaction with request->tail was
likely to foul up other uses elsewhere (such as hang check comparing
ACTHD against requests).

However, we also need to restore the implicit retire requests that
certain test cases depend upon (e.g. igt/gem_exec_lut_handle). This
raises the spectre that the ppgtt will randomly call i915_gpu_idle()
and recurse back into intel_ring_begin().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=78023
Reviewed-by: Brad Volkin <bradley.d.volkin@intel.com>
[danvet: Remove now unused 'tail' variable as spotted by Brad.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
(cherry picked from commit 1cf0ba14740d96fbf6f58a201f000a34b74f4725)

Signed-off-by: Dheeraj Jamwal <dheerajx.s.jamwal@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |    1 +
 drivers/gpu/drm/i915/i915_gem.c         |    3 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c |   39 +++++++++++--------------------
 3 files changed, 16 insertions(+), 27 deletions(-)
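
For reference, the free-space computation that the patch below factors out
into __ring_space() is plain wrap-around arithmetic over the ring size. A
minimal standalone sketch of that calculation follows; the 64-byte reserve
and the sample head/tail/size values are illustrative, not taken from the
driver headers:

#include <stdio.h>

/* Space the driver always keeps free between tail and head; defined
 * locally here (value illustrative) so the sketch compiles on its own. */
#define I915_RING_FREE_SPACE 64

/* Same arithmetic as the __ring_space() helper added by the patch:
 * bytes available between tail and head, accounting for wrap-around. */
static int __ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	/* Hypothetical 16 KiB ring with the tail near the end and the head
	 * near the start, so the wrap-around branch is taken. */
	printf("space = %d bytes\n", __ring_space(0x100, 0x3f00, 0x4000));
	return 0;
}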

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e5eb651..284b8ce 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2205,6 +2205,7 @@  struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_ring_buffer *ring);
 
 bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a8f5697..029d8b0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -59,7 +59,6 @@  static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
 static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-static void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -2565,7 +2564,7 @@  void i915_gem_reset(struct drm_device *dev)
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-static void
+void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
 	uint32_t seqno;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e6c7a4d..9907d66 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -40,14 +40,19 @@ 
  */
 #define CACHELINE_BYTES 64
 
-static inline int ring_space(struct intel_ring_buffer *ring)
+static inline int __ring_space(int head, int tail, int size)
 {
-	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
+	int space = head - (tail + I915_RING_FREE_SPACE);
 	if (space < 0)
-		space += ring->size;
+		space += size;
 	return space;
 }
 
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+	return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
+}
+
 static bool intel_ring_stopped(struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -1482,7 +1487,7 @@  void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_i915_gem_request *request;
-	u32 seqno = 0, tail;
+	u32 seqno = 0;
 	int ret;
 
 	if (ring->last_retired_head != -1) {
@@ -1495,26 +1500,10 @@  static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		int space;
-
-		if (request->tail == -1)
-			continue;
-
-		space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
-		if (space < 0)
-			space += ring->size;
-		if (space >= n) {
+		if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
 			seqno = request->seqno;
-			tail = request->tail;
 			break;
 		}
-
-		/* Consume this request in case we need more space than
-		 * is available and so need to prevent a race between
-		 * updating last_retired_head and direct reads of
-		 * I915_RING_HEAD. It also provides a nice sanity check.
-		 */
-		request->tail = -1;
 	}
 
 	if (seqno == 0)
@@ -1524,11 +1513,11 @@  static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	if (ret)
 		return ret;
 
-	ring->head = tail;
-	ring->space = ring_space(ring);
-	if (WARN_ON(ring->space < n))
-		return -ENOSPC;
+	i915_gem_retire_requests_ring(ring);
+	ring->head = ring->last_retired_head;
+	ring->last_retired_head = -1;
 
+	ring->space = ring_space(ring);
 	return 0;
 }
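
The rewritten intel_ring_wait_request() above uses the same helper to pick a
victim: it walks the outstanding requests oldest-first, selects the first one
whose recorded tail would leave at least n bytes free once retired, waits for
its seqno, and then lets i915_gem_retire_requests_ring() advance
last_retired_head, which becomes the new ring->head. A self-contained sketch
of just that selection step, with made-up request tails and ring geometry:

#include <stdio.h>

#define I915_RING_FREE_SPACE 64	/* illustrative reserve, as in the sketch above */

static int __ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	/* Hypothetical outstanding requests, oldest first: the ring tail
	 * each one recorded when it was emitted. */
	int request_tail[] = { 0x0200, 0x0a00, 0x1800, 0x2400 };
	int ring_tail = 0x3e00, ring_size = 0x4000, n = 2048;
	int i;

	for (i = 0; i < 4; i++) {
		int space = __ring_space(request_tail[i], ring_tail, ring_size);

		printf("retiring up to request %d would leave %d bytes\n", i, space);
		if (space >= n) {
			printf("-> wait for request %d's seqno, retire, "
			       "then head = last_retired_head\n", i);
			break;
		}
	}
	return 0;
}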