[17/21] drm/i915: Repeat retiring of requests until the seqno is stable

Message ID 1302945465-32115-18-git-send-email-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson April 16, 2011, 9:17 a.m. UTC
Walking the lists of objects and requests to retire may take a
significant amount of time, enough for the GPU to have finished more
batches in the meantime. So repeat the list walk until the GPU seqno is
stable.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |   66 ++++++++++++++++++++-------------------
 1 files changed, 34 insertions(+), 32 deletions(-)
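
The shape of the change is a retry-until-stable loop: snapshot the hardware
seqno, do the (potentially slow) retirement walk against that snapshot, then
re-read the seqno and go around again if the GPU has advanced in the meantime.
Below is a minimal, self-contained sketch of that pattern in plain C;
get_seqno(), retire_up_to() and the fake GPU state are hypothetical stand-ins
for ring->get_seqno(ring) and the request/object list walks in the patch, not
actual driver code.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-ins for the driver: a fake GPU that completes one
 * more batch each time its seqno is read, and a "retire" step standing
 * in for the request/object list walks.
 */
static uint32_t pending = 5;		/* batches still in flight */
static uint32_t hw_seqno;		/* last batch the fake GPU finished */
static uint32_t retired_seqno;		/* last batch we have retired */

static uint32_t get_seqno(void)
{
	if (pending) {			/* the GPU finishes one more batch */
		pending--;
		hw_seqno++;
	}
	return hw_seqno;
}

static void retire_up_to(uint32_t seqno)
{
	retired_seqno = seqno;		/* placeholder for the list walks */
}

int main(void)
{
	uint32_t seqno;
	int passes = 0;

	/* Retry-until-stable: repeat the walk while the GPU keeps moving. */
	do {
		seqno = get_seqno();	/* snapshot of GPU progress */
		retire_up_to(seqno);	/* potentially slow retirement pass */
		passes++;
	} while (seqno != get_seqno());

	printf("stable at seqno %u after %d passes\n",
	       (unsigned)retired_seqno, passes);
	return 0;
}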

Comments

Daniel Vetter April 16, 2011, 1:45 p.m. UTC | #1
I'm gonna trust you not to have fat-fingered the indent change ;-)

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b9515ac..58e77d6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1952,47 +1952,45 @@  i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	seqno = ring->get_seqno(ring);
+	do {
+		seqno = ring->get_seqno(ring);
 
-	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
-		if (seqno >= ring->sync_seqno[i])
-			ring->sync_seqno[i] = 0;
+		while (!list_empty(&ring->request_list)) {
+			struct drm_i915_gem_request *request;
 
-	while (!list_empty(&ring->request_list)) {
-		struct drm_i915_gem_request *request;
+			request = list_first_entry(&ring->request_list,
+						   struct drm_i915_gem_request,
+						   list);
 
-		request = list_first_entry(&ring->request_list,
-					   struct drm_i915_gem_request,
-					   list);
-
-		if (!i915_seqno_passed(seqno, request->seqno))
-			break;
+			if (!i915_seqno_passed(seqno, request->seqno))
+				break;
 
-		trace_i915_gem_request_retire(ring, request->seqno);
+			trace_i915_gem_request_retire(ring, request->seqno);
 
-		list_del(&request->list);
-		i915_gem_request_remove_from_client(request);
-		kfree(request);
-	}
+			list_del(&request->list);
+			i915_gem_request_remove_from_client(request);
+			kfree(request);
+		}
 
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate.
-	 */
-	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj;
+		/* Move any buffers on the active list that are no longer referenced
+		 * by the ringbuffer to the flushing/inactive lists as appropriate.
+		 */
+		while (!list_empty(&ring->active_list)) {
+			struct drm_i915_gem_object *obj;
 
-		obj= list_first_entry(&ring->active_list,
-				      struct drm_i915_gem_object,
-				      ring_list);
+			obj = list_first_entry(&ring->active_list,
+					       struct drm_i915_gem_object,
+					       ring_list);
 
-		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
-			break;
+			if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+				break;
 
-		if (obj->base.write_domain != 0)
-			i915_gem_object_move_to_flushing(obj);
-		else
-			i915_gem_object_move_to_inactive(obj);
-	}
+			if (obj->base.write_domain != 0)
+				i915_gem_object_move_to_flushing(obj);
+			else
+				i915_gem_object_move_to_inactive(obj);
+		}
+	} while (seqno != ring->get_seqno(ring));
 
 	if (unlikely(ring->trace_irq_seqno &&
 		     i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
@@ -2000,6 +1998,10 @@  i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 		ring->trace_irq_seqno = 0;
 	}
 
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
+		if (seqno >= ring->sync_seqno[i])
+			ring->sync_seqno[i] = 0;
+
 	WARN_ON(i915_verify_lists(ring->dev));
 }