
[21/29] drm/i915: Convert 'i915_seqno_passed' calls into 'i915_gem_request_completed'

Message ID 1414694481-15724-22-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived

Commit Message

John Harrison Oct. 30, 2014, 6:41 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

Almost everywhere that called i915_seqno_passed() was really asking 'has the
given seqno popped out of the hardware yet?'. Thus it had to query the current
hardware seqno and then do a signed delta comparison (which copes with wrapping
around zero but not with seqno values more than 2GB apart, although the latter
is unlikely!).
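
For reference, the wrap-safe comparison being described is essentially the
signed-delta idiom sketched below (a paraphrase of the existing helper for
illustration, not something changed by this patch):

    /* Reinterpret the unsigned difference as signed so that seqnos less
     * than 2GB apart compare correctly even across a wrap through zero. */
    static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
    {
    	return (s32)(seq1 - seq2) >= 0;
    }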

Now that the majority of seqno instances have been replaced with request
structures, it is possible to convert this test to be request based as well.
There is now an 'i915_gem_request_completed()' function which takes a request
and returns true or false as appropriate. The ultimate aim is that this will
simply return a cached internal value: e.g. '_completed(req) { return
req->completed; }'. However, at the moment, the implementation actually falls
back to the original 'seqno_passed()' mechanism. This is to be improved in
later patches in the series.

This checkin converts almost all _seqno_passed() calls. The only one left is
in the semaphore code, which still requires seqnos rather than request
structures.
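
To make the end goal concrete, the helper could eventually collapse to a
cached-flag lookup along the lines of the sketch below (the 'completed' field
is hypothetical here; it would only be introduced by later patches in the
series):

    /* Sketch only: return completion state tracked on the request itself,
     * with no hardware seqno read at all. */
    static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
    {
    	return req->completed;
    }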

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c  |    3 +--
 drivers/gpu/drm/i915/i915_drv.h      |   18 ++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem.c      |   22 +++++++---------------
 drivers/gpu/drm/i915/i915_irq.c      |    3 +--
 drivers/gpu/drm/i915/intel_display.c |   11 +++--------
 5 files changed, 30 insertions(+), 27 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 15ad322..f26c2b2 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -549,8 +549,7 @@  static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 					   i915_gem_request_get_seqno(work->flip_queued_req),
 					   dev_priv->next_seqno,
 					   work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-					   i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-							     i915_gem_request_get_seqno(work->flip_queued_req)));
+					   i915_gem_request_completed(work->flip_queued_req, true));
 			} else
 				seq_printf(m, "Flip not associated with any ring\n");
 			seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6137b51..1d30dcb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1997,6 +1997,12 @@  static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
 	*pdst = src;
 }
 
+/*
+ * XXX: i915_gem_request_completed should be here but currently needs the
+ * definition of i915_seqno_passed() which is below. It will be moved in
+ * a later patch when the call to i915_seqno_passed() is obsoleted...
+ */
+
 struct drm_i915_file_private {
 	struct drm_i915_private *dev_priv;
 	struct drm_file *file;
@@ -3060,6 +3066,18 @@  wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 	}
 }
 
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
+					      bool lazy_coherency)
+{
+	u32 seqno;
+
+	BUG_ON(req == NULL);
+
+	seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+
+	return i915_seqno_passed(seqno, req->seqno);
+}
+
 static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
 				      struct drm_i915_gem_request *req)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 917a01b..2daafeb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1167,7 +1167,7 @@  static int __wait_request(struct drm_i915_gem_request *req,
 
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true), req->seqno))
+	if (i915_gem_request_completed(req, true))
 		return 0;
 
 	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
@@ -1203,7 +1203,7 @@  static int __wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
-		if (i915_seqno_passed(ring->get_seqno(ring, false), req->seqno)) {
+		if (i915_gem_request_completed(req, false)) {
 			ret = 0;
 			break;
 		}
@@ -2251,8 +2251,7 @@  i915_gem_object_retire(struct drm_i915_gem_object *obj)
 	if (ring == NULL)
 		return;
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      i915_gem_request_get_seqno(obj->last_read_req)))
+	if (i915_gem_request_completed(obj->last_read_req, true))
 		i915_gem_object_move_to_inactive(obj);
 }
 
@@ -2512,12 +2511,9 @@  struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
 	struct drm_i915_gem_request *request;
-	u32 completed_seqno;
-
-	completed_seqno = ring->get_seqno(ring, false);
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (i915_seqno_passed(completed_seqno, request->seqno))
+		if (i915_gem_request_completed(request, false))
 			continue;
 
 		return request;
@@ -2652,15 +2648,11 @@  void i915_gem_request_unreference_irq(struct drm_i915_gem_request *req)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
-	uint32_t seqno;
-
 	if (list_empty(&ring->request_list))
 		return;
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	seqno = ring->get_seqno(ring, true);
-
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate,
 	 * before we free the context associated with the requests.
@@ -2672,7 +2664,7 @@  i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 				      struct drm_i915_gem_object,
 				      ring_list);
 
-		if (!i915_seqno_passed(seqno, i915_gem_request_get_seqno(obj->last_read_req)))
+		if (!i915_gem_request_completed(obj->last_read_req, true))
 			break;
 
 		i915_gem_object_move_to_inactive(obj);
@@ -2687,7 +2679,7 @@  i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (!i915_seqno_passed(seqno, request->seqno))
+		if (!i915_gem_request_completed(request, true))
 			break;
 
 		trace_i915_gem_request_retire(request);
@@ -2714,7 +2706,7 @@  i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	}
 
 	if (unlikely(ring->trace_irq_req &&
-		     i915_seqno_passed(seqno, i915_gem_request_get_seqno(ring->trace_irq_req)))) {
+		     i915_gem_request_completed(ring->trace_irq_req, true))) {
 		ring->irq_put(ring);
 		i915_gem_request_assign(&ring->trace_irq_req, NULL);
 	}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 003b268..21e1d69 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2727,8 +2727,7 @@  static bool
 ring_idle(struct intel_engine_cs *ring)
 {
 	return (list_empty(&ring->request_list) ||
-		i915_seqno_passed(ring->get_seqno(ring, false),
-				  i915_gem_request_get_seqno(ring_last_request(ring))));
+		i915_gem_request_completed(ring_last_request(ring), false));
 }
 
 static bool
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 44005a2..eca4240 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9346,8 +9346,7 @@  static int intel_postpone_flip(struct drm_i915_gem_object *obj)
 
 	ring = obj->ring;
 
-	if (i915_seqno_passed(ring->get_seqno(ring, true),
-			      i915_gem_request_get_seqno(obj->last_write_req)))
+	if (i915_gem_request_completed(obj->last_write_req, true))
 		return 0;
 
 	ret = i915_gem_check_olr(obj->last_write_req);
@@ -9365,9 +9364,6 @@  void intel_notify_mmio_flip(struct intel_engine_cs *ring)
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	struct intel_crtc *intel_crtc;
 	unsigned long irq_flags;
-	u32 seqno;
-
-	seqno = ring->get_seqno(ring, false);
 
 	spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
 	for_each_intel_crtc(ring->dev, intel_crtc) {
@@ -9380,7 +9376,7 @@  void intel_notify_mmio_flip(struct intel_engine_cs *ring)
 		if (ring->id != mmio_flip->ring_id)
 			continue;
 
-		if (i915_seqno_passed(seqno, i915_gem_request_get_seqno(mmio_flip->req))) {
+		if (i915_gem_request_completed(mmio_flip->req, false)) {
 			intel_do_mmio_flip(intel_crtc);
 			i915_gem_request_assign(&mmio_flip->req, NULL);
 			ring->irq_put(ring);
@@ -9451,8 +9447,7 @@  static bool __intel_pageflip_stall_check(struct drm_device *dev,
 
 	if (work->flip_ready_vblank == 0) {
 		if (work->flip_queued_ring) {
-			if (!i915_seqno_passed(work->flip_queued_ring->get_seqno(work->flip_queued_ring, true),
-				       i915_gem_request_get_seqno(work->flip_queued_req)))
+			if (!i915_gem_request_completed(work->flip_queued_req, true))
 				return false;
 
 			i915_gem_request_unreference_irq(work->flip_queued_req);