[v3,08/28] drm/i915: Make 'i915_gem_check_olr' actually check by request not seqno

Message ID 1416854990-1920-9-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived

Commit Message

John Harrison Nov. 24, 2014, 6:49 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

Updated the _check_olr() function to actually take a request object and compare
it directly to the ring's outstanding lazy request (OLR), rather than extracting
seqnos and comparing those.
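For illustration only (not part of the patch), this is the call-site change it
implies, taken from the i915_gem_object_flush_active() hunk below:

	/* Before: caller digs the seqno out of the request */
	ret = i915_gem_check_olr(obj->ring,
		     i915_gem_request_get_seqno(obj->last_read_req));

	/* After: caller hands over the request itself */
	ret = i915_gem_check_olr(obj->last_read_req);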

Note that there is one use case where the request object being processed is no
longer available at that point in the call stack. Hence a temporary copy of the
original seqno-based function is kept (renamed _check_ols()). It will be
removed in a subsequent patch.
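For illustration only: the caller still working on a bare seqno is
i915_wait_seqno(), which is why it is switched to the temporary helper in the
hunk below:

	/* i915_wait_seqno() has no request object to hand, only a seqno */
	ret = i915_gem_check_ols(ring, seqno);
	if (ret)
		return ret;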

Also, downgraded a BUG_ON to a WARN_ON as apparently the former is frowned upon
for shipping code.
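For reference (not part of the patch), the behavioural difference in the mutex
assertion: BUG_ON() oopses the kernel when the lock is not held, whereas
WARN_ON() only prints a backtrace and carries on:

	/* Old: kills the caller (or panics) if struct_mutex is not held */
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	/* New: logs a warning and continues */
	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));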

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |   18 +++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c |   28 +++++++++++-----------------
 2 files changed, 28 insertions(+), 18 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5c00cf3..62ffdb5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2585,7 +2585,7 @@  bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
-int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
@@ -3125,4 +3125,20 @@  wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 	}
 }
 
+/* XXX: Temporary solution to be removed later in patch series. */
+static inline int __must_check i915_gem_check_ols(
+				     struct intel_engine_cs *ring, u32 seqno)
+{
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+	ret = 0;
+	if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
+		ret = i915_add_request(ring, NULL);
+
+	return ret;
+}
+/* XXX: Temporary solution to be removed later in patch series. */
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b3e03b..b07de69 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1153,19 +1153,18 @@  i915_gem_check_wedge(struct i915_gpu_error *error,
 }
 
 /*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
+ * Compare arbitrary request against outstanding lazy request. Emit on match.
  */
 int
-i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+i915_gem_check_olr(struct drm_i915_gem_request *req)
 {
 	int ret;
 
-	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
 
 	ret = 0;
-	if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
-		ret = i915_add_request(ring, NULL);
+	if (req == req->ring->outstanding_lazy_request)
+		ret = i915_add_request(req->ring, NULL);
 
 	return ret;
 }
@@ -1328,7 +1327,7 @@  i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_olr(ring, seqno);
+	ret = i915_gem_check_ols(ring, seqno);
 	if (ret)
 		return ret;
 
@@ -1398,7 +1397,6 @@  i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring = obj->ring;
 	unsigned reset_counter;
-	u32 seqno;
 	int ret;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1408,22 +1406,19 @@  i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	if (!req)
 		return 0;
 
-	seqno = i915_gem_request_get_seqno(req);
-	WARN_ON(seqno == 0);
-
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
 	if (ret)
 		return ret;
 
-	ret = i915_gem_check_olr(ring, seqno);
+	ret = i915_gem_check_olr(req);
 	if (ret)
 		return ret;
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	i915_gem_request_reference(req);
 	mutex_unlock(&dev->struct_mutex);
-	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
-				file_priv);
+	ret = __i915_wait_seqno(ring, i915_gem_request_get_seqno(req),
+				reset_counter, true, NULL, file_priv);
 	mutex_lock(&dev->struct_mutex);
 	i915_gem_request_unreference(req);
 	if (ret)
@@ -2883,8 +2878,7 @@  i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	int ret;
 
 	if (obj->active) {
-		ret = i915_gem_check_olr(obj->ring,
-			     i915_gem_request_get_seqno(obj->last_read_req));
+		ret = i915_gem_check_olr(obj->last_read_req);
 		if (ret)
 			return ret;
 
@@ -3014,7 +3008,7 @@  i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (seqno <= from->semaphore.sync_seqno[idx])
 		return 0;
 
-	ret = i915_gem_check_olr(obj->ring, seqno);
+	ret = i915_gem_check_olr(obj->last_read_req);
 	if (ret)
 		return ret;