
[RFC,28/44] drm/i915: Added scheduler flush calls to ring throttle and idle functions

Message ID 1403803475-16337-29-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived

Commit Message

John Harrison June 26, 2014, 5:24 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

When requesting that all GPU work be completed, it is now necessary to get the
scheduler involved in order to flush out work that is queued but not yet submitted.
---
 drivers/gpu/drm/i915/i915_gem.c       |   16 +++++++++++++++-
 drivers/gpu/drm/i915/i915_scheduler.c |    7 +++++++
 drivers/gpu/drm/i915/i915_scheduler.h |    5 +++++
 3 files changed, 27 insertions(+), 1 deletion(-)
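The new entry point is exercised in two places below: i915_gpu_idle() flushes with
the second argument set to true (presumably meaning the caller already holds
struct_mutex), while i915_gem_ring_throttle() passes false. Both callers treat only
a negative return as an error, which leaves room for the flush to report how many
entries it pushed out. A minimal sketch of that calling convention, assuming only
the names introduced by this patch (the surrounding helper is hypothetical):

	/* Illustration only: drain the software scheduler before waiting on the
	 * hardware.  'mutex_held' mirrors the is_locked flag; a negative return
	 * is an error, any non-negative return counts as success. */
	static int example_drain_ring(struct intel_engine_cs *ring, bool mutex_held)
	{
		int ret;

		ret = I915_SCHEDULER_FLUSH_ALL(ring, mutex_held);
		if (ret < 0)
			return ret;	/* flush failed, propagate */

		/* ... only now is it safe to wait for the ring to go idle ... */
		return 0;
	}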

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aa1e0b2..1c508b7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3049,6 +3049,10 @@  int i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	for_each_ring(ring, dev_priv, i) {
+		ret = I915_SCHEDULER_FLUSH_ALL(ring, true);
+		if (ret < 0)
+			return ret;
+
 		ret = i915_switch_context(ring, ring->default_context);
 		if (ret)
 			return ret;
@@ -4088,7 +4092,7 @@  i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	struct intel_engine_cs *ring = NULL;
 	unsigned reset_counter;
 	u32 seqno = 0;
-	int ret;
+	int i, ret;
 
 	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
 	if (ret)
@@ -4098,6 +4102,16 @@  i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (ret)
 		return ret;
 
+	for_each_ring(ring, dev_priv, i) {
+		/* Need a mechanism to flush out scheduler entries that were
+		 * submitted more than 'recent_enough' time ago as well! In the
+		 * meantime, just flush everything out to ensure that entries
+		 * can not sit around indefinitely. */
+		ret = I915_SCHEDULER_FLUSH_ALL(ring, false);
+		if (ret < 0)
+			return ret;
+	}
+
 	spin_lock(&file_priv->mm.lock);
 	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d579bab..6b6827f 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -125,6 +125,13 @@  int i915_scheduler_flush_seqno(struct intel_engine_cs *ring, bool is_locked,
 	return 0;
 }
 
+int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
+{
+	/* Do stuff... */
+
+	return 0;
+}
+
 bool i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
 			       uint32_t seqno, bool *completed)
 {
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 3811359..898d2bb 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -58,9 +58,13 @@  struct i915_scheduler_queue_entry {
 };
 
 #ifdef CONFIG_DRM_I915_SCHEDULER
+#   define I915_SCHEDULER_FLUSH_ALL(ring, locked)                            \
+		i915_scheduler_flush(ring, locked)
+
 #   define I915_SCHEDULER_FLUSH_SEQNO(ring, locked, seqno)                   \
 		i915_scheduler_flush_seqno(ring, locked, seqno)
 #else
+#   define I915_SCHEDULER_FLUSH_ALL(ring, locked)               0
 #   define I915_SCHEDULER_FLUSH_SEQNO(ring, locked, seqno)      0
 #endif
 
@@ -81,6 +85,7 @@  struct i915_scheduler {
 
 int         i915_scheduler_fly_seqno(struct intel_engine_cs *ring, uint32_t seqno);
 int         i915_scheduler_remove(struct intel_engine_cs *ring);
+int         i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked);
 int         i915_scheduler_flush_seqno(struct intel_engine_cs *ring,
 				       bool is_locked, uint32_t seqno);
 bool        i915_scheduler_is_seqno_in_flight(struct intel_engine_cs *ring,
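Note that i915_scheduler_flush() is left as a stub here ("Do stuff..."); the real
body presumably arrives later in the series. Purely as a hypothetical sketch of what
the callers above appear to expect, it would walk the ring's list of queued but not
yet submitted entries, submit each one, and return either a negative error code or
the number of entries flushed. Everything below other than the function signature
and the struct names from i915_scheduler.h is assumed, not taken from this patch:

	/* Hypothetical sketch only - not the implementation from this series.
	 * Assumes the scheduler keeps a per-ring list of queued entries
	 * (scheduler->queue[ring->id]) linked through a 'link' member, and that
	 * a submit helper (i915_scheduler_submit_entry()) exists. */
	int i915_scheduler_flush(struct intel_engine_cs *ring, bool is_locked)
	{
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		struct i915_scheduler *scheduler = dev_priv->scheduler;
		struct i915_scheduler_queue_entry *node, *tmp;
		int flushed = 0, ret;

		/* 'is_locked' tells the submit path whether the caller already
		 * holds struct_mutex, i.e. whether it may take the lock itself. */
		list_for_each_entry_safe(node, tmp, &scheduler->queue[ring->id], link) {
			ret = i915_scheduler_submit_entry(node, is_locked);
			if (ret < 0)
				return ret;
			flushed++;
		}

		/* Matches the callers' 'ret < 0' check: non-negative is success. */
		return flushed;
	}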