[RFC,29/44] drm/i915: Hook scheduler into intel_ring_idle()

Message ID 1403803475-16337-30-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
State New, archived

Commit Message

John Harrison June 26, 2014, 5:24 p.m. UTC
From: John Harrison <John.C.Harrison@Intel.com>

The code to wait for a ring to be idle ends by calling __wait_seqno() on the
value in the last request structure. However, with a scheduler, there may be
work queued up but not yet submitted. There is also the possibility of
pre-emption re-ordering work after it has been submitted. Thus the request
structure that is currently last will not necessarily still represent the last
piece of work by the time that particular seqno has completed.

It is not possible to force the scheduler to submit all work from inside the
ring idle function as it might not be a safe place to do so. Instead, the code
must simply return early if the scheduler has outstanding work, and roll back
as far as releasing the driver mutex lock, returning the system to a
consistent state.
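
In other words, intel_ring_idle() gains a new -EAGAIN return and callers that
hold struct_mutex need a retry path around it. A minimal sketch of what such a
caller might look like, assuming a hypothetical i915_scheduler_flush() helper
that submits the outstanding queued work (neither the helper nor this loop is
part of the patch):

	static int example_ring_idle_with_retry(struct intel_engine_cs *ring)
	{
		struct drm_device *dev = ring->dev;
		int ret;

		for (;;) {
			ret = intel_ring_idle(ring);
			if (ret != -EAGAIN)
				return ret;

			/* The scheduler still has queued work for this ring.
			 * Drop the mutex so that work can be submitted from a
			 * safe point, then retake the lock and wait again. */
			mutex_unlock(&dev->struct_mutex);
			i915_scheduler_flush(ring);	/* hypothetical helper */
			mutex_lock(&dev->struct_mutex);
		}
	}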
---
 drivers/gpu/drm/i915/i915_scheduler.c   |   12 ++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.h   |    1 +
 drivers/gpu/drm/i915/intel_ringbuffer.c |    8 ++++++++
 3 files changed, 21 insertions(+)

Patch

diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 6b6827f..6a10a76 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -165,6 +165,13 @@  int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
 	return 0;
 }
 
+bool i915_scheduler_is_idle(struct intel_engine_cs *ring)
+{
+	/* Do stuff... */
+
+	return true;
+}
+
 #else   /* CONFIG_DRM_I915_SCHEDULER */
 
 int i915_scheduler_init(struct drm_device *dev)
@@ -177,6 +184,11 @@  int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
 	return 0;
 }
 
+bool i915_scheduler_is_idle(struct intel_engine_cs *ring)
+{
+	return true;
+}
+
 int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 {
 	return i915_gem_do_execbuffer_final(&qe->params);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 898d2bb..1b3d51a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -74,6 +74,7 @@  int         i915_scheduler_closefile(struct drm_device *dev,
 				     struct drm_file *file);
 int         i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe);
 int         i915_scheduler_handle_IRQ(struct intel_engine_cs *ring);
+bool        i915_scheduler_is_idle(struct intel_engine_cs *ring);
 
 #ifdef CONFIG_DRM_I915_SCHEDULER
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1ef0cbd..1ad162b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1651,6 +1651,14 @@  int intel_ring_idle(struct intel_engine_cs *ring)
 			return ret;
 	}
 
+	/* If there is anything outstanding within the scheduler then give up
+	 * now as the submission of such work requires the mutex lock. While
+	 * the lock is definitely held at this point (i915_wait_seqno will BUG
+	 * if called without), the driver is not necessarily at a safe point
+	 * to start submitting ring work. */
+	if (!i915_scheduler_is_idle(ring))
+		return -EAGAIN;
+
 	/* Wait upon the last request to be completed */
 	if (list_empty(&ring->request_list))
 		return 0;
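
For reference, the i915_scheduler_is_idle() stub in this RFC simply returns
true. One plausible shape for the eventual check, assuming the scheduler keeps
a per-ring list of queue entries protected by a spinlock (the scheduler, lock,
node_queue, link and status fields shown here are assumptions about the
scheduler's internals, not the final code):

	bool i915_scheduler_is_idle(struct intel_engine_cs *ring)
	{
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		struct i915_scheduler *scheduler = dev_priv->scheduler;
		struct i915_scheduler_queue_entry *node;
		unsigned long flags;
		bool idle = true;

		spin_lock_irqsave(&scheduler->lock, flags);

		/* Any entry that has not yet run to completion means the
		 * ring still has work outstanding. */
		list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
			if (node->status != i915_sqs_complete) {
				idle = false;
				break;
			}
		}

		spin_unlock_irqrestore(&scheduler->lock, flags);

		return idle;
	}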