diff mbox

[28/38] drm/i915: Queue the idling context switch after all other timelines

Message ID 20160920083012.2754-29-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Chris Wilson Sept. 20, 2016, 8:30 a.m. UTC
Before suspend, we wait for the switch to the kernel context. In order
for all the other context images to be complete upon suspend, that
switch must be the last operation by the GPU (i.e. this idling request
must not overtake any pending requests). To make this request execute last,
we make it depend on every other inflight request.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c         | 12 ++++++++++++
 drivers/gpu/drm/i915/i915_gem_context.c | 21 +++++++++++++++------
 2 files changed, 27 insertions(+), 6 deletions(-)

Comments

Joonas Lahtinen Sept. 26, 2016, 8:49 a.m. UTC | #1
On ti, 2016-09-20 at 09:30 +0100, Chris Wilson wrote:
> +__maybe_unused static bool is_kernel_context(struct drm_i915_private *dev_priv)

__maybe_unused most definitely does not belong here. Need to
solve the GEM_BUG_ON otherwise. Also, "is_in_kernel_context" as you
have "switch_to_kernel_context" as counterpart.

> @@ -929,21 +929,30 @@ int i915_switch_context(struct drm_i915_gem_request *req)
>  int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
>  {
>  	struct intel_engine_cs *engine;
> +	struct i915_gem_timeline *timeline;
> 

Lockdep here? While we're around and adding more code.

With the __maybe_unused removed;

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 768190594c4f..b10d26d3254d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4259,6 +4259,17 @@  void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
 		i915_gem_object_put(obj);
 }
 
+__maybe_unused static bool is_kernel_context(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv)
+		if (engine->last_context != dev_priv->kernel_context)
+			return false;
+
+	return true;
+}
+
 int i915_gem_suspend(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4288,6 +4299,7 @@  int i915_gem_suspend(struct drm_device *dev)
 
 	i915_gem_retire_requests(dev_priv);
 
+	GEM_BUG_ON(!is_kernel_context(dev_priv));
 	i915_gem_context_lost(dev_priv);
 	mutex_unlock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 1d2ab73a8f43..fe45cd7640b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -929,21 +929,30 @@  int i915_switch_context(struct drm_i915_gem_request *req)
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
+	struct i915_gem_timeline *timeline;
 
 	for_each_engine(engine, dev_priv) {
 		struct drm_i915_gem_request *req;
 		int ret;
 
-		if (engine->last_context == NULL)
-			continue;
-
-		if (engine->last_context == dev_priv->kernel_context)
-			continue;
-
 		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
+		/* Queue this switch after all other activity */
+		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
+			struct drm_i915_gem_request *prev;
+			struct intel_timeline *tl;
+
+			tl = &timeline->engine[engine->id];
+			prev = i915_gem_active_raw(&tl->last_request,
+						   &dev_priv->drm.struct_mutex);
+			if (prev)
+				i915_sw_fence_await_sw_fence(&req->submit,
+							     &prev->submit,
+							     NULL, GFP_KERNEL);
+		}
+
 		ret = i915_switch_context(req);
 		i915_add_request_no_flush(req);
 		if (ret)