
drm/i915: Split out parking from the idle worker for reuse

Message ID 20180406143339.3234-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson April 6, 2018, 2:33 p.m. UTC
We will want to park GEM before disengaging the drive^W^W^W unwedging.
Since we already do the work for idling, expose the guts as a new
function that we can then reuse.

v2: Just skip if already parked; makes it more forgiving to use by
future callers.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
Even with the follow-up patch on hold, I think this will be useful when
we figure out the right order of operations in reset, and it stands by
itself as an improvement.

Any objections to pushing this by itself?
-Chris
---
 drivers/gpu/drm/i915/i915_gem.c | 69 ++++++++++++++++++++-------------
 1 file changed, 41 insertions(+), 28 deletions(-)
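
For illustration only, a hedged sketch of the kind of reuse the commit
message anticipates: a reset/unwedge path parking GEM before it touches
the wedged state. The caller below is hypothetical and not part of this
patch; note that i915_gem_park() asserts that no requests are active, so
any real caller has to idle first.

/* Hypothetical caller -- a sketch, not code from this series. */
static void prepare_gt_for_unwedge(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	/*
	 * Park the GT before unwedging; i915_gem_park() simply returns
	 * early if we are already parked (the v2 change above).
	 */
	i915_gem_park(i915);
}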

Comments

Michal Wajdeczko April 6, 2018, 3:18 p.m. UTC | #1
On Fri, 06 Apr 2018 16:33:39 +0200, Chris Wilson <chris@chris-wilson.co.uk> wrote:

> We will want to park GEM before disengaging the drive^W^W^W unwedging.
> Since we already do the work for idling, expose the guts as a new
> function that we can then reuse.
>
> v2: Just skip if already parked; makes it more forgiving to use by
> future callers.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
> Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
> Even with the follow-up patch on hold, I think this will be useful when
> we figure out the right order of operations in reset, and it stands by
> itself as an improvement.
>
> Any objections to pushing this by itself?
> -Chris

I would only suggest to make this new function more symmetrical to
"mark_busy" from i915_request.c both in naming and location ;)

/michal

Chris Wilson April 6, 2018, 3:21 p.m. UTC | #2
Quoting Michal Wajdeczko (2018-04-06 16:18:53)
> I would only suggest to make this new function more symmetrical to
> "mark_busy" from i915_request.c both in naming and location ;)

Fine, we'll pull back unpark and export that as well!
-Chris
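
For reference, a rough sketch of what pulling the unpark side back out
might look like: essentially the body of mark_busy() from
i915_request.c relocated alongside i915_gem_park(). The contents and
ordering below are an assumption based on the existing mark_busy(), not
code posted in this thread.

/* Sketch only: an unpark counterpart mirroring i915_gem_park(). */
void i915_gem_unpark(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->gt.active_requests);

	if (i915->gt.awake)
		return;

	/* Mirror the runtime-pm and GT_IRQ power references dropped in park. */
	intel_runtime_pm_get_noresume(i915);
	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);

	i915->gt.awake = true;
	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
		i915->gt.epoch = 1;

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);

	i915_pmu_gt_unparked(i915);
	intel_engines_unpark(i915);

	i915_queue_hangcheck(i915);
	queue_delayed_work(i915->wq,
			   &i915->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}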
Chris Wilson April 6, 2018, 3:25 p.m. UTC | #3
Quoting Chris Wilson (2018-04-06 16:21:04)
> Quoting Michal Wajdeczko (2018-04-06 16:18:53)
> > I would only suggest to make this new function more symmetrical to
> > "mark_busy" from i915_request.c both in naming and location ;)
> 
> Fine, we'll pull back unpark and export that as well!

The tricky part is trying to get the split correct; i915_request really
shouldn't be doing the GEM unpark itself, but that is, or at least was,
the convenient point to place it. The rationale for placing it there was
for autoenabling rps, but that can be rearranged now, so it may be
feasible to push the mark_busy/unpark into the execbuf ioctl, i.e. back
to the GEM level.

Hmm.
-Chris
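
Purely as an illustration of "pushing mark_busy/unpark back to the GEM
level" (the helper and call site below are assumptions, not anything
posted in this thread), the execbuf path could wake the GT once per
ioctl instead of relying on request allocation to do it:

/* Sketch only: unparking from execbuf rather than from request allocation. */
static int eb_submit_unparked(struct i915_execbuffer *eb)
{
	/* Wake the GT before the request for this ioctl is built... */
	i915_gem_unpark(eb->i915);

	/* ...so mark_busy() no longer needs to live in i915_request.c. */
	return eb_submit(eb);
}

Whether the active_requests bookkeeping still lines up at that point is
exactly the ordering question raised above.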

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9650a7b10c5f..134529598a84 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -136,6 +136,46 @@  int i915_mutex_lock_interruptible(struct drm_device *dev)
 	return 0;
 }
 
+static u32 i915_gem_park(struct drm_i915_private *i915)
+{
+	lockdep_assert_held(&i915->drm.struct_mutex);
+	GEM_BUG_ON(i915->gt.active_requests);
+
+	if (!i915->gt.awake)
+		return I915_EPOCH_INVALID;
+
+	GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+
+	/*
+	 * Be paranoid and flush a concurrent interrupt to make sure
+	 * we don't reactivate any irq tasklets after parking.
+	 *
+	 * FIXME: Note that even though we have waited for execlists to be idle,
+	 * there may still be an in-flight interrupt even though the CSB
+	 * is now empty. synchronize_irq() makes sure that a residual interrupt
+	 * is completed before we continue, but it doesn't prevent the HW from
+	 * raising a spurious interrupt later. To complete the shield we should
+	 * coordinate disabling the CS irq with flushing the interrupts.
+	 */
+	synchronize_irq(i915->drm.irq);
+
+	intel_engines_park(i915);
+	i915_gem_timelines_park(i915);
+
+	i915_pmu_gt_parked(i915);
+
+	i915->gt.awake = false;
+
+	if (INTEL_GEN(i915) >= 6)
+		gen6_rps_idle(i915);
+
+	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
+
+	intel_runtime_pm_put(i915);
+
+	return i915->gt.epoch;
+}
+
 int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
@@ -3496,36 +3536,9 @@  i915_gem_idle_work_handler(struct work_struct *work)
 	if (new_requests_since_last_retire(dev_priv))
 		goto out_unlock;
 
-	/*
-	 * Be paranoid and flush a concurrent interrupt to make sure
-	 * we don't reactivate any irq tasklets after parking.
-	 *
-	 * FIXME: Note that even though we have waited for execlists to be idle,
-	 * there may still be an in-flight interrupt even though the CSB
-	 * is now empty. synchronize_irq() makes sure that a residual interrupt
-	 * is completed before we continue, but it doesn't prevent the HW from
-	 * raising a spurious interrupt later. To complete the shield we should
-	 * coordinate disabling the CS irq with flushing the interrupts.
-	 */
-	synchronize_irq(dev_priv->drm.irq);
-
-	intel_engines_park(dev_priv);
-	i915_gem_timelines_park(dev_priv);
+	epoch = i915_gem_park(dev_priv);
 
-	i915_pmu_gt_parked(dev_priv);
-
-	GEM_BUG_ON(!dev_priv->gt.awake);
-	dev_priv->gt.awake = false;
-	epoch = dev_priv->gt.epoch;
-	GEM_BUG_ON(epoch == I915_EPOCH_INVALID);
 	rearm_hangcheck = false;
-
-	if (INTEL_GEN(dev_priv) >= 6)
-		gen6_rps_idle(dev_priv);
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
-
-	intel_runtime_pm_put(dev_priv);
 out_unlock:
 	mutex_unlock(&dev_priv->drm.struct_mutex);