diff mbox

[29/36] drm/i915: Simplify rc6/rps enabling

Message ID 20180314093748.8541-29-chris@chris-wilson.co.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Chris Wilson March 14, 2018, 9:37 a.m. UTC
As we know that whenever the GT is awake, rc6 and rps are enabled (if
available), we can remove the individual tracking and move the enabling
into the gen6_rps_busy/gen6_rps_idle() (now called intel_gt_pm_busy and
intel_gt_pm_idle) entry points.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c  |   6 +-
 drivers/gpu/drm/i915/i915_drv.c      |   3 -
 drivers/gpu/drm/i915/i915_drv.h      |  19 +--
 drivers/gpu/drm/i915/i915_gem.c      |  23 +--
 drivers/gpu/drm/i915/i915_request.c  |   4 +-
 drivers/gpu/drm/i915/i915_sysfs.c    |   6 +-
 drivers/gpu/drm/i915/intel_display.c |   4 +-
 drivers/gpu/drm/i915/intel_gt_pm.c   | 273 +++++++++++++----------------------
 drivers/gpu/drm/i915/intel_gt_pm.h   |   7 +-
 9 files changed, 125 insertions(+), 220 deletions(-)

Comments

sagar.a.kamble@intel.com March 16, 2018, 2:28 p.m. UTC | #1
On 3/14/2018 3:07 PM, Chris Wilson wrote:
> As we know that whenever the GT is awake, rc6 and rps are enabled (if
> available), then we can remove the individual tracking and enabling to
> the gen6_rps_busy/gen6_rps_idle() (now called intel_gt_pm_busy and
> intel_gt_pm_idle) entry points.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/i915_debugfs.c  |   6 +-
>   drivers/gpu/drm/i915/i915_drv.c      |   3 -
>   drivers/gpu/drm/i915/i915_drv.h      |  19 +--
>   drivers/gpu/drm/i915/i915_gem.c      |  23 +--
>   drivers/gpu/drm/i915/i915_request.c  |   4 +-
>   drivers/gpu/drm/i915/i915_sysfs.c    |   6 +-
>   drivers/gpu/drm/i915/intel_display.c |   4 +-
>   drivers/gpu/drm/i915/intel_gt_pm.c   | 273 +++++++++++++----------------------
>   drivers/gpu/drm/i915/intel_gt_pm.h   |   7 +-
>   9 files changed, 125 insertions(+), 220 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index ea7a30ce53e0..cfecc2509224 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -2191,9 +2191,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
>   	struct drm_file *file;
>   
> -	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
>   	seq_printf(m, "GPU busy? %s [%d requests]\n",
>   		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
> +	seq_printf(m, "RPS active? %s\n", yesno(rps->active));
>   	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
>   	seq_printf(m, "Boosts outstanding? %d\n",
>   		   atomic_read(&rps->num_waiters));
> @@ -2226,9 +2226,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
>   		   atomic_read(&rps->boosts));
>   	mutex_unlock(&dev->filelist_mutex);
>   
> -	if (INTEL_GEN(dev_priv) >= 6 &&
> -	    rps->enabled &&
> -	    dev_priv->gt.active_requests) {
> +	if (INTEL_GEN(dev_priv) >= 6 && dev_priv->gt.awake) {
>   		u32 rpup, rpupei;
>   		u32 rpdown, rpdownei;
>   
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index 11eaaf679450..80acd0a06786 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -2575,9 +2575,6 @@ static int intel_runtime_suspend(struct device *kdev)
>   	struct drm_i915_private *dev_priv = to_i915(dev);
>   	int ret;
>   
> -	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
> -		return -ENODEV;
> -
>   	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
>   		return -ENODEV;
>   
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 0acabfd1e3e7..0973622431bd 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -731,14 +731,10 @@ struct intel_rps_ei {
>   
>   struct intel_rps {
>   	struct mutex lock;
> -
> -	/*
> -	 * work, interrupts_enabled and pm_iir are protected by
> -	 * dev_priv->irq_lock
> -	 */
>   	struct work_struct work;
> -	bool interrupts_enabled;
> -	u32 pm_iir;
> +
> +	bool active;
> +	u32 pm_iir; /* protected by dev_priv->irq_lock */
>   
>   	/* PM interrupt bits that should never be masked */
>   	u32 pm_intrmsk_mbz;
> @@ -774,7 +770,6 @@ struct intel_rps {
>   	int last_adj;
>   	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
>   
> -	bool enabled;
>   	atomic_t num_waiters;
>   	atomic_t boosts;
>   
> @@ -783,14 +778,13 @@ struct intel_rps {
>   };
>   
>   struct intel_rc6 {
> -	bool enabled;
>   	u64 prev_hw_residency[4];
>   	u64 cur_residency[4];
>   };
>   
> -struct intel_gen6_power_mgmt {
> -	struct intel_rps rps;
> +struct intel_gt_pm {
>   	struct intel_rc6 rc6;
> +	struct intel_rps rps;
>   
>   	u32 imr;
>   	u32 ier;
> @@ -1777,8 +1771,7 @@ struct drm_i915_private {
>   	/* Cannot be determined by PCIID. You must always read a register. */
>   	u32 edram_cap;
>   
> -	/* gen6+ GT PM state */
> -	struct intel_gen6_power_mgmt gt_pm;
> +	struct intel_gt_pm gt_pm;
>   
>   	/* ilk-only ips/rps state. Everything in here is protected by the global
>   	 * mchdev_lock in intel_pm.c */
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 8a5bf1e26515..9f5b3a2a8b61 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -388,10 +388,8 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
>   	 * forcing the clocks too high for the whole system, we only allow
>   	 * each client to waitboost once in a busy period.
>   	 */
> -	if (rps_client && !i915_request_started(rq)) {
> -		if (INTEL_GEN(rq->i915) >= 6)
> -			gen6_rps_boost(rq, rps_client);
> -	}
> +	if (rps_client && !i915_request_started(rq))
> +		intel_rps_boost(rq, rps_client);
>   
>   	timeout = i915_request_wait(rq, flags, timeout);
>   
> @@ -3165,15 +3163,9 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
>   
>   	i915_gem_restore_fences(dev_priv);
>   
> -	if (dev_priv->gt_pm.rc6.enabled) {
> -		dev_priv->gt_pm.rc6.enabled = false;
> -		intel_gt_pm_enable_rc6(dev_priv);
> -	}
> -
> -	if (dev_priv->gt.awake) {
> -		if (INTEL_GEN(dev_priv) >= 6)
> -			gen6_rps_busy(dev_priv);
> -	}
> +	intel_gt_pm_enable_rc6(dev_priv);
> +	if (dev_priv->gt.awake)
> +		intel_gt_pm_busy(dev_priv);
These changes can also be skipped if patch 31 is moved ahead in the queue.
>   }
>   
>   void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
> @@ -3529,15 +3521,14 @@ i915_gem_idle_work_handler(struct work_struct *work)
>   
>   	i915_pmu_gt_parked(dev_priv);
>   
> +	intel_gt_pm_idle(dev_priv);
> +
>   	GEM_BUG_ON(!dev_priv->gt.awake);
>   	dev_priv->gt.awake = false;
>   	epoch = dev_priv->gt.epoch;
>   	GEM_BUG_ON(epoch == I915_EPOCH_INVALID);
>   	rearm_hangcheck = false;
>   
> -	if (INTEL_GEN(dev_priv) >= 6)
> -		gen6_rps_idle(dev_priv);
> -
>   	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
>   
>   	intel_runtime_pm_put(dev_priv);
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index 6b589cffd00e..605770191ceb 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -274,9 +274,9 @@ static void mark_busy(struct drm_i915_private *i915)
>   	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
>   		i915->gt.epoch = 1;
>   
> +	intel_gt_pm_busy(i915);
>   	i915_update_gfx_val(i915);
> -	if (INTEL_GEN(i915) >= 6)
> -		gen6_rps_busy(i915);
> +
>   	i915_pmu_gt_unparked(i915);
>   
>   	intel_engines_unpark(i915);
> diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
> index a72aab28399f..db9d55fe449b 100644
> --- a/drivers/gpu/drm/i915/i915_sysfs.c
> +++ b/drivers/gpu/drm/i915/i915_sysfs.c
> @@ -377,7 +377,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
>   			  intel_gpu_freq(dev_priv, val));
>   
>   	rps->max_freq_softlimit = val;
> -	schedule_work(&rps->work);
> +	if (rps->active)
This check can be removed, as intel_rps_work() checks rps->active itself.
> +		schedule_work(&rps->work);
>   
>   unlock:
>   	mutex_unlock(&rps->lock);
> @@ -419,7 +420,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
>   	}
>   
>   	rps->min_freq_softlimit = val;
> -	schedule_work(&rps->work);
> +	if (rps->active)
> +		schedule_work(&rps->work);
>   
>   unlock:
>   	mutex_unlock(&rps->lock);
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 00e7f61fa8df..fc1e567e253b 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -12640,7 +12640,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
>   	 * vblank without our intervention, so leave RPS alone.
>   	 */
>   	if (!i915_request_started(rq))
> -		gen6_rps_boost(rq, NULL);
> +		intel_rps_boost(rq, NULL);
>   	i915_request_put(rq);
>   
>   	drm_crtc_vblank_put(wait->crtc);
> @@ -12658,7 +12658,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
>   	if (!dma_fence_is_i915(fence))
>   		return;
>   
> -	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
> +	if (!HAS_RPS(to_i915(crtc->dev)))
>   		return;
>   
>   	if (drm_crtc_vblank_get(crtc))
> diff --git a/drivers/gpu/drm/i915/intel_gt_pm.c b/drivers/gpu/drm/i915/intel_gt_pm.c
> index 883f442ed41e..8630c30a7e48 100644
> --- a/drivers/gpu/drm/i915/intel_gt_pm.c
> +++ b/drivers/gpu/drm/i915/intel_gt_pm.c
> @@ -326,15 +326,11 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
>    */
>   static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
>   {
> -	struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -
> -	if (val != rps->cur_freq) {
> +	if (val != dev_priv->gt_pm.rps.cur_freq) {
>   		if (INTEL_GEN(dev_priv) >= 9)
> -			I915_WRITE(GEN6_RPNSWREQ,
> -				   GEN9_FREQUENCY(val));
> +			I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val));
>   		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
> -			I915_WRITE(GEN6_RPNSWREQ,
> -				   HSW_FREQUENCY(val));
> +			I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(val));
>   		else
>   			I915_WRITE(GEN6_RPNSWREQ,
>   				   GEN6_FREQUENCY(val) |
> @@ -351,9 +347,6 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
>   	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
>   	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
>   
> -	rps->cur_freq = val;
> -	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
> -
>   	return 0;
>   }
>   
> @@ -376,48 +369,17 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
>   	gen6_set_rps_thresholds(dev_priv, val);
>   	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
>   
> -	dev_priv->gt_pm.rps.cur_freq = val;
> -	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
> -
>   	return 0;
>   }
>   
> -/*
> - * vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
> - *
> - * If Gfx is Idle, then
> - * 1. Forcewake Media well.
> - * 2. Request idle freq.
> - * 3. Release Forcewake of Media well.
> - */
> -static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
> +static int __intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
>   {
> -	struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -	u32 val = rps->idle_freq;
> -	int err;
> -
> -	if (rps->cur_freq <= val)
> -		return;
> -
> -	/*
> -	 * The punit delays the write of the frequency and voltage until it
> -	 * determines the GPU is awake. During normal usage we don't want to
> -	 * waste power changing the frequency if the GPU is sleeping (rc6).
> -	 * However, the GPU and driver is now idle and we do not want to delay
> -	 * switching to minimum voltage (reducing power whilst idle) as we do
> -	 * not expect to be woken in the near future and so must flush the
> -	 * change by waking the device.
> -	 *
> -	 * We choose to take the media powerwell (either would do to trick the
> -	 * punit into committing the voltage change) as that takes a lot less
> -	 * power than the render powerwell.
> -	 */
> -	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
> -	err = valleyview_set_rps(dev_priv, val);
> -	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
> -
> -	if (err)
> -		DRM_ERROR("Failed to set RPS for idle\n");
> +	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> +		return valleyview_set_rps(dev_priv, val);
> +	else if (INTEL_GEN(dev_priv) >= 6)
> +		return gen6_set_rps(dev_priv, val);
> +	else
> +		return 0;
>   }
>   
>   static int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
> @@ -426,20 +388,20 @@ static int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
>   	int err;
>   
>   	lockdep_assert_held(&rps->lock);
> +	GEM_BUG_ON(!rps->active);
>   	GEM_BUG_ON(val > rps->max_freq);
>   	GEM_BUG_ON(val < rps->min_freq);
>   
> -	if (!rps->enabled) {
> +	err = __intel_set_rps(dev_priv, val);
> +	if (err)
> +		return err;
> +
> +	if (val != rps->cur_freq) {
> +		trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
>   		rps->cur_freq = val;
> -		return 0;
>   	}
>   
> -	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> -		err = valleyview_set_rps(dev_priv, val);
> -	else
> -		err = gen6_set_rps(dev_priv, val);
> -
> -	return err;
> +	return 0;
>   }
>   
>   static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
> @@ -524,18 +486,11 @@ static void enable_rps_interrupts(struct drm_i915_private *dev_priv)
>   {
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
>   
> -	if (READ_ONCE(rps->interrupts_enabled))
> -		return;
> -
>   	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
>   		return;
>   
>   	spin_lock_irq(&dev_priv->irq_lock);
> -	WARN_ON_ONCE(rps->pm_iir);
> -	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & rps->pm_events);
> -	rps->interrupts_enabled = true;
>   	gen6_enable_pm_irq(dev_priv, rps->pm_events);
> -
>   	spin_unlock_irq(&dev_priv->irq_lock);
>   }
>   
> @@ -543,29 +498,15 @@ static void disable_rps_interrupts(struct drm_i915_private *dev_priv)
>   {
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
>   
> -	if (!READ_ONCE(rps->interrupts_enabled))
> -		return;
> -
>   	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
>   		return;
>   
>   	spin_lock_irq(&dev_priv->irq_lock);
> -	rps->interrupts_enabled = false;
> -
>   	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
> -
>   	gen6_disable_pm_irq(dev_priv, rps->pm_events);
> -
>   	spin_unlock_irq(&dev_priv->irq_lock);
> -	synchronize_irq(dev_priv->drm.irq);
>   
> -	/* Now that we will not be generating any more work, flush any
> -	 * outstanding tasks. As we are called on the RPS idle path,
> -	 * we will reset the GPU to minimum frequencies, so the current
> -	 * state of the worker can be discarded.
> -	 */
> -	cancel_work_sync(&rps->work);
> -	gen6_reset_rps_interrupts(dev_priv);
> +	synchronize_irq(dev_priv->drm.irq);
>   }
>   
>   static void vlv_c0_read(struct drm_i915_private *dev_priv,
> @@ -632,6 +573,9 @@ static void intel_rps_work(struct work_struct *work)
>   
>   	mutex_lock(&rps->lock);
>   
> +	if (!rps->active)
> +		goto unlock;
> +
>   	min = rps->min_freq_softlimit;
>   	max = rps->max_freq_softlimit;
>   	if (client_boost && max < rps->boost_freq)
> @@ -680,107 +624,125 @@ static void intel_rps_work(struct work_struct *work)
>   		adj = 0;
>   	}
>   
> -	mutex_unlock(&rps->lock);
> -
>   	if (pm_iir) {
>   		spin_lock_irq(&i915->irq_lock);
> -		if (rps->interrupts_enabled)
> -			gen6_unmask_pm_irq(i915, rps->pm_events);
> +		gen6_unmask_pm_irq(i915, rps->pm_events);
>   		spin_unlock_irq(&i915->irq_lock);
>   		rps->last_adj = adj;
>   	}
> +
> +unlock:
> +	mutex_unlock(&rps->lock);
>   }
>   
>   void intel_gt_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
>   {
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
>   
> -	if (pm_iir & rps->pm_events) {
> +	if (rps->active && pm_iir & rps->pm_events) {
rps->active is updated under struct_mutex/rps->lock, so I think this
unlocked read will not be synchronized properly.
>   		spin_lock(&dev_priv->irq_lock);
>   		gen6_mask_pm_irq(dev_priv, pm_iir & rps->pm_events);
> -		if (rps->interrupts_enabled) {
> -			rps->pm_iir |= pm_iir & rps->pm_events;
> -			schedule_work(&rps->work);
> -		}
> +		rps->pm_iir |= pm_iir & rps->pm_events;
>   		spin_unlock(&dev_priv->irq_lock);
> +
> +		schedule_work(&rps->work);
>   	}
>   }
>   
> -void gen6_rps_busy(struct drm_i915_private *dev_priv)
> +void intel_gt_pm_busy(struct drm_i915_private *dev_priv)
>   {
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
> +	u8 freq;
>   
>   	if (!HAS_RPS(dev_priv))
>   		return;
>   
> -	mutex_lock(&rps->lock);
> -	if (rps->enabled) {
> -		u8 freq;
> +	GEM_BUG_ON(rps->pm_iir);
> +	GEM_BUG_ON(rps->active);
This GEM_BUG_ON should be moved under rps->lock.
>   
> -		I915_WRITE(GEN6_PMINTRMSK,
> -			   gen6_rps_pm_mask(dev_priv, rps->cur_freq));
> +	mutex_lock(&rps->lock);
> +	rps->active = true;
>   
> -		enable_rps_interrupts(dev_priv);
> -		memset(&rps->ei, 0, sizeof(rps->ei));
> +	/*
> +	 * Use the user's desired frequency as a guide, but for better
> +	 * performance, jump directly to RPe as our starting frequency.
> +	 */
> +	freq = max(rps->cur_freq, rps->efficient_freq);
> +	if (intel_set_rps(dev_priv,
> +			  clamp(freq,
> +				rps->min_freq_softlimit,
> +				rps->max_freq_softlimit)))
> +		DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
>   
> -		/*
> -		 * Use the user's desired frequency as a guide, but for better
> -		 * performance, jump directly to RPe as our starting frequency.
> -		 */
> -		freq = max(rps->cur_freq,
> -			   rps->efficient_freq);
> +	rps->last_adj = 0;
>   
> -		if (intel_set_rps(dev_priv,
> -				  clamp(freq,
> -					rps->min_freq_softlimit,
> -					rps->max_freq_softlimit)))
> -			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
> +	if (INTEL_GEN(dev_priv) >= 6) {
> +		memset(&rps->ei, 0, sizeof(rps->ei));
> +		enable_rps_interrupts(dev_priv);
>   	}
> +
>   	mutex_unlock(&rps->lock);
>   }
>   
> -void gen6_rps_idle(struct drm_i915_private *dev_priv)
> +void intel_gt_pm_idle(struct drm_i915_private *dev_priv)
>   {
>   	struct intel_rps *rps = &dev_priv->gt_pm.rps;
>   
> -	if (!HAS_RPS(dev_priv))
> +	if (!rps->active)
This check, too, should be done under rps->lock.
>   		return;
>   
> -	/*
> -	 * Flush our bottom-half so that it does not race with us
> -	 * setting the idle frequency and so that it is bounded by
> -	 * our rpm wakeref. And then disable the interrupts to stop any
> -	 * futher RPS reclocking whilst we are asleep.
> -	 */
> +	mutex_lock(&rps->lock);
> +
>   	disable_rps_interrupts(dev_priv);
>   
This is not protected by the INTEL_GEN() >= 6 check.
Other than this, the changes look good to me.
> -	mutex_lock(&rps->lock);
> -	if (rps->enabled) {
> -		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
> -			vlv_set_rps_idle(dev_priv);
> -		else
> -			gen6_set_rps(dev_priv, rps->idle_freq);
> -		rps->last_adj = 0;
> +	if (rps->cur_freq > rps->idle_freq) {
> +		/*
> +		 * The punit delays the write of the frequency and voltage
> +		 * until it determines the GPU is awake. During normal usage we
> +		 * don't want to waste power changing the frequency if the GPU
> +		 * is sleeping (rc6).  However, the GPU and driver is now idle
> +		 * and we do not want to delay switching to minimum voltage
> +		 * (reducing power whilst idle) as we do not expect to be woken
> +		 * in the near future and so must flush the change by waking
> +		 * the device.
> +		 *
> +		 * We choose to take the media powerwell (either would do to
> +		 * trick the punit into committing the voltage change) as that
> +		 * takes a lot less power than the render powerwell.
> +		 */
> +		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
> +		if (__intel_set_rps(dev_priv, rps->idle_freq))
> +			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
> +		rps->cur_freq = rps->idle_freq;
> +		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
> +	}
> +
> +	if (INTEL_GEN(dev_priv) >= 6) {
>   		I915_WRITE(GEN6_PMINTRMSK,
>   			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
>   	}
> +
> +	rps->last_adj = 0;
> +	rps->active = false;
>   	mutex_unlock(&rps->lock);
> +
> +	/*
> +	 * Now that we will not be generating any more work, flush any
> +	 * outstanding tasks. As we are called on the RPS idle path,
> +	 * we will reset the GPU to minimum frequencies, so the current
> +	 * state of the worker can be discarded.
> +	 */
> +	cancel_work_sync(&rps->work);
> +	gen6_reset_rps_interrupts(dev_priv);
>   }
>   
> -void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *client)
> +void intel_rps_boost(struct i915_request *rq, struct intel_rps_client *client)
>   {
>   	struct intel_rps *rps = &rq->i915->gt_pm.rps;
>   	unsigned long flags;
>   	bool boost;
>   
> -	if (!HAS_RPS(rq->i915))
> -		return;
> -
> -	/*
> -	 * This is intentionally racy! We peek at the state here, then
> -	 * validate inside the RPS worker.
> -	 */
> -	if (!rps->enabled)
> +	if (!READ_ONCE(rps->active))
>   		return;
>   
>   	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
> @@ -992,20 +954,6 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
>   	}
>   }
>   
> -static void reset_rps(struct drm_i915_private *dev_priv,
> -		      int (*set)(struct drm_i915_private *, u8))
> -{
> -	struct intel_rps *rps = &dev_priv->gt_pm.rps;
> -	u8 freq = rps->cur_freq;
> -
> -	/* force a reset */
> -	rps->power = -1;
> -	rps->cur_freq = -1;
> -
> -	if (set(dev_priv, freq))
> -		DRM_ERROR("Failed to reset RPS to initial values\n");
> -}
> -
>   /* See the Gen9_GT_PM_Programming_Guide doc for the below */
>   static void gen9_enable_rps(struct drm_i915_private *dev_priv)
>   {
> @@ -1027,7 +975,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
>   	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
>   	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers.
>   	 */
> -	reset_rps(dev_priv, gen6_set_rps);
>   
>   	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>   }
> @@ -1197,8 +1144,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
>   		   GEN6_RP_UP_BUSY_AVG |
>   		   GEN6_RP_DOWN_IDLE_AVG);
>   
> -	reset_rps(dev_priv, gen6_set_rps);
> -
>   	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>   }
>   
> @@ -1298,8 +1243,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
>   	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
>   	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
>   
> -	reset_rps(dev_priv, gen6_set_rps);
> -
>   	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>   }
>   
> @@ -1813,8 +1756,6 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
>   	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
>   	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
>   
> -	reset_rps(dev_priv, valleyview_set_rps);
> -
>   	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>   }
>   
> @@ -1899,8 +1840,6 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
>   	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
>   	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
>   
> -	reset_rps(dev_priv, valleyview_set_rps);
> -
>   	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
>   }
>   
> @@ -2385,10 +2324,7 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
>   
>   void intel_gt_pm_sanitize(struct drm_i915_private *dev_priv)
>   {
> -	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
>   	intel_gt_pm_disable_rps(dev_priv);
> -
> -	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
>   	intel_gt_pm_disable_rc6(dev_priv);
>   
>   	if (INTEL_GEN(dev_priv) < 11)
> @@ -2487,9 +2423,6 @@ static void __enable_rc6(struct drm_i915_private *dev_priv)
>   {
>   	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
>   
> -	if (dev_priv->gt_pm.rc6.enabled)
> -		return;
> -
>   	if (IS_CHERRYVIEW(dev_priv))
>   		cherryview_enable_rc6(dev_priv);
>   	else if (IS_VALLEYVIEW(dev_priv))
> @@ -2500,8 +2433,6 @@ static void __enable_rc6(struct drm_i915_private *dev_priv)
>   		gen8_enable_rc6(dev_priv);
>   	else if (INTEL_GEN(dev_priv) >= 6)
>   		gen6_enable_rc6(dev_priv);
> -
> -	dev_priv->gt_pm.rc6.enabled = true;
>   }
>   
>   static void __enable_rps(struct drm_i915_private *dev_priv)
> @@ -2510,9 +2441,6 @@ static void __enable_rps(struct drm_i915_private *dev_priv)
>   
>   	lockdep_assert_held(&rps->lock);
>   
> -	if (rps->enabled)
> -		return;
> -
>   	if (IS_CHERRYVIEW(dev_priv)) {
>   		cherryview_enable_rps(dev_priv);
>   	} else if (IS_VALLEYVIEW(dev_priv)) {
> @@ -2536,7 +2464,12 @@ static void __enable_rps(struct drm_i915_private *dev_priv)
>   	WARN_ON(rps->efficient_freq < rps->min_freq);
>   	WARN_ON(rps->efficient_freq > rps->max_freq);
>   
> -	rps->enabled = true;
> +	/* Force a reset */
> +	rps->cur_freq = rps->max_freq;
> +	rps->power = -1;
> +	__intel_set_rps(dev_priv, rps->idle_freq);
> +
> +	rps->cur_freq = rps->idle_freq;
>   }
>   
>   void intel_gt_pm_enable_rc6(struct drm_i915_private *dev_priv)
> @@ -2563,9 +2496,6 @@ static void __disable_rc6(struct drm_i915_private *dev_priv)
>   {
>   	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
>   
> -	if (!dev_priv->gt_pm.rc6.enabled)
> -		return;
> -
>   	if (INTEL_GEN(dev_priv) >= 9)
>   		gen9_disable_rc6(dev_priv);
>   	else if (IS_CHERRYVIEW(dev_priv))
> @@ -2574,8 +2504,6 @@ static void __disable_rc6(struct drm_i915_private *dev_priv)
>   		valleyview_disable_rc6(dev_priv);
>   	else if (INTEL_GEN(dev_priv) >= 6)
>   		gen6_disable_rc6(dev_priv);
> -
> -	dev_priv->gt_pm.rc6.enabled = false;
>   }
>   
>   void intel_gt_pm_disable_rc6(struct drm_i915_private *dev_priv)
> @@ -2589,9 +2517,6 @@ static void __disable_rps(struct drm_i915_private *dev_priv)
>   {
>   	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
>   
> -	if (!dev_priv->gt_pm.rps.enabled)
> -		return;
> -
>   	if (INTEL_GEN(dev_priv) >= 9)
>   		gen9_disable_rps(dev_priv);
>   	else if (IS_CHERRYVIEW(dev_priv))
> @@ -2602,8 +2527,6 @@ static void __disable_rps(struct drm_i915_private *dev_priv)
>   		gen6_disable_rps(dev_priv);
>   	else if (INTEL_GEN(dev_priv) >= 5)
>   		ironlake_disable_drps(dev_priv);
> -
> -	dev_priv->gt_pm.rps.enabled = false;
>   }
>   
>   void intel_gt_pm_disable_rps(struct drm_i915_private *dev_priv)
> diff --git a/drivers/gpu/drm/i915/intel_gt_pm.h b/drivers/gpu/drm/i915/intel_gt_pm.h
> index 5975c63f46bf..314912c15126 100644
> --- a/drivers/gpu/drm/i915/intel_gt_pm.h
> +++ b/drivers/gpu/drm/i915/intel_gt_pm.h
> @@ -42,11 +42,12 @@ void intel_gt_pm_disable_rps(struct drm_i915_private *dev_priv);
>   void intel_gt_pm_enable_rc6(struct drm_i915_private *dev_priv);
>   void intel_gt_pm_disable_rc6(struct drm_i915_private *dev_priv);
>   
> +void intel_gt_pm_busy(struct drm_i915_private *dev_priv);
> +void intel_gt_pm_idle(struct drm_i915_private *dev_priv);
> +
>   void intel_gt_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
>   
> -void gen6_rps_busy(struct drm_i915_private *dev_priv);
> -void gen6_rps_idle(struct drm_i915_private *dev_priv);
> -void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
> +void intel_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
>   
>   int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
>   int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
Chris Wilson April 10, 2018, 12:45 p.m. UTC | #2
Quoting Sagar Arun Kamble (2018-03-16 14:28:27)
> 
> 
> On 3/14/2018 3:07 PM, Chris Wilson wrote:
> >   void intel_gt_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
> >   {
> >       struct intel_rps *rps = &dev_priv->gt_pm.rps;
> >   
> > -     if (pm_iir & rps->pm_events) {
> > +     if (rps->active && pm_iir & rps->pm_events) {
> rps->active is updated under struct_mutex rps->lock so i think it will 
> not be synchronized properly

It's an optimistic read, later on inside the worker is where we do the
check. On the enable path, it doesn't matter as we don't care about the
early interrupt, there will be more and we want to set our own
frequency; on the disable path the interrupt is serialised.

> >               spin_lock(&dev_priv->irq_lock);
> >               gen6_mask_pm_irq(dev_priv, pm_iir & rps->pm_events);
> > -             if (rps->interrupts_enabled) {
> > -                     rps->pm_iir |= pm_iir & rps->pm_events;
> > -                     schedule_work(&rps->work);
> > -             }
> > +             rps->pm_iir |= pm_iir & rps->pm_events;
> >               spin_unlock(&dev_priv->irq_lock);
> > +
> > +             schedule_work(&rps->work);
> >       }
> >   }
> >   
> > -void gen6_rps_busy(struct drm_i915_private *dev_priv)
> > +void intel_gt_pm_busy(struct drm_i915_private *dev_priv)
> >   {
> >       struct intel_rps *rps = &dev_priv->gt_pm.rps;
> > +     u8 freq;
> >   
> >       if (!HAS_RPS(dev_priv))
> >               return;
> >   
> > -     mutex_lock(&rps->lock);
> > -     if (rps->enabled) {
> > -             u8 freq;
> > +     GEM_BUG_ON(rps->pm_iir);
> > +     GEM_BUG_ON(rps->active);
> this BUG_ON should move under rps->lock

It's sufficiently serialised by the caller.

> >   
> > -             I915_WRITE(GEN6_PMINTRMSK,
> > -                        gen6_rps_pm_mask(dev_priv, rps->cur_freq));
> > +     mutex_lock(&rps->lock);
> > +     rps->active = true;
> >   
> > -             enable_rps_interrupts(dev_priv);
> > -             memset(&rps->ei, 0, sizeof(rps->ei));
> > +     /*
> > +      * Use the user's desired frequency as a guide, but for better
> > +      * performance, jump directly to RPe as our starting frequency.
> > +      */
> > +     freq = max(rps->cur_freq, rps->efficient_freq);
> > +     if (intel_set_rps(dev_priv,
> > +                       clamp(freq,
> > +                             rps->min_freq_softlimit,
> > +                             rps->max_freq_softlimit)))
> > +             DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
> >   
> > -             /*
> > -              * Use the user's desired frequency as a guide, but for better
> > -              * performance, jump directly to RPe as our starting frequency.
> > -              */
> > -             freq = max(rps->cur_freq,
> > -                        rps->efficient_freq);
> > +     rps->last_adj = 0;
> >   
> > -             if (intel_set_rps(dev_priv,
> > -                               clamp(freq,
> > -                                     rps->min_freq_softlimit,
> > -                                     rps->max_freq_softlimit)))
> > -                     DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
> > +     if (INTEL_GEN(dev_priv) >= 6) {
> > +             memset(&rps->ei, 0, sizeof(rps->ei));
> > +             enable_rps_interrupts(dev_priv);
> >       }
> > +
> >       mutex_unlock(&rps->lock);
> >   }
> >   
> > -void gen6_rps_idle(struct drm_i915_private *dev_priv)
> > +void intel_gt_pm_idle(struct drm_i915_private *dev_priv)
> >   {
> >       struct intel_rps *rps = &dev_priv->gt_pm.rps;
> >   
> > -     if (!HAS_RPS(dev_priv))
> > +     if (!rps->active)
> this too

Again, serialised by the caller. This is important later...

> >               return;
> >   
> > -     /*
> > -      * Flush our bottom-half so that it does not race with us
> > -      * setting the idle frequency and so that it is bounded by
> > -      * our rpm wakeref. And then disable the interrupts to stop any
> > -      * futher RPS reclocking whilst we are asleep.
> > -      */
> > +     mutex_lock(&rps->lock);
> > +
> >       disable_rps_interrupts(dev_priv);
> >   
> this is not protected by INTEL_GEN() >=6 check.

We don't guard this for it has to handle all gen.
-Chris
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ea7a30ce53e0..cfecc2509224 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2191,9 +2191,9 @@  static int i915_rps_boost_info(struct seq_file *m, void *data)
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	struct drm_file *file;
 
-	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
 	seq_printf(m, "GPU busy? %s [%d requests]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
+	seq_printf(m, "RPS active? %s\n", yesno(rps->active));
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
 	seq_printf(m, "Boosts outstanding? %d\n",
 		   atomic_read(&rps->num_waiters));
@@ -2226,9 +2226,7 @@  static int i915_rps_boost_info(struct seq_file *m, void *data)
 		   atomic_read(&rps->boosts));
 	mutex_unlock(&dev->filelist_mutex);
 
-	if (INTEL_GEN(dev_priv) >= 6 &&
-	    rps->enabled &&
-	    dev_priv->gt.active_requests) {
+	if (INTEL_GEN(dev_priv) >= 6 && dev_priv->gt.awake) {
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 11eaaf679450..80acd0a06786 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2575,9 +2575,6 @@  static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
-		return -ENODEV;
-
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
 		return -ENODEV;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0acabfd1e3e7..0973622431bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -731,14 +731,10 @@  struct intel_rps_ei {
 
 struct intel_rps {
 	struct mutex lock;
-
-	/*
-	 * work, interrupts_enabled and pm_iir are protected by
-	 * dev_priv->irq_lock
-	 */
 	struct work_struct work;
-	bool interrupts_enabled;
-	u32 pm_iir;
+
+	bool active;
+	u32 pm_iir; /* protected by dev_priv->irq_lock */
 
 	/* PM interrupt bits that should never be masked */
 	u32 pm_intrmsk_mbz;
@@ -774,7 +770,6 @@  struct intel_rps {
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
-	bool enabled;
 	atomic_t num_waiters;
 	atomic_t boosts;
 
@@ -783,14 +778,13 @@  struct intel_rps {
 };
 
 struct intel_rc6 {
-	bool enabled;
 	u64 prev_hw_residency[4];
 	u64 cur_residency[4];
 };
 
-struct intel_gen6_power_mgmt {
-	struct intel_rps rps;
+struct intel_gt_pm {
 	struct intel_rc6 rc6;
+	struct intel_rps rps;
 
 	u32 imr;
 	u32 ier;
@@ -1777,8 +1771,7 @@  struct drm_i915_private {
 	/* Cannot be determined by PCIID. You must always read a register. */
 	u32 edram_cap;
 
-	/* gen6+ GT PM state */
-	struct intel_gen6_power_mgmt gt_pm;
+	struct intel_gt_pm gt_pm;
 
 	/* ilk-only ips/rps state. Everything in here is protected by the global
 	 * mchdev_lock in intel_pm.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8a5bf1e26515..9f5b3a2a8b61 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -388,10 +388,8 @@  i915_gem_object_wait_fence(struct dma_fence *fence,
 	 * forcing the clocks too high for the whole system, we only allow
 	 * each client to waitboost once in a busy period.
 	 */
-	if (rps_client && !i915_request_started(rq)) {
-		if (INTEL_GEN(rq->i915) >= 6)
-			gen6_rps_boost(rq, rps_client);
-	}
+	if (rps_client && !i915_request_started(rq))
+		intel_rps_boost(rq, rps_client);
 
 	timeout = i915_request_wait(rq, flags, timeout);
 
@@ -3165,15 +3163,9 @@  void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 	i915_gem_restore_fences(dev_priv);
 
-	if (dev_priv->gt_pm.rc6.enabled) {
-		dev_priv->gt_pm.rc6.enabled = false;
-		intel_gt_pm_enable_rc6(dev_priv);
-	}
-
-	if (dev_priv->gt.awake) {
-		if (INTEL_GEN(dev_priv) >= 6)
-			gen6_rps_busy(dev_priv);
-	}
+	intel_gt_pm_enable_rc6(dev_priv);
+	if (dev_priv->gt.awake)
+		intel_gt_pm_busy(dev_priv);
 }
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
@@ -3529,15 +3521,14 @@  i915_gem_idle_work_handler(struct work_struct *work)
 
 	i915_pmu_gt_parked(dev_priv);
 
+	intel_gt_pm_idle(dev_priv);
+
 	GEM_BUG_ON(!dev_priv->gt.awake);
 	dev_priv->gt.awake = false;
 	epoch = dev_priv->gt.epoch;
 	GEM_BUG_ON(epoch == I915_EPOCH_INVALID);
 	rearm_hangcheck = false;
 
-	if (INTEL_GEN(dev_priv) >= 6)
-		gen6_rps_idle(dev_priv);
-
 	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
 
 	intel_runtime_pm_put(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 6b589cffd00e..605770191ceb 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -274,9 +274,9 @@  static void mark_busy(struct drm_i915_private *i915)
 	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
 		i915->gt.epoch = 1;
 
+	intel_gt_pm_busy(i915);
 	i915_update_gfx_val(i915);
-	if (INTEL_GEN(i915) >= 6)
-		gen6_rps_busy(i915);
+
 	i915_pmu_gt_unparked(i915);
 
 	intel_engines_unpark(i915);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index a72aab28399f..db9d55fe449b 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -377,7 +377,8 @@  static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 			  intel_gpu_freq(dev_priv, val));
 
 	rps->max_freq_softlimit = val;
-	schedule_work(&rps->work);
+	if (rps->active)
+		schedule_work(&rps->work);
 
 unlock:
 	mutex_unlock(&rps->lock);
@@ -419,7 +420,8 @@  static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	}
 
 	rps->min_freq_softlimit = val;
-	schedule_work(&rps->work);
+	if (rps->active)
+		schedule_work(&rps->work);
 
 unlock:
 	mutex_unlock(&rps->lock);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 00e7f61fa8df..fc1e567e253b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12640,7 +12640,7 @@  static int do_rps_boost(struct wait_queue_entry *_wait,
 	 * vblank without our intervention, so leave RPS alone.
 	 */
 	if (!i915_request_started(rq))
-		gen6_rps_boost(rq, NULL);
+		intel_rps_boost(rq, NULL);
 	i915_request_put(rq);
 
 	drm_crtc_vblank_put(wait->crtc);
@@ -12658,7 +12658,7 @@  static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
 	if (!dma_fence_is_i915(fence))
 		return;
 
-	if (INTEL_GEN(to_i915(crtc->dev)) < 6)
+	if (!HAS_RPS(to_i915(crtc->dev)))
 		return;
 
 	if (drm_crtc_vblank_get(crtc))
diff --git a/drivers/gpu/drm/i915/intel_gt_pm.c b/drivers/gpu/drm/i915/intel_gt_pm.c
index 883f442ed41e..8630c30a7e48 100644
--- a/drivers/gpu/drm/i915/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/intel_gt_pm.c
@@ -326,15 +326,11 @@  static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
  */
 static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	if (val != rps->cur_freq) {
+	if (val != dev_priv->gt_pm.rps.cur_freq) {
 		if (INTEL_GEN(dev_priv) >= 9)
-			I915_WRITE(GEN6_RPNSWREQ,
-				   GEN9_FREQUENCY(val));
+			I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val));
 		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			I915_WRITE(GEN6_RPNSWREQ,
-				   HSW_FREQUENCY(val));
+			I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(val));
 		else
 			I915_WRITE(GEN6_RPNSWREQ,
 				   GEN6_FREQUENCY(val) |
@@ -351,9 +347,6 @@  static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-	rps->cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
 	return 0;
 }
 
@@ -376,48 +369,17 @@  static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
 	gen6_set_rps_thresholds(dev_priv, val);
 	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
 
-	dev_priv->gt_pm.rps.cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
 	return 0;
 }
 
-/*
- * vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
- *
- * If Gfx is Idle, then
- * 1. Forcewake Media well.
- * 2. Request idle freq.
- * 3. Release Forcewake of Media well.
- */
-static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
+static int __intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
 {
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u32 val = rps->idle_freq;
-	int err;
-
-	if (rps->cur_freq <= val)
-		return;
-
-	/*
-	 * The punit delays the write of the frequency and voltage until it
-	 * determines the GPU is awake. During normal usage we don't want to
-	 * waste power changing the frequency if the GPU is sleeping (rc6).
-	 * However, the GPU and driver is now idle and we do not want to delay
-	 * switching to minimum voltage (reducing power whilst idle) as we do
-	 * not expect to be woken in the near future and so must flush the
-	 * change by waking the device.
-	 *
-	 * We choose to take the media powerwell (either would do to trick the
-	 * punit into committing the voltage change) as that takes a lot less
-	 * power than the render powerwell.
-	 */
-	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
-	err = valleyview_set_rps(dev_priv, val);
-	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
-
-	if (err)
-		DRM_ERROR("Failed to set RPS for idle\n");
+	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		return valleyview_set_rps(dev_priv, val);
+	else if (INTEL_GEN(dev_priv) >= 6)
+		return gen6_set_rps(dev_priv, val);
+	else
+		return 0;
 }
 
 static int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
@@ -426,20 +388,20 @@  static int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
 	int err;
 
 	lockdep_assert_held(&rps->lock);
+	GEM_BUG_ON(!rps->active);
 	GEM_BUG_ON(val > rps->max_freq);
 	GEM_BUG_ON(val < rps->min_freq);
 
-	if (!rps->enabled) {
+	err = __intel_set_rps(dev_priv, val);
+	if (err)
+		return err;
+
+	if (val != rps->cur_freq) {
+		trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 		rps->cur_freq = val;
-		return 0;
 	}
 
-	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		err = valleyview_set_rps(dev_priv, val);
-	else
-		err = gen6_set_rps(dev_priv, val);
-
-	return err;
+	return 0;
 }
 
 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
@@ -524,18 +486,11 @@  static void enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
-	if (READ_ONCE(rps->interrupts_enabled))
-		return;
-
 	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON_ONCE(rps->pm_iir);
-	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & rps->pm_events);
-	rps->interrupts_enabled = true;
 	gen6_enable_pm_irq(dev_priv, rps->pm_events);
-
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
@@ -543,29 +498,15 @@  static void disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
-	if (!READ_ONCE(rps->interrupts_enabled))
-		return;
-
 	if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	rps->interrupts_enabled = false;
-
 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
 	gen6_disable_pm_irq(dev_priv, rps->pm_events);
-
 	spin_unlock_irq(&dev_priv->irq_lock);
-	synchronize_irq(dev_priv->drm.irq);
 
-	/* Now that we will not be generating any more work, flush any
-	 * outstanding tasks. As we are called on the RPS idle path,
-	 * we will reset the GPU to minimum frequencies, so the current
-	 * state of the worker can be discarded.
-	 */
-	cancel_work_sync(&rps->work);
-	gen6_reset_rps_interrupts(dev_priv);
+	synchronize_irq(dev_priv->drm.irq);
 }
 
 static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -632,6 +573,9 @@  static void intel_rps_work(struct work_struct *work)
 
 	mutex_lock(&rps->lock);
 
+	if (!rps->active)
+		goto unlock;
+
 	min = rps->min_freq_softlimit;
 	max = rps->max_freq_softlimit;
 	if (client_boost && max < rps->boost_freq)
@@ -680,107 +624,125 @@  static void intel_rps_work(struct work_struct *work)
 		adj = 0;
 	}
 
-	mutex_unlock(&rps->lock);
-
 	if (pm_iir) {
 		spin_lock_irq(&i915->irq_lock);
-		if (rps->interrupts_enabled)
-			gen6_unmask_pm_irq(i915, rps->pm_events);
+		gen6_unmask_pm_irq(i915, rps->pm_events);
 		spin_unlock_irq(&i915->irq_lock);
 		rps->last_adj = adj;
 	}
+
+unlock:
+	mutex_unlock(&rps->lock);
 }
 
 void intel_gt_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
-	if (pm_iir & rps->pm_events) {
+	if (rps->active && pm_iir & rps->pm_events) {
 		spin_lock(&dev_priv->irq_lock);
 		gen6_mask_pm_irq(dev_priv, pm_iir & rps->pm_events);
-		if (rps->interrupts_enabled) {
-			rps->pm_iir |= pm_iir & rps->pm_events;
-			schedule_work(&rps->work);
-		}
+		rps->pm_iir |= pm_iir & rps->pm_events;
 		spin_unlock(&dev_priv->irq_lock);
+
+		schedule_work(&rps->work);
 	}
 }
 
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
+void intel_gt_pm_busy(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	u8 freq;
 
 	if (!HAS_RPS(dev_priv))
 		return;
 
-	mutex_lock(&rps->lock);
-	if (rps->enabled) {
-		u8 freq;
+	GEM_BUG_ON(rps->pm_iir);
+	GEM_BUG_ON(rps->active);
 
-		I915_WRITE(GEN6_PMINTRMSK,
-			   gen6_rps_pm_mask(dev_priv, rps->cur_freq));
+	mutex_lock(&rps->lock);
+	rps->active = true;
 
-		enable_rps_interrupts(dev_priv);
-		memset(&rps->ei, 0, sizeof(rps->ei));
+	/*
+	 * Use the user's desired frequency as a guide, but for better
+	 * performance, jump directly to RPe as our starting frequency.
+	 */
+	freq = max(rps->cur_freq, rps->efficient_freq);
+	if (intel_set_rps(dev_priv,
+			  clamp(freq,
+				rps->min_freq_softlimit,
+				rps->max_freq_softlimit)))
+		DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
 
-		/*
-		 * Use the user's desired frequency as a guide, but for better
-		 * performance, jump directly to RPe as our starting frequency.
-		 */
-		freq = max(rps->cur_freq,
-			   rps->efficient_freq);
+	rps->last_adj = 0;
 
-		if (intel_set_rps(dev_priv,
-				  clamp(freq,
-					rps->min_freq_softlimit,
-					rps->max_freq_softlimit)))
-			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
+	if (INTEL_GEN(dev_priv) >= 6) {
+		memset(&rps->ei, 0, sizeof(rps->ei));
+		enable_rps_interrupts(dev_priv);
 	}
+
 	mutex_unlock(&rps->lock);
 }
 
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
+void intel_gt_pm_idle(struct drm_i915_private *dev_priv)
 {
 	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 
-	if (!HAS_RPS(dev_priv))
+	if (!rps->active)
 		return;
 
-	/*
-	 * Flush our bottom-half so that it does not race with us
-	 * setting the idle frequency and so that it is bounded by
-	 * our rpm wakeref. And then disable the interrupts to stop any
-	 * futher RPS reclocking whilst we are asleep.
-	 */
+	mutex_lock(&rps->lock);
+
 	disable_rps_interrupts(dev_priv);
 
-	mutex_lock(&rps->lock);
-	if (rps->enabled) {
-		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-			vlv_set_rps_idle(dev_priv);
-		else
-			gen6_set_rps(dev_priv, rps->idle_freq);
-		rps->last_adj = 0;
+	if (rps->cur_freq > rps->idle_freq) {
+		/*
+		 * The punit delays the write of the frequency and voltage
+		 * until it determines the GPU is awake. During normal usage we
+		 * don't want to waste power changing the frequency if the GPU
+		 * is sleeping (rc6).  However, the GPU and driver is now idle
+		 * and we do not want to delay switching to minimum voltage
+		 * (reducing power whilst idle) as we do not expect to be woken
+		 * in the near future and so must flush the change by waking
+		 * the device.
+		 *
+		 * We choose to take the media powerwell (either would do to
+		 * trick the punit into committing the voltage change) as that
+		 * takes a lot less power than the render powerwell.
+		 */
+		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+		if (__intel_set_rps(dev_priv, rps->idle_freq))
+			DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
+		rps->cur_freq = rps->idle_freq;
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+	}
+
+	if (INTEL_GEN(dev_priv) >= 6) {
 		I915_WRITE(GEN6_PMINTRMSK,
 			   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
 	}
+
+	rps->last_adj = 0;
+	rps->active = false;
 	mutex_unlock(&rps->lock);
+
+	/*
+	 * Now that we will not be generating any more work, flush any
+	 * outstanding tasks. As we are called on the RPS idle path,
+	 * we will reset the GPU to minimum frequencies, so the current
+	 * state of the worker can be discarded.
+	 */
+	cancel_work_sync(&rps->work);
+	gen6_reset_rps_interrupts(dev_priv);
 }
 
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *client)
+void intel_rps_boost(struct i915_request *rq, struct intel_rps_client *client)
 {
 	struct intel_rps *rps = &rq->i915->gt_pm.rps;
 	unsigned long flags;
 	bool boost;
 
-	if (!HAS_RPS(rq->i915))
-		return;
-
-	/*
-	 * This is intentionally racy! We peek at the state here, then
-	 * validate inside the RPS worker.
-	 */
-	if (!rps->enabled)
+	if (!READ_ONCE(rps->active))
 		return;
 
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -992,20 +954,6 @@  static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void reset_rps(struct drm_i915_private *dev_priv,
-		      int (*set)(struct drm_i915_private *, u8))
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	u8 freq = rps->cur_freq;
-
-	/* force a reset */
-	rps->power = -1;
-	rps->cur_freq = -1;
-
-	if (set(dev_priv, freq))
-		DRM_ERROR("Failed to reset RPS to initial values\n");
-}
-
 /* See the Gen9_GT_PM_Programming_Guide doc for the below */
 static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 {
@@ -1027,7 +975,6 @@  static void gen9_enable_rps(struct drm_i915_private *dev_priv)
 	 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
 	 * RP_INTERRUPT_LIMITS & RPNSWREQ registers.
 	 */
-	reset_rps(dev_priv, gen6_set_rps);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -1197,8 +1144,6 @@  static void gen8_enable_rps(struct drm_i915_private *dev_priv)
 		   GEN6_RP_UP_BUSY_AVG |
 		   GEN6_RP_DOWN_IDLE_AVG);
 
-	reset_rps(dev_priv, gen6_set_rps);
-
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -1298,8 +1243,6 @@  static void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-	reset_rps(dev_priv, gen6_set_rps);
-
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -1813,8 +1756,6 @@  static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	reset_rps(dev_priv, valleyview_set_rps);
-
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -1899,8 +1840,6 @@  static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
 	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
-	reset_rps(dev_priv, valleyview_set_rps);
-
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -2385,10 +2324,7 @@  static void intel_init_emon(struct drm_i915_private *dev_priv)
 
 void intel_gt_pm_sanitize(struct drm_i915_private *dev_priv)
 {
-	dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
 	intel_gt_pm_disable_rps(dev_priv);
-
-	dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */
 	intel_gt_pm_disable_rc6(dev_priv);
 
 	if (INTEL_GEN(dev_priv) < 11)
@@ -2487,9 +2423,6 @@  static void __enable_rc6(struct drm_i915_private *dev_priv)
 {
 	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
 
-	if (dev_priv->gt_pm.rc6.enabled)
-		return;
-
 	if (IS_CHERRYVIEW(dev_priv))
 		cherryview_enable_rc6(dev_priv);
 	else if (IS_VALLEYVIEW(dev_priv))
@@ -2500,8 +2433,6 @@  static void __enable_rc6(struct drm_i915_private *dev_priv)
 		gen8_enable_rc6(dev_priv);
 	else if (INTEL_GEN(dev_priv) >= 6)
 		gen6_enable_rc6(dev_priv);
-
-	dev_priv->gt_pm.rc6.enabled = true;
 }
 
 static void __enable_rps(struct drm_i915_private *dev_priv)
@@ -2510,9 +2441,6 @@  static void __enable_rps(struct drm_i915_private *dev_priv)
 
 	lockdep_assert_held(&rps->lock);
 
-	if (rps->enabled)
-		return;
-
 	if (IS_CHERRYVIEW(dev_priv)) {
 		cherryview_enable_rps(dev_priv);
 	} else if (IS_VALLEYVIEW(dev_priv)) {
@@ -2536,7 +2464,12 @@  static void __enable_rps(struct drm_i915_private *dev_priv)
 	WARN_ON(rps->efficient_freq < rps->min_freq);
 	WARN_ON(rps->efficient_freq > rps->max_freq);
 
-	rps->enabled = true;
+	/* Force a reset */
+	rps->cur_freq = rps->max_freq;
+	rps->power = -1;
+	__intel_set_rps(dev_priv, rps->idle_freq);
+
+	rps->cur_freq = rps->idle_freq;
 }
 
 void intel_gt_pm_enable_rc6(struct drm_i915_private *dev_priv)
@@ -2563,9 +2496,6 @@  static void __disable_rc6(struct drm_i915_private *dev_priv)
 {
 	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
 
-	if (!dev_priv->gt_pm.rc6.enabled)
-		return;
-
 	if (INTEL_GEN(dev_priv) >= 9)
 		gen9_disable_rc6(dev_priv);
 	else if (IS_CHERRYVIEW(dev_priv))
@@ -2574,8 +2504,6 @@  static void __disable_rc6(struct drm_i915_private *dev_priv)
 		valleyview_disable_rc6(dev_priv);
 	else if (INTEL_GEN(dev_priv) >= 6)
 		gen6_disable_rc6(dev_priv);
-
-	dev_priv->gt_pm.rc6.enabled = false;
 }
 
 void intel_gt_pm_disable_rc6(struct drm_i915_private *dev_priv)
@@ -2589,9 +2517,6 @@  static void __disable_rps(struct drm_i915_private *dev_priv)
 {
 	lockdep_assert_held(&dev_priv->gt_pm.rps.lock);
 
-	if (!dev_priv->gt_pm.rps.enabled)
-		return;
-
 	if (INTEL_GEN(dev_priv) >= 9)
 		gen9_disable_rps(dev_priv);
 	else if (IS_CHERRYVIEW(dev_priv))
@@ -2602,8 +2527,6 @@  static void __disable_rps(struct drm_i915_private *dev_priv)
 		gen6_disable_rps(dev_priv);
 	else if (INTEL_GEN(dev_priv) >= 5)
 		ironlake_disable_drps(dev_priv);
-
-	dev_priv->gt_pm.rps.enabled = false;
 }
 
 void intel_gt_pm_disable_rps(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_gt_pm.h b/drivers/gpu/drm/i915/intel_gt_pm.h
index 5975c63f46bf..314912c15126 100644
--- a/drivers/gpu/drm/i915/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/intel_gt_pm.h
@@ -42,11 +42,12 @@  void intel_gt_pm_disable_rps(struct drm_i915_private *dev_priv);
 void intel_gt_pm_enable_rc6(struct drm_i915_private *dev_priv);
 void intel_gt_pm_disable_rc6(struct drm_i915_private *dev_priv);
 
+void intel_gt_pm_busy(struct drm_i915_private *dev_priv);
+void intel_gt_pm_idle(struct drm_i915_private *dev_priv);
+
 void intel_gt_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
 
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
+void intel_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
 
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);