@@ -2191,9 +2191,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct drm_file *file;
- seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
+ seq_printf(m, "RPS active? %s\n", yesno(rps->active));
seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
@@ -2226,9 +2226,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
atomic_read(&rps->boosts));
mutex_unlock(&dev->filelist_mutex);
- if (INTEL_GEN(dev_priv) >= 6 &&
- rps->enabled &&
- dev_priv->gt.active_requests) {
+ if (INTEL_GEN(dev_priv) >= 6 && dev_priv->gt.awake) {
u32 rpup, rpupei;
u32 rpdown, rpdownei;
@@ -2588,9 +2588,6 @@ static int intel_runtime_suspend(struct device *kdev)
struct drm_i915_private *dev_priv = to_i915(dev);
int ret;
- if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv))))
- return -ENODEV;
-
if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
@@ -151,10 +151,9 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
i915_pmu_gt_parked(i915);
i915_vma_parked(i915);
- i915->gt.awake = false;
+ intel_gt_pm_idle(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_idle(i915);
+ i915->gt.awake = false;
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
@@ -202,9 +201,9 @@ void i915_gem_unpark(struct drm_i915_private *i915)
if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
i915->gt.epoch = 1;
+ intel_gt_pm_busy(i915);
i915_update_gfx_val(i915);
- if (INTEL_GEN(i915) >= 6)
- gen6_rps_busy(i915);
+
i915_pmu_gt_unparked(i915);
intel_engines_unpark(i915);
@@ -465,10 +464,8 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
* forcing the clocks too high for the whole system, we only allow
* each client to waitboost once in a busy period.
*/
- if (rps_client && !i915_request_started(rq)) {
- if (INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq, rps_client);
- }
+ if (rps_client && !i915_request_started(rq))
+ intel_rps_boost(rq, rps_client);
timeout = i915_request_wait(rq, flags, timeout);
@@ -377,7 +377,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
intel_gpu_freq(dev_priv, val));
rps->max_freq_softlimit = val;
- schedule_work(&rps->work);
+ if (rps->active)
+ schedule_work(&rps->work);
unlock:
mutex_unlock(&rps->lock);
@@ -419,7 +420,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
}
rps->min_freq_softlimit = val;
- schedule_work(&rps->work);
+ if (rps->active)
+ schedule_work(&rps->work);
unlock:
mutex_unlock(&rps->lock);
@@ -12799,7 +12799,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
- gen6_rps_boost(rq, NULL);
+ intel_rps_boost(rq, NULL);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -12817,7 +12817,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
if (!dma_fence_is_i915(fence))
return;
- if (INTEL_GEN(to_i915(crtc->dev)) < 6)
+ if (!HAS_RPS(to_i915(crtc->dev)))
return;
if (drm_crtc_vblank_get(crtc))
@@ -327,15 +327,11 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *i915, u8 val)
*/
static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
- if (val != rps->cur_freq) {
+ if (val != dev_priv->gt_pm.rps.cur_freq) {
if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(GEN6_RPNSWREQ,
- GEN9_FREQUENCY(val));
+ I915_WRITE(GEN6_RPNSWREQ, GEN9_FREQUENCY(val));
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- I915_WRITE(GEN6_RPNSWREQ,
- HSW_FREQUENCY(val));
+ I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(val));
else
I915_WRITE(GEN6_RPNSWREQ,
GEN6_FREQUENCY(val) |
@@ -352,9 +348,6 @@ static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
- rps->cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
return 0;
}
@@ -377,48 +370,17 @@ static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
gen6_set_rps_thresholds(dev_priv, val);
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
- dev_priv->gt_pm.rps.cur_freq = val;
- trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
-
return 0;
}
-/*
- * vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
- *
- * If Gfx is Idle, then
- * 1. Forcewake Media well.
- * 2. Request idle freq.
- * 3. Release Forcewake of Media well.
- */
-static void vlv_set_rps_idle(struct drm_i915_private *i915)
+static int __intel_set_rps(struct drm_i915_private *i915, u8 val)
{
- struct intel_rps *rps = &i915->gt_pm.rps;
- u32 val = rps->idle_freq;
- int err;
-
- if (rps->cur_freq <= val)
- return;
-
- /*
- * The punit delays the write of the frequency and voltage until it
- * determines the GPU is awake. During normal usage we don't want to
- * waste power changing the frequency if the GPU is sleeping (rc6).
- * However, the GPU and driver is now idle and we do not want to delay
- * switching to minimum voltage (reducing power whilst idle) as we do
- * not expect to be woken in the near future and so must flush the
- * change by waking the device.
- *
- * We choose to take the media powerwell (either would do to trick the
- * punit into committing the voltage change) as that takes a lot less
- * power than the render powerwell.
- */
- intel_uncore_forcewake_get(i915, FORCEWAKE_MEDIA);
- err = valleyview_set_rps(i915, val);
- intel_uncore_forcewake_put(i915, FORCEWAKE_MEDIA);
-
- if (err)
- DRM_ERROR("Failed to set RPS for idle\n");
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return valleyview_set_rps(i915, val);
+ else if (INTEL_GEN(i915) >= 6)
+ return gen6_set_rps(i915, val);
+ else
+ return 0;
}
static int intel_set_rps(struct drm_i915_private *i915, u8 val)
@@ -427,20 +389,20 @@ static int intel_set_rps(struct drm_i915_private *i915, u8 val)
int err;
lockdep_assert_held(&rps->lock);
+ GEM_BUG_ON(!rps->active);
GEM_BUG_ON(val > rps->max_freq);
GEM_BUG_ON(val < rps->min_freq);
- if (!rps->enabled) {
+ err = __intel_set_rps(i915, val);
+ if (err)
+ return err;
+
+ if (val != rps->cur_freq) {
+ trace_intel_gpu_freq_change(intel_gpu_freq(i915, val));
rps->cur_freq = val;
- return 0;
}
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- err = valleyview_set_rps(i915, val);
- else
- err = gen6_set_rps(i915, val);
-
- return err;
+ return 0;
}
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
@@ -538,18 +500,11 @@ static void enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- if (READ_ONCE(rps->interrupts_enabled))
- return;
-
if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
return;
spin_lock_irq(&dev_priv->irq_lock);
- WARN_ON_ONCE(rps->pm_iir);
- WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & rps->pm_events);
- rps->interrupts_enabled = true;
gen6_enable_pm_irq(dev_priv, rps->pm_events);
-
spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -557,29 +512,15 @@ static void disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- if (!READ_ONCE(rps->interrupts_enabled))
- return;
-
if (WARN_ON_ONCE(IS_GEN11(dev_priv)))
return;
spin_lock_irq(&dev_priv->irq_lock);
- rps->interrupts_enabled = false;
-
I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
gen6_disable_pm_irq(dev_priv, rps->pm_events);
-
spin_unlock_irq(&dev_priv->irq_lock);
- synchronize_irq(dev_priv->drm.irq);
- /* Now that we will not be generating any more work, flush any
- * outstanding tasks. As we are called on the RPS idle path,
- * we will reset the GPU to minimum frequencies, so the current
- * state of the worker can be discarded.
- */
- cancel_work_sync(&rps->work);
- gen6_reset_rps_interrupts(dev_priv);
+ synchronize_irq(dev_priv->drm.irq);
}
static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -646,6 +587,9 @@ static void intel_rps_work(struct work_struct *work)
mutex_lock(&rps->lock);
+ if (!rps->active)
+ goto unlock;
+
min = rps->min_freq_softlimit;
max = rps->max_freq_softlimit;
if (client_boost && max < rps->boost_freq)
@@ -694,106 +638,125 @@ static void intel_rps_work(struct work_struct *work)
adj = 0;
}
- mutex_unlock(&rps->lock);
-
if (pm_iir) {
spin_lock_irq(&i915->irq_lock);
- if (rps->interrupts_enabled)
- gen6_unmask_pm_irq(i915, rps->pm_events);
+ gen6_unmask_pm_irq(i915, rps->pm_events);
spin_unlock_irq(&i915->irq_lock);
rps->last_adj = adj;
}
+
+unlock:
+ mutex_unlock(&rps->lock);
}
void intel_gt_pm_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- if (pm_iir & rps->pm_events) {
+ if (rps->active && pm_iir & rps->pm_events) {
spin_lock(&dev_priv->irq_lock);
gen6_mask_pm_irq(dev_priv, pm_iir & rps->pm_events);
- if (rps->interrupts_enabled) {
- rps->pm_iir |= pm_iir & rps->pm_events;
- schedule_work(&rps->work);
- }
+ rps->pm_iir |= pm_iir & rps->pm_events;
spin_unlock(&dev_priv->irq_lock);
+
+ schedule_work(&rps->work);
}
}
-void gen6_rps_busy(struct drm_i915_private *dev_priv)
+void intel_gt_pm_busy(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ u8 freq;
if (!HAS_RPS(dev_priv))
return;
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- u8 freq;
+ GEM_BUG_ON(rps->pm_iir);
+ GEM_BUG_ON(rps->active);
- I915_WRITE(GEN6_PMINTRMSK,
- gen6_rps_pm_mask(dev_priv, rps->cur_freq));
+ mutex_lock(&rps->lock);
+ rps->active = true;
- enable_rps_interrupts(dev_priv);
- memset(&rps->ei, 0, sizeof(rps->ei));
+ /*
+ * Use the user's desired frequency as a guide, but for better
+ * performance, jump directly to RPe as our starting frequency.
+ */
+ freq = max(rps->cur_freq, rps->efficient_freq);
+ if (intel_set_rps(dev_priv,
+ clamp(freq,
+ rps->min_freq_softlimit,
+ rps->max_freq_softlimit)))
+ DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
- /*
- * Use the user's desired frequency as a guide, but for better
- * performance, jump directly to RPe as our starting frequency.
- */
- freq = max(rps->cur_freq, rps->efficient_freq);
+ rps->last_adj = 0;
- if (intel_set_rps(dev_priv,
- clamp(freq,
- rps->min_freq_softlimit,
- rps->max_freq_softlimit)))
- DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
+ if (INTEL_GEN(dev_priv) >= 6) {
+ memset(&rps->ei, 0, sizeof(rps->ei));
+ enable_rps_interrupts(dev_priv);
}
+
mutex_unlock(&rps->lock);
}
-void gen6_rps_idle(struct drm_i915_private *dev_priv)
+void intel_gt_pm_idle(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- if (!HAS_RPS(dev_priv))
+ if (!rps->active)
return;
- /*
- * Flush our bottom-half so that it does not race with us
- * setting the idle frequency and so that it is bounded by
- * our rpm wakeref. And then disable the interrupts to stop any
- * futher RPS reclocking whilst we are asleep.
- */
+ mutex_lock(&rps->lock);
+
disable_rps_interrupts(dev_priv);
- mutex_lock(&rps->lock);
- if (rps->enabled) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_set_rps_idle(dev_priv);
- else
- gen6_set_rps(dev_priv, rps->idle_freq);
- rps->last_adj = 0;
+ if (rps->cur_freq > rps->idle_freq) {
+ /*
+ * The punit delays the write of the frequency and voltage
+ * until it determines the GPU is awake. During normal usage we
+ * don't want to waste power changing the frequency if the GPU
+ * is sleeping (rc6). However, the GPU and driver is now idle
+ * and we do not want to delay switching to minimum voltage
+ * (reducing power whilst idle) as we do not expect to be woken
+ * in the near future and so must flush the change by waking
+ * the device.
+ *
+ * We choose to take the media powerwell (either would do to
+ * trick the punit into committing the voltage change) as that
+ * takes a lot less power than the render powerwell.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+ if (__intel_set_rps(dev_priv, rps->idle_freq))
+ DRM_DEBUG_DRIVER("Failed to set idle frequency\n");
+ rps->cur_freq = rps->idle_freq;
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 6) {
I915_WRITE(GEN6_PMINTRMSK,
gen6_sanitize_rps_pm_mask(dev_priv, ~0));
}
+
+ rps->last_adj = 0;
+ rps->active = false;
mutex_unlock(&rps->lock);
+
+ /*
+ * Now that we will not be generating any more work, flush any
+ * outstanding tasks. As we are called on the RPS idle path,
+ * we will reset the GPU to minimum frequencies, so the current
+ * state of the worker can be discarded.
+ */
+ cancel_work_sync(&rps->work);
+ gen6_reset_rps_interrupts(dev_priv);
}
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *client)
+void intel_rps_boost(struct i915_request *rq, struct intel_rps_client *client)
{
struct intel_rps *rps = &rq->i915->gt_pm.rps;
unsigned long flags;
bool boost;
- if (!HAS_RPS(rq->i915))
- return;
-
- /*
- * This is intentionally racy! We peek at the state here, then
- * validate inside the RPS worker.
- */
- if (!rps->enabled)
+ if (!READ_ONCE(rps->active))
return;
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
@@ -1005,20 +968,6 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
}
}
-static void reset_rps(struct drm_i915_private *i915,
- int (*set)(struct drm_i915_private *, u8))
-{
- struct intel_rps *rps = &i915->gt_pm.rps;
- u8 freq = rps->cur_freq;
-
- /* force a reset */
- rps->power = -1;
- rps->cur_freq = -1;
-
- if (set(i915, freq))
- DRM_ERROR("Failed to reset RPS to initial values\n");
-}
-
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
@@ -1040,7 +989,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
* Up/Down EI & threshold registers, as well as the RP_CONTROL,
* RP_INTERRUPT_LIMITS & RPNSWREQ registers.
*/
- reset_rps(dev_priv, gen6_set_rps);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -1210,8 +1158,6 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
- reset_rps(dev_priv, gen6_set_rps);
-
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -1311,8 +1257,6 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
- reset_rps(dev_priv, gen6_set_rps);
-
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -1829,8 +1773,6 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
- reset_rps(dev_priv, valleyview_set_rps);
-
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -1915,8 +1857,6 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
- reset_rps(dev_priv, valleyview_set_rps);
-
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
@@ -2403,11 +2343,7 @@ static void intel_init_emon(struct drm_i915_private *dev_priv)
void intel_gt_pm_sanitize(struct drm_i915_private *i915)
{
intel_gt_pm_disable_llc(i915);
-
- i915->gt_pm.rc6.enabled = true; /* force RC6 disabling */
intel_gt_pm_disable_rc6(i915);
-
- i915->gt_pm.rps.enabled = true; /* force RPS disabling */
intel_gt_pm_disable_rps(i915);
if (INTEL_GEN(i915) >= 11)
@@ -2507,9 +2443,6 @@ static void __enable_rc6(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->gt_pm.rps.lock);
- if (i915->gt_pm.rc6.enabled)
- return;
-
if (IS_CHERRYVIEW(i915))
cherryview_enable_rc6(i915);
else if (IS_VALLEYVIEW(i915))
@@ -2520,8 +2453,6 @@ static void __enable_rc6(struct drm_i915_private *i915)
gen8_enable_rc6(i915);
else if (INTEL_GEN(i915) >= 6)
gen6_enable_rc6(i915);
-
- i915->gt_pm.rc6.enabled = true;
}
void intel_gt_pm_enable_rc6(struct drm_i915_private *i915)
@@ -2540,9 +2471,6 @@ static void __enable_rps(struct drm_i915_private *i915)
lockdep_assert_held(&rps->lock);
- if (rps->enabled)
- return;
-
if (IS_CHERRYVIEW(i915)) {
cherryview_enable_rps(i915);
} else if (IS_VALLEYVIEW(i915)) {
@@ -2564,7 +2492,12 @@ static void __enable_rps(struct drm_i915_private *i915)
WARN_ON(rps->efficient_freq < rps->min_freq);
WARN_ON(rps->efficient_freq > rps->max_freq);
- rps->enabled = true;
+ /* Force a reset */
+ rps->cur_freq = rps->max_freq;
+ rps->power = -1;
+ __intel_set_rps(i915, rps->idle_freq);
+
+ rps->cur_freq = rps->idle_freq;
}
void intel_gt_pm_enable_rps(struct drm_i915_private *i915)
@@ -2581,11 +2514,7 @@ static void __enable_llc(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->gt_pm.rps.lock);
- if (i915->gt_pm.llc_pstate.enabled)
- return;
-
gen6_update_ring_freq(i915);
- i915->gt_pm.llc_pstate.enabled = true;
}
void intel_gt_pm_enable_llc(struct drm_i915_private *i915)
@@ -2602,9 +2531,6 @@ static void __disable_rc6(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->gt_pm.rps.lock);
- if (!i915->gt_pm.rc6.enabled)
- return;
-
if (INTEL_GEN(i915) >= 9)
gen9_disable_rc6(i915);
else if (IS_CHERRYVIEW(i915))
@@ -2613,8 +2539,6 @@ static void __disable_rc6(struct drm_i915_private *i915)
valleyview_disable_rc6(i915);
else if (INTEL_GEN(i915) >= 6)
gen6_disable_rc6(i915);
-
- i915->gt_pm.rc6.enabled = false;
}
void intel_gt_pm_disable_rc6(struct drm_i915_private *i915)
@@ -2628,9 +2552,6 @@ static void __disable_rps(struct drm_i915_private *i915)
{
lockdep_assert_held(&i915->gt_pm.rps.lock);
- if (!i915->gt_pm.rps.enabled)
- return;
-
if (INTEL_GEN(i915) >= 9)
gen9_disable_rps(i915);
else if (IS_CHERRYVIEW(i915))
@@ -2641,8 +2562,6 @@ static void __disable_rps(struct drm_i915_private *i915)
gen6_disable_rps(i915);
else if (INTEL_GEN(i915) >= 5)
ironlake_disable_drps(i915);
-
- i915->gt_pm.rps.enabled = false;
}
void intel_gt_pm_disable_rps(struct drm_i915_private *i915)
@@ -2652,21 +2571,9 @@ void intel_gt_pm_disable_rps(struct drm_i915_private *i915)
mutex_unlock(&i915->gt_pm.rps.lock);
}
-static void __disable_llc(struct drm_i915_private *i915)
-{
- lockdep_assert_held(&i915->gt_pm.rps.lock);
-
- if (!i915->gt_pm.llc_pstate.enabled)
- return;
-
- i915->gt_pm.llc_pstate.enabled = false;
-}
-
void intel_gt_pm_disable_llc(struct drm_i915_private *i915)
{
- mutex_lock(&i915->gt_pm.rps.lock);
- __disable_llc(i915);
- mutex_unlock(&i915->gt_pm.rps.lock);
+ /* Nothing to do here. */
}
void intel_gt_pm_fini(struct drm_i915_private *i915)
@@ -19,14 +19,10 @@ struct intel_rps_ei {
struct intel_rps {
struct mutex lock;
-
- /*
- * work, interrupts_enabled and pm_iir are protected by
- * i915->irq_lock
- */
struct work_struct work;
- bool interrupts_enabled;
- u32 pm_iir;
+
+ bool active;
+ u32 pm_iir; /* protected by i915->irq_lock */
/* PM interrupt bits that should never be masked */
u32 pm_intrmsk_mbz;
@@ -63,7 +59,6 @@ struct intel_rps {
int last_adj;
enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
- bool enabled;
atomic_t num_waiters;
atomic_t boosts;
@@ -72,19 +67,13 @@ struct intel_rps {
};
struct intel_rc6 {
- bool enabled;
u64 prev_hw_residency[4];
u64 cur_residency[4];
};
-struct intel_llc_pstate {
- bool enabled;
-};
-
struct intel_gt_pm {
- struct intel_rps rps;
struct intel_rc6 rc6;
- struct intel_llc_pstate llc_pstate;
+ struct intel_rps rps;
u32 imr;
u32 ier;
@@ -108,11 +97,12 @@ void intel_gt_pm_disable_rc6(struct drm_i915_private *i915);
void intel_gt_pm_enable_llc(struct drm_i915_private *i915);
void intel_gt_pm_disable_llc(struct drm_i915_private *i915);
+void intel_gt_pm_busy(struct drm_i915_private *i915);
+void intel_gt_pm_idle(struct drm_i915_private *i915);
+
void intel_gt_pm_irq_handler(struct drm_i915_private *i915, u32 pm_iir);
-void gen6_rps_busy(struct drm_i915_private *i915);
-void gen6_rps_idle(struct drm_i915_private *i915);
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
+void intel_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
int intel_gpu_freq(const struct drm_i915_private *i915, int val);
int intel_freq_opcode(const struct drm_i915_private *i915, int val);
Since rc6 and rps are enabled (if available) whenever the GT is awake, we can
remove the individual enabled tracking and move the enabling into the
gen6_rps_busy()/gen6_rps_idle() (now called intel_gt_pm_busy() and
intel_gt_pm_idle()) entry points.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c  |   6 +-
 drivers/gpu/drm/i915/i915_drv.c      |   3 -
 drivers/gpu/drm/i915/i915_gem.c      |  15 +-
 drivers/gpu/drm/i915/i915_sysfs.c    |   6 +-
 drivers/gpu/drm/i915/intel_display.c |   4 +-
 drivers/gpu/drm/i915/intel_gt_pm.c   | 291 +++++++++------------------
 drivers/gpu/drm/i915/intel_gt_pm.h   |  26 +--
 7 files changed, 121 insertions(+), 230 deletions(-)
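
To make the resulting RPS lifecycle easier to review, here is a minimal,
stand-alone C model of the busy/idle/boost transitions this patch introduces.
It is a sketch only, not kernel code: struct rps_model, clamp_freq() and the
example MHz values are illustrative, and the locking, interrupt masking and
forcewake handling of the real intel_gt_pm_busy()/intel_gt_pm_idle() are
deliberately omitted.

/*
 * Stand-alone model of the reworked RPS lifecycle (not kernel code).
 * It mirrors the ordering in the patch: unpark -> intel_gt_pm_busy(),
 * park -> intel_gt_pm_idle(), with waitboosts honoured only while active.
 */
#include <stdbool.h>
#include <stdio.h>

struct rps_model {
	bool active;			/* replaces rps->enabled + interrupts_enabled */
	unsigned int cur_freq;
	unsigned int idle_freq;
	unsigned int efficient_freq;	/* RPe */
	unsigned int boost_freq;
	unsigned int min_soft, max_soft;
};

static unsigned int clamp_freq(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

static void gt_pm_busy(struct rps_model *rps)
{
	unsigned int freq;

	rps->active = true;

	/* Use the last requested frequency as a guide, but start at RPe. */
	freq = rps->cur_freq > rps->efficient_freq ?
	       rps->cur_freq : rps->efficient_freq;
	rps->cur_freq = clamp_freq(freq, rps->min_soft, rps->max_soft);
}

static void gt_pm_idle(struct rps_model *rps)
{
	if (!rps->active)
		return;

	/* Drop to the idle frequency before sleeping; nothing else to track. */
	if (rps->cur_freq > rps->idle_freq)
		rps->cur_freq = rps->idle_freq;
	rps->active = false;
}

static void rps_boost(struct rps_model *rps)
{
	unsigned int max = rps->max_soft;

	/* Boosts are ignored unless RPS is active (i.e. the GT is awake). */
	if (!rps->active)
		return;

	/* A client boost may lift the ceiling up to the boost frequency. */
	if (max < rps->boost_freq)
		max = rps->boost_freq;
	rps->cur_freq = clamp_freq(rps->boost_freq, rps->min_soft, max);
}

int main(void)
{
	struct rps_model rps = {
		.idle_freq = 300, .efficient_freq = 700, .boost_freq = 1100,
		.min_soft = 300, .max_soft = 1100, .cur_freq = 300,
	};

	gt_pm_busy(&rps);	/* unpark */
	printf("busy:  %u MHz\n", rps.cur_freq);
	rps_boost(&rps);	/* waitboost while active */
	printf("boost: %u MHz\n", rps.cur_freq);
	gt_pm_idle(&rps);	/* park */
	printf("idle:  %u MHz\n", rps.cur_freq);
	return 0;
}

Built with a plain cc, the model prints the frequency after each transition,
which matches the behaviour the patch aims for: jump to RPe on unpark, honour
waitboosts only while the GT is awake, and fall back to the idle frequency on
park, with a single rps->active flag standing in for the removed enabled and
interrupts_enabled bookkeeping.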