
drm/i915: Don't try to use DDR DVFS on CHV when disabled in the BIOS

Message ID 1441735512-31652-1-git-send-email-ville.syrjala@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Ville Syrjälä Sept. 8, 2015, 6:05 p.m. UTC
From: Ville Syrjälä <ville.syrjala@linux.intel.com>

If one disables DDR DVFS in the BIOS, the Punit will apparently ignore
all DDR DVFS requests. Currently we assume that DDR DVFS is always
operational, which leads to errors in dmesg when the DDR DVFS requests
time out.

Fix the problem by gently prodding the Punit during driver load to find
out whether it will respond to DDR DVFS requests. If the request times
out, we assume that DDR DVFS has been permanently disabled in the BIOS
and no longer pester the Punit about it.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91629
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |  2 ++
 drivers/gpu/drm/i915/intel_pm.c | 42 ++++++++++++++++++++++++++++-------------
 2 files changed, 31 insertions(+), 13 deletions(-)
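
In short, the probe boils down to the following (a condensed sketch of the
vlv_wm_get_hw_state() hunk shown in the patch below, reproduced here only to
summarize the idea; all identifiers come from the patch itself):

	/* Poke the Punit with a no-op DDR DVFS request: set only the
	 * REQ_ACK bit and leave the HIGH/LOW frequency bits untouched,
	 * so the current DDR frequency selection is not changed. */
	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	/* If the ack bit never clears, the BIOS has disabled DDR DVFS;
	 * cap the deepest watermark level at PM5 so the driver never
	 * issues DDR DVFS requests at runtime. */
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
		DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
			      "assuming DDR DVFS is disabled\n");
		dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
	}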

Comments

Chris Wilson Sept. 8, 2015, 8:16 p.m. UTC | #1
On Tue, Sep 08, 2015 at 09:05:12PM +0300, ville.syrjala@linux.intel.com wrote:
> From: Ville Syrjälä <ville.syrjala@linux.intel.com>
> 
> If one disables DDR DVFS in the BIOS, Punit will apparently ignores
> all DDR DVFS request. Currently we assume that DDR DVFS is always
> operational, which leads to errors in dmesg when the DDR DVFS requests
> time out.
> 
> Fix the problem by gently prodding Punit during driver load to find out
> whether it will respond to DDR DVFS requests. If the request times out,
> we assume that DDR DVFS has been permanenly disabled in the BIOS and
> no longer perster the Punit about it.
> 
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91629
> Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>

Code looks clean (just a minor duplicated vlv_read(DDR_SETUP2) in
setup). Would it make sense to disable dvfs after a failure as well,
then the user is shown a single DRM_ERROR at runtime and we should
recover (by not going to the full WM next time)?
-Chris
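
For illustration, a rough sketch of what that suggestion could look like,
assuming the runtime request is issued from a helper along the lines of
chv_set_memory_dvfs() (that placement is an assumption of this sketch, and
Ville declines the idea below, so nothing like it is part of the patch):

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");
		/* Assumed recovery path: stop asking for DDR DVFS so the
		 * next watermark update falls back to the PM5 level and
		 * the error is reported only once. */
		dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
	}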
Taylor, Clinton A Sept. 8, 2015, 8:34 p.m. UTC | #2
On 09/08/2015 11:05 AM, ville.syrjala@linux.intel.com wrote:
> From: Ville Syrjälä <ville.syrjala@linux.intel.com>
>
> If one disables DDR DVFS in the BIOS, Punit will apparently ignores
> all DDR DVFS request. Currently we assume that DDR DVFS is always
> operational, which leads to errors in dmesg when the DDR DVFS requests
> time out.
>
> Fix the problem by gently prodding Punit during driver load to find out
> whether it will respond to DDR DVFS requests. If the request times out,
> we assume that DDR DVFS has been permanenly disabled in the BIOS and
> no longer perster the Punit about it.
>
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91629
> Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
> ---
>   drivers/gpu/drm/i915/i915_drv.h |  2 ++
>   drivers/gpu/drm/i915/intel_pm.c | 42 ++++++++++++++++++++++++++++-------------
>   2 files changed, 31 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index cf1880e..c2af7d8 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1940,6 +1940,8 @@ struct drm_i915_private {
>   			struct skl_wm_values skl_hw;
>   			struct vlv_wm_values vlv;
>   		};
> +
> +		uint8_t max_level;
>   	} wm;
>
>   	struct i915_runtime_pm pm;
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 64bc77e..1f6b5bb 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -961,8 +961,6 @@ enum vlv_wm_level {
>   	VLV_WM_LEVEL_PM2,
>   	VLV_WM_LEVEL_PM5,
>   	VLV_WM_LEVEL_DDR_DVFS,
> -	CHV_WM_NUM_LEVELS,
> -	VLV_WM_NUM_LEVELS = 1,
>   };
>
>   /* latency must be in 0.1us units. */
> @@ -988,9 +986,13 @@ static void vlv_setup_wm_latency(struct drm_device *dev)
>   	/* all latencies in usec */
>   	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
>
> +	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
> +
>   	if (IS_CHERRYVIEW(dev_priv)) {
>   		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
>   		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
> +
> +		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
>   	}
>   }
>
> @@ -1143,10 +1145,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
>   	memset(wm_state, 0, sizeof(*wm_state));
>
>   	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
> -	if (IS_CHERRYVIEW(dev))
> -		wm_state->num_levels = CHV_WM_NUM_LEVELS;
> -	else
> -		wm_state->num_levels = VLV_WM_NUM_LEVELS;
> +	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
>
>   	wm_state->num_active_planes = 0;
>
> @@ -1226,7 +1225,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
>   	}
>
>   	/* clear any (partially) filled invalid levels */
> -	for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
> +	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
>   		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
>   		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
>   	}
> @@ -1330,10 +1329,7 @@ static void vlv_merge_wm(struct drm_device *dev,
>   	struct intel_crtc *crtc;
>   	int num_active_crtcs = 0;
>
> -	if (IS_CHERRYVIEW(dev))
> -		wm->level = VLV_WM_LEVEL_DDR_DVFS;
> -	else
> -		wm->level = VLV_WM_LEVEL_PM2;
> +	wm->level = to_i915(dev)->wm.max_level;
>   	wm->cxsr = true;
>
>   	for_each_intel_crtc(dev, crtc) {
> @@ -4090,9 +4086,29 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
>   		if (val & DSP_MAXFIFO_PM5_ENABLE)
>   			wm->level = VLV_WM_LEVEL_PM5;
>
> +		/*
> +		 * If DDR DVFS is disabled in the BIOS, Punit
> +		 * will never ack the request. So if that happens
> +		 * assume we don't have to enable/disable DDR DVFS
> +		 * dynamically. To test that just set the REQ_ACK
> +		 * bit to poke the Punit, but don't change the
> +		 * HIGH/LOW bits so that we don't actually change
> +		 * the current state.
> +		 */
>   		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
> -		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
> -			wm->level = VLV_WM_LEVEL_DDR_DVFS;
> +		val |= FORCE_DDR_FREQ_REQ_ACK;
> +		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
> +
> +		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
> +			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
> +			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
> +				      "assuming DDR DVFS is disabled\n");
> +			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
> +		} else {
> +			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
> +			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
> +				wm->level = VLV_WM_LEVEL_DDR_DVFS;
> +		}
>
>   		mutex_unlock(&dev_priv->rps.hw_lock);
>   	}
>

Nice.

Reviewed-by: Clint Taylor <Clinton.A.Taylor@intel.com>
Tested-by: Clint Taylor <Clinton.A.Taylor@intel.com>
Ville Syrjälä Sept. 9, 2015, 3:28 p.m. UTC | #3
On Tue, Sep 08, 2015 at 09:16:11PM +0100, Chris Wilson wrote:
> On Tue, Sep 08, 2015 at 09:05:12PM +0300, ville.syrjala@linux.intel.com wrote:
> > From: Ville Syrjälä <ville.syrjala@linux.intel.com>
> > 
> > If one disables DDR DVFS in the BIOS, Punit will apparently ignores
> > all DDR DVFS request. Currently we assume that DDR DVFS is always
> > operational, which leads to errors in dmesg when the DDR DVFS requests
> > time out.
> > 
> > Fix the problem by gently prodding Punit during driver load to find out
> > whether it will respond to DDR DVFS requests. If the request times out,
> > we assume that DDR DVFS has been permanenly disabled in the BIOS and
> > no longer perster the Punit about it.
> > 
> > Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91629
> > Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
> 
> Code looks clean (just a minor duplicated vlv_read(DDR_SETUP2) in
> setup).

Yeah, I suppose I could have reused the original value read, or reassigned
val also in wait_for().
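
A minimal sketch of the latter option, reassigning val inside the wait_for()
condition so the extra read in the else branch goes away (illustrative only,
not what was merged):

	if (wait_for(((val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2)) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
		DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
			      "assuming DDR DVFS is disabled\n");
		dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
	} else {
		/* val already holds the last value polled by wait_for(),
		 * so no second vlv_punit_read() is needed here. */
		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
			wm->level = VLV_WM_LEVEL_DDR_DVFS;
	}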

> Would it make sense to disable dvfs after a failure as well,
> then the user is shown a single DRM_ERROR at runtime and we should
> recover (by not going to the full WM next time)?

I wouldn't expect any failures after we've determined that it works.
That would indicate Punit going belly up or something, and then I'm
not sure anything would work anymore.

So I would hold on doing that unless someone actually hits such a
problem.
Chris Wilson Sept. 9, 2015, 3:41 p.m. UTC | #4
On Wed, Sep 09, 2015 at 06:28:50PM +0300, Ville Syrjälä wrote:
> On Tue, Sep 08, 2015 at 09:16:11PM +0100, Chris Wilson wrote:
> > Would it make sense to disable dvfs after a failure as well,
> > then the user is shown a single DRM_ERROR at runtime and we should
> > recover (by not going to the full WM next time)?
> 
> I wouldn't expect any failures after we've determined that it works.
> That would indicate Punit going belly up or something, and then I'm
> not sure anything would work anymore.

We didn't expect any before either :) And it sounds like you are arguing
that we should be reducing the noise from the victims as well :)
-Chris
Ville Syrjälä Sept. 9, 2015, 4:10 p.m. UTC | #5
On Wed, Sep 09, 2015 at 04:41:04PM +0100, Chris Wilson wrote:
> On Wed, Sep 09, 2015 at 06:28:50PM +0300, Ville Syrjälä wrote:
> > On Tue, Sep 08, 2015 at 09:16:11PM +0100, Chris Wilson wrote:
> > > Would it make sense to disable dvfs after a failure as well,
> > > then the user is shown a single DRM_ERROR at runtime and we should
> > > recover (by not going to the full WM next time)?
> > 
> > I wouldn't expect any failures after we've determined that it works.
> > That would indicate Punit going belly up or something, and then I'm
> > not sure anything would work anymore.
> 
> We didn't expect any before either :) And it sounds like you are arguing
> that we should be reducing the noise from the victims as well :)

Well, I think I'll still leave it as is. People have generally been
opposed to adding code to deal with conditions that should never
happen in real life.

Should I be proven wrong, you can smack me on the head with a big
"told you so!" sign ;)
Jani Nikula Sept. 10, 2015, 9:58 a.m. UTC | #6
On Wed, 09 Sep 2015, Ville Syrjälä <ville.syrjala@linux.intel.com> wrote:
> On Wed, Sep 09, 2015 at 04:41:04PM +0100, Chris Wilson wrote:
>> On Wed, Sep 09, 2015 at 06:28:50PM +0300, Ville Syrjälä wrote:
>> > On Tue, Sep 08, 2015 at 09:16:11PM +0100, Chris Wilson wrote:
>> > > Would it make sense to disable dvfs after a failure as well,
>> > > then the user is shown a single DRM_ERROR at runtime and we should
>> > > recover (by not going to the full WM next time)?
>> > 
>> > I wouldn't expect any failures after we've determined that it works.
>> > That would indicate Punit going belly up or something, and then I'm
>> > not sure anything would work anymore.
>> 
>> We didn't expect any before either :) And it sounds like you are arguing
>> that we should be reducing the noise from the victims as well :)
>
> Well, I think I'll still leave it as is. People have generally been
> opposed to adding code to deal with conditions that should never
> happen in real life.
>
> Should I be proven wrong, you can smack me on the head with a big
> "told you so!" sign ;)

Isn't that English for dealing with conditions that should never happen?
;)

BR,
Jani.


Jani Nikula Sept. 10, 2015, 12:43 p.m. UTC | #7
On Tue, 08 Sep 2015, Clint Taylor <clinton.a.taylor@intel.com> wrote:
> On 09/08/2015 11:05 AM, ville.syrjala@linux.intel.com wrote:
>> From: Ville Syrjälä <ville.syrjala@linux.intel.com>
>>
>> If one disables DDR DVFS in the BIOS, Punit will apparently ignores
>> all DDR DVFS request. Currently we assume that DDR DVFS is always
>> operational, which leads to errors in dmesg when the DDR DVFS requests
>> time out.
>>
>> Fix the problem by gently prodding Punit during driver load to find out
>> whether it will respond to DDR DVFS requests. If the request times out,
>> we assume that DDR DVFS has been permanenly disabled in the BIOS and
>> no longer perster the Punit about it.
>>
>> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=91629
>> Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
>> ---
>>   drivers/gpu/drm/i915/i915_drv.h |  2 ++
>>   drivers/gpu/drm/i915/intel_pm.c | 42 ++++++++++++++++++++++++++++-------------
>>   2 files changed, 31 insertions(+), 13 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>> index cf1880e..c2af7d8 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.h
>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>> @@ -1940,6 +1940,8 @@ struct drm_i915_private {
>>   			struct skl_wm_values skl_hw;
>>   			struct vlv_wm_values vlv;
>>   		};
>> +
>> +		uint8_t max_level;
>>   	} wm;
>>
>>   	struct i915_runtime_pm pm;
>> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
>> index 64bc77e..1f6b5bb 100644
>> --- a/drivers/gpu/drm/i915/intel_pm.c
>> +++ b/drivers/gpu/drm/i915/intel_pm.c
>> @@ -961,8 +961,6 @@ enum vlv_wm_level {
>>   	VLV_WM_LEVEL_PM2,
>>   	VLV_WM_LEVEL_PM5,
>>   	VLV_WM_LEVEL_DDR_DVFS,
>> -	CHV_WM_NUM_LEVELS,
>> -	VLV_WM_NUM_LEVELS = 1,
>>   };
>>
>>   /* latency must be in 0.1us units. */
>> @@ -988,9 +986,13 @@ static void vlv_setup_wm_latency(struct drm_device *dev)
>>   	/* all latencies in usec */
>>   	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
>>
>> +	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
>> +
>>   	if (IS_CHERRYVIEW(dev_priv)) {
>>   		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
>>   		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
>> +
>> +		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
>>   	}
>>   }
>>
>> @@ -1143,10 +1145,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
>>   	memset(wm_state, 0, sizeof(*wm_state));
>>
>>   	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
>> -	if (IS_CHERRYVIEW(dev))
>> -		wm_state->num_levels = CHV_WM_NUM_LEVELS;
>> -	else
>> -		wm_state->num_levels = VLV_WM_NUM_LEVELS;
>> +	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
>>
>>   	wm_state->num_active_planes = 0;
>>
>> @@ -1226,7 +1225,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
>>   	}
>>
>>   	/* clear any (partially) filled invalid levels */
>> -	for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
>> +	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
>>   		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
>>   		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
>>   	}
>> @@ -1330,10 +1329,7 @@ static void vlv_merge_wm(struct drm_device *dev,
>>   	struct intel_crtc *crtc;
>>   	int num_active_crtcs = 0;
>>
>> -	if (IS_CHERRYVIEW(dev))
>> -		wm->level = VLV_WM_LEVEL_DDR_DVFS;
>> -	else
>> -		wm->level = VLV_WM_LEVEL_PM2;
>> +	wm->level = to_i915(dev)->wm.max_level;
>>   	wm->cxsr = true;
>>
>>   	for_each_intel_crtc(dev, crtc) {
>> @@ -4090,9 +4086,29 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
>>   		if (val & DSP_MAXFIFO_PM5_ENABLE)
>>   			wm->level = VLV_WM_LEVEL_PM5;
>>
>> +		/*
>> +		 * If DDR DVFS is disabled in the BIOS, Punit
>> +		 * will never ack the request. So if that happens
>> +		 * assume we don't have to enable/disable DDR DVFS
>> +		 * dynamically. To test that just set the REQ_ACK
>> +		 * bit to poke the Punit, but don't change the
>> +		 * HIGH/LOW bits so that we don't actually change
>> +		 * the current state.
>> +		 */
>>   		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
>> -		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
>> -			wm->level = VLV_WM_LEVEL_DDR_DVFS;
>> +		val |= FORCE_DDR_FREQ_REQ_ACK;
>> +		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
>> +
>> +		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
>> +			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
>> +			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
>> +				      "assuming DDR DVFS is disabled\n");
>> +			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
>> +		} else {
>> +			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
>> +			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
>> +				wm->level = VLV_WM_LEVEL_DDR_DVFS;
>> +		}
>>
>>   		mutex_unlock(&dev_priv->rps.hw_lock);
>>   	}
>>
>
> Nice.
>
> Reviewed-by: Clint Taylor <Clinton.A.Taylor@intel.com>
> Tested-by: Clint Taylor <Clinton.A.Taylor@intel.com>

Pushed to drm-intel-next-fixes, thanks for the patch and review.

BR,
Jani.



Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cf1880e..c2af7d8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1940,6 +1940,8 @@  struct drm_i915_private {
 			struct skl_wm_values skl_hw;
 			struct vlv_wm_values vlv;
 		};
+
+		uint8_t max_level;
 	} wm;
 
 	struct i915_runtime_pm pm;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 64bc77e..1f6b5bb 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -961,8 +961,6 @@  enum vlv_wm_level {
 	VLV_WM_LEVEL_PM2,
 	VLV_WM_LEVEL_PM5,
 	VLV_WM_LEVEL_DDR_DVFS,
-	CHV_WM_NUM_LEVELS,
-	VLV_WM_NUM_LEVELS = 1,
 };
 
 /* latency must be in 0.1us units. */
@@ -988,9 +986,13 @@  static void vlv_setup_wm_latency(struct drm_device *dev)
 	/* all latencies in usec */
 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
 
+	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+
 	if (IS_CHERRYVIEW(dev_priv)) {
 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
 		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+
+		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
 	}
 }
 
@@ -1143,10 +1145,7 @@  static void vlv_compute_wm(struct intel_crtc *crtc)
 	memset(wm_state, 0, sizeof(*wm_state));
 
 	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
-	if (IS_CHERRYVIEW(dev))
-		wm_state->num_levels = CHV_WM_NUM_LEVELS;
-	else
-		wm_state->num_levels = VLV_WM_NUM_LEVELS;
+	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
 
 	wm_state->num_active_planes = 0;
 
@@ -1226,7 +1225,7 @@  static void vlv_compute_wm(struct intel_crtc *crtc)
 	}
 
 	/* clear any (partially) filled invalid levels */
-	for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
 		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
 		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
 	}
@@ -1330,10 +1329,7 @@  static void vlv_merge_wm(struct drm_device *dev,
 	struct intel_crtc *crtc;
 	int num_active_crtcs = 0;
 
-	if (IS_CHERRYVIEW(dev))
-		wm->level = VLV_WM_LEVEL_DDR_DVFS;
-	else
-		wm->level = VLV_WM_LEVEL_PM2;
+	wm->level = to_i915(dev)->wm.max_level;
 	wm->cxsr = true;
 
 	for_each_intel_crtc(dev, crtc) {
@@ -4090,9 +4086,29 @@  void vlv_wm_get_hw_state(struct drm_device *dev)
 		if (val & DSP_MAXFIFO_PM5_ENABLE)
 			wm->level = VLV_WM_LEVEL_PM5;
 
+		/*
+		 * If DDR DVFS is disabled in the BIOS, Punit
+		 * will never ack the request. So if that happens
+		 * assume we don't have to enable/disable DDR DVFS
+		 * dynamically. To test that just set the REQ_ACK
+		 * bit to poke the Punit, but don't change the
+		 * HIGH/LOW bits so that we don't actually change
+		 * the current state.
+		 */
 		val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
-		if ((val & FORCE_DDR_HIGH_FREQ) == 0)
-			wm->level = VLV_WM_LEVEL_DDR_DVFS;
+		val |= FORCE_DDR_FREQ_REQ_ACK;
+		vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+		if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+			      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+			DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
+				      "assuming DDR DVFS is disabled\n");
+			dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+		} else {
+			val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+			if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+				wm->level = VLV_WM_LEVEL_DDR_DVFS;
+		}
 
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	}