[v10,1/2] drm/i915: Refactor intel_can_enable_sagv
diff mbox series

Message ID 20191107153037.17640-2-stanislav.lisovskiy@intel.com
State New
Headers show
Series
  • Refactor Gen11+ SAGV support
Related show

Commit Message

Stanislav Lisovskiy Nov. 7, 2019, 3:30 p.m. UTC
Currently the intel_can_enable_sagv function contains
a mix of workarounds for different platforms,
some of which are no longer valid for gens >= 11,
so let's split it into separate functions.

v2:
    - Rework watermark calculation algorithm to
      attempt to calculate Level 0 watermark
      with added sagv block time latency and
      check if it fits in DBuf in order to
      determine if SAGV can be enabled already
      at this stage, just as BSpec 49325 states.
      if that fails rollback to usual Level 0
      latency and disable SAGV.
    - Remove unneeded tabs (James Ausmus)

v3: Rebased the patch

v4: - Added back interlaced check for Gen12 and
      added separate function for TGL SAGV check
      (thanks to James Ausmus for spotting)
    - Removed unneeded gen check
    - Extracted Gen12 SAGV decision making code
      to a separate function from skl_compute_wm

v5: - Added SAGV global state to dev_priv, because
      we need to track all pipes, not only those
      in atomic state. Each pipe now has a corresponding
      bit mask reflecting whether it can tolerate
      SAGV or not (thanks to Ville Syrjala for suggestions).
    - Now using active flag instead of enable in crc
      usage check.

v6: - Fixed rebase conflicts

Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Cc: Ville Syrjälä <ville.syrjala@intel.com>
Cc: James Ausmus <james.ausmus@intel.com>
---
 drivers/gpu/drm/i915/display/intel_display.c  |   4 +
 .../drm/i915/display/intel_display_types.h    |   9 +
 drivers/gpu/drm/i915/i915_drv.h               |   6 +
 drivers/gpu/drm/i915/intel_pm.c               | 296 +++++++++++++++++-
 4 files changed, 303 insertions(+), 12 deletions(-)

Comments

Matt Roper Nov. 12, 2019, 12:15 a.m. UTC | #1
On Thu, Nov 07, 2019 at 05:30:36PM +0200, Stanislav Lisovskiy wrote:
> Currently intel_can_enable_sagv function contains
> a mix of workarounds for different platforms
> some of them are not valid for gens >= 11 already,
> so lets split it into separate functions.
> 
> v2:
>     - Rework watermark calculation algorithm to
>       attempt to calculate Level 0 watermark
>       with added sagv block time latency and
>       check if it fits in DBuf in order to
>       determine if SAGV can be enabled already
>       at this stage, just as BSpec 49325 states.
>       if that fails rollback to usual Level 0
>       latency and disable SAGV.
>     - Remove unneeded tabs(James Ausmus)
> 
> v3: Rebased the patch
> 
> v4: - Added back interlaced check for Gen12 and
>       added separate function for TGL SAGV check
>       (thanks to James Ausmus for spotting)
>     - Removed unneeded gen check
>     - Extracted Gen12 SAGV decision making code
>       to a separate function from skl_compute_wm
> 
> v5: - Added SAGV global state to dev_priv, because
>       we need to track all pipes, not only those
>       in atomic state. Each pipe has now correspondent
>       bit mask reflecting, whether it can tolerate
>       SAGV or not(thanks to Ville Syrjala for suggestions).
>     - Now using active flag instead of enable in crc
>       usage check.
> 
> v6: - Fixed rebase conflicts
> 
> Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> Cc: Ville Syrjälä <ville.syrjala@intel.com>
> Cc: James Ausmus <james.ausmus@intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_display.c  |   4 +
>  .../drm/i915/display/intel_display_types.h    |   9 +
>  drivers/gpu/drm/i915/i915_drv.h               |   6 +
>  drivers/gpu/drm/i915/intel_pm.c               | 296 +++++++++++++++++-
>  4 files changed, 303 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index 876fc25968bf..7ea1e7518ab6 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -14855,6 +14855,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
>  		if (dev_priv->display.optimize_watermarks)
>  			dev_priv->display.optimize_watermarks(state,
>  							      new_crtc_state);
> +		if (state->crtc_sagv_mask & BIT(crtc->pipe))
> +			dev_priv->crtc_sagv_mask |= BIT(crtc->pipe);
> +		else
> +			dev_priv->crtc_sagv_mask &= ~BIT(crtc->pipe);
>  	}
>  
>  	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
> diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
> index fadd9853f966..fb274538af23 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_types.h
> +++ b/drivers/gpu/drm/i915/display/intel_display_types.h
> @@ -490,6 +490,14 @@ struct intel_atomic_state {
>  	 */
>  	u8 active_pipe_changes;
>  
> +	/*
> +	 * Contains a mask which reflects whether correspondent pipe
> +	 * can tolerate SAGV or not, so that we can make a decision
> +	 * at atomic_commit_tail stage, whether we enable it or not
> +	 * based on global state in dev_priv.
> +	 */
> +	u32 crtc_sagv_mask;
> +
>  	u8 active_pipes;
>  	/* minimum acceptable cdclk for each pipe */
>  	int min_cdclk[I915_MAX_PIPES];
> @@ -670,6 +678,7 @@ struct skl_plane_wm {
>  	struct skl_wm_level wm[8];
>  	struct skl_wm_level uv_wm[8];
>  	struct skl_wm_level trans_wm;
> +	struct skl_wm_level sagv_wm0;
>  	bool is_planar;
>  };
>  
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 7e0f67babe20..4f4e2e839513 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1176,6 +1176,12 @@ struct drm_i915_private {
>  
>  	u32 sagv_block_time_us;
>  
> +	/*
> +	 * Contains a bit mask, whether correspondent
> +	 * pipe allows SAGV or not.
> +	 */
> +	u32 crtc_sagv_mask;
> +
>  	struct {
>  		/*
>  		 * Raw watermark latency values:
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 2d389e437e87..c792dd168742 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -3740,7 +3740,7 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
>  	return 0;
>  }
>  
> -bool intel_can_enable_sagv(struct intel_atomic_state *state)
> +static void skl_set_sagv_mask(struct intel_atomic_state *state)
>  {
>  	struct drm_device *dev = state->base.dev;
>  	struct drm_i915_private *dev_priv = to_i915(dev);
> @@ -3750,21 +3750,23 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
>  	enum pipe pipe;
>  	int level, latency;
>  
> +	state->crtc_sagv_mask = 0;
> +
>  	if (!intel_has_sagv(dev_priv))
> -		return false;
> +		return;
>  
>  	/*
>  	 * If there are no active CRTCs, no additional checks need be performed
>  	 */
>  	if (hweight8(state->active_pipes) == 0)
> -		return true;
> +		return;
>  
>  	/*
>  	 * SKL+ workaround: bspec recommends we disable SAGV when we have
>  	 * more then one pipe enabled
>  	 */
>  	if (hweight8(state->active_pipes) > 1)
> -		return false;
> +		return;
>  
>  	/* Since we're now guaranteed to only have one active CRTC... */
>  	pipe = ffs(state->active_pipes) - 1;
> @@ -3772,7 +3774,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
>  	crtc_state = to_intel_crtc_state(crtc->base.state);
>  
>  	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
> -		return false;
> +		return;
>  
>  	for_each_intel_plane_on_crtc(dev, crtc, plane) {
>  		struct skl_plane_wm *wm =
> @@ -3800,9 +3802,127 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
>  		 * can't enable SAGV.
>  		 */
>  		if (latency < dev_priv->sagv_block_time_us)
> -			return false;
> +			return;
>  	}
>  
> +	state->crtc_sagv_mask |= BIT(crtc->pipe);
> +}
> +
> +static void tgl_set_sagv_mask(struct intel_atomic_state *state);
> +
> +static void icl_set_sagv_mask(struct intel_atomic_state *state)
> +{
> +	struct drm_device *dev = state->base.dev;
> +	struct drm_i915_private *dev_priv = to_i915(dev);
> +	struct intel_crtc *crtc;
> +	struct intel_crtc_state *new_crtc_state;
> +	int level, latency;
> +	int i;
> +	int plane_id;
> +
> +	state->crtc_sagv_mask = 0;
> +
> +	if (!intel_has_sagv(dev_priv))
> +		return;
> +
> +	/*
> +	 * If there are no active CRTCs, no additional checks need be performed
> +	 */
> +	if (hweight8(state->active_pipes) == 0)
> +		return;
> +
> +	for_each_new_intel_crtc_in_state(state, crtc,
> +					     new_crtc_state, i) {
> +		unsigned int flags = crtc->base.state->adjusted_mode.flags;
> +		bool can_sagv;
> +
> +		if (flags & DRM_MODE_FLAG_INTERLACE)
> +			continue;
> +
> +		if (!new_crtc_state->base.active)
> +			continue;
> +
> +		can_sagv = true;
> +		for_each_plane_id_on_crtc(crtc, plane_id) {
> +			struct skl_plane_wm *wm =
> +				&new_crtc_state->wm.skl.optimal.planes[plane_id];
> +
> +			/* Skip this plane if it's not enabled */
> +			if (!wm->wm[0].plane_en)
> +				continue;
> +
> +			/* Find the highest enabled wm level for this plane */
> +			for (level = ilk_wm_max_level(dev_priv);
> +			     !wm->wm[level].plane_en; --level) {
> +			}
> +
> +			latency = dev_priv->wm.skl_latency[level];
> +
> +			/*
> +			 * If any of the planes on this pipe don't enable
> +			 * wm levels that incur memory latencies higher than
> +			 * sagv_block_time_us we can't enable SAGV.
> +			 */
> +			if (latency < dev_priv->sagv_block_time_us) {
> +				can_sagv = false;
> +				break;
> +			}

I find the wording of the bspec ("if any enabled plane will not be able
to enable watermarks for memory latency >= SAGV block time") in this
area somewhat ambiguous.  To me that wording sounds like they want us to
calculate the watermarks one more time, but using the SAGV blocking time
rather than any of the 8 latency values we received from the pcode ---
if the calculated watermark value for that "sagv level" fits within the
DDB allocation then we can enable SAGV, otherwise we can't.

Your approach here somewhat approximates that.  If the highest watermark
level we enabled had a latency higher than the SAGV blocking time, then
we automatically know we also would have had a valid watermark value for
a lower SAGV latency.  But if the highest level we enabled has a lower
latency, we can't say for certain whether the SAGV's blocking time would
have led to valid or invalid watermarks.  If the first watermark level
we failed on also had a lower latency than the SAGV time then we can
conclude that the SAGV can't be enabled.  But if the next level up had a
latency higher than the blocking time (i.e., good < SAGV < bad), we
can't really tell whether SAGV was possible without actually doing the
extra watermark calculation.

But even given the above, the bspec suggestion seems somewhat surprising
to me.  Intuitively it seems like SAGV would be introducing an
additional delay on top of the existing memory fetch latency, not
replacing the latency entirely.  Intuitively the algorithm suggested for
TGL makes sense to me (i.e., add the SAGV's extra delay to the WM0
latency to ensure that regular latency plus an extra SAGV delay doesn't
lead us to run dry), but that's not what the bspec calls for on ICL.
I'm not really sure whether that's truly an intentional behavior change
between platforms or whether the TGL bspec section does just a better
job of explaining what was supposed to be done and clarifying the
language.

Anyway, we should probably trust the bspec for now, so it seems to me
like we should add a "fake" watermark level associated with the SAGV
block time and explicitly calculate that as enabled/disabled anytime we
have a good < SAGV < bad situation.

> +		}
> +		if (can_sagv)
> +			state->crtc_sagv_mask |= BIT(crtc->pipe);
> +	}
> +}
> +
> +bool intel_can_enable_sagv(struct intel_atomic_state *state)
> +{
> +	struct drm_device *dev = state->base.dev;
> +	struct drm_i915_private *dev_priv = to_i915(dev);
> +	enum pipe pipe;
> +
> +	if (INTEL_GEN(dev_priv) >= 12)
> +		tgl_set_sagv_mask(state);
> +	else if (INTEL_GEN(dev_priv) == 11)
> +		icl_set_sagv_mask(state);
> +	else
> +		skl_set_sagv_mask(state);
> +
> +	/*
> +	 * For SAGV we need to account all the pipes,
> +	 * not only the ones which are in state currently.
> +	 */
> +	for_each_pipe(dev_priv, pipe) {
> +		unsigned int active_pipes;
> +		/*
> +		 * Figure out if we are changing active pipes here
> +		 * then after commit dev_priv->active_pipes will
> +		 * anyway be assigned to state->active_pipes.
> +		 */
> +		if (state->active_pipe_changes)
> +			active_pipes = state->active_pipes;
> +		else
> +			active_pipes = dev_priv->active_pipes;
> +
> +		/* Skip if pipe is inactive */
> +		if (!(BIT(pipe) & active_pipes))
> +			continue;
> +
> +		/*
> +		 * Pipe can be active in this state or in dev_priv
> +		 * as we haven't committed thise changes yet(and we shouldn't)
> +		 * - we need to check both.
> +		 */
> +		if (state->active_pipe_changes & BIT(pipe)) {
> +			bool state_sagv_masked = \
> +				(BIT(pipe) & state->crtc_sagv_mask) == 0;
> +			if (state_sagv_masked)
> +				return false;
> +		} else {
> +			bool sagv_masked = \
> +				(BIT(pipe) & dev_priv->crtc_sagv_mask) == 0;

If we're not changing which pipes are active, then we didn't globally
lock everything at the beginning of this atomic transaction; we can have
racing commits against different CRTC's.  So when you look at
dev_priv->crtc_sagv_mask here, the value might change immediately
afterward if a commit on a different CRTC completed in the meantime.

I don't think we want to look at other CRTC's outside our current
transaction here.  We should just figure out whether our own CRTC's are
okay with SAGV or not.  Then in the commit phase we'd need to grab some
kind of SAGV lock, combine our local "SAGV okay" with the global "SAGV
okay" and enable/disable as necessary.

Some kind of reference-counting mechanism might make this simpler...is
there any way we could tie this in with power domains (e.g., adding a
"SAGV off" power domain and power well that we grab during commit when
we've calculated that our own crtcs can't cope with SAGV latency)?


> +			if (sagv_masked)
> +				return false;
> +		}
> +	}
>  	return true;
>  }
>  
> @@ -3925,6 +4045,7 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
>  				 int color_plane);
>  static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
>  				 int level,
> +				 u32 latency,
>  				 const struct skl_wm_params *wp,
>  				 const struct skl_wm_level *result_prev,
>  				 struct skl_wm_level *result /* out */);
> @@ -3947,7 +4068,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
>  	WARN_ON(ret);
>  
>  	for (level = 0; level <= max_level; level++) {
> -		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
> +		u32 latency = dev_priv->wm.skl_latency[level];
> +
> +		skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
> +
>  		if (wm.min_ddb_alloc == U16_MAX)
>  			break;
>  
> @@ -4212,6 +4336,68 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
>  	return total_data_rate;
>  }
>  
> +static int
> +tgl_check_pipe_fits_sagv_wm(struct intel_crtc_state *crtc_state,
> +		      struct skl_ddb_allocation *ddb /* out */)
> +{
> +	struct drm_crtc *crtc = crtc_state->base.crtc;
> +	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
> +	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> +	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
> +	u16 alloc_size;
> +	u16 total[I915_MAX_PLANES] = {};
> +	u64 total_data_rate;
> +	enum plane_id plane_id;
> +	int num_active;
> +	u64 plane_data_rate[I915_MAX_PLANES] = {};
> +	u32 blocks;
> +
> +	/*
> +	 * No need to check gen here, we call this only for gen12
> +	 */
> +	total_data_rate =
> +		icl_get_total_relative_data_rate(crtc_state,
> +						 plane_data_rate);
> +
> +	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
> +					   total_data_rate,
> +					   ddb, alloc, &num_active);
> +	alloc_size = skl_ddb_entry_size(alloc);
> +	if (alloc_size == 0)
> +		return -ENOSPC;
> +
> +	/* Allocate fixed number of blocks for cursor. */
> +	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
> +	alloc_size -= total[PLANE_CURSOR];
> +	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
> +		alloc->end - total[PLANE_CURSOR];
> +	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
> +
> +	/*
> +	 * Do check if we can fit L0 + sagv_block_time and
> +	 * disable SAGV if we can't.
> +	 */
> +	blocks = 0;
> +	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
> +		const struct skl_plane_wm *wm =
> +			&crtc_state->wm.skl.optimal.planes[plane_id];
> +
> +		if (plane_id == PLANE_CURSOR) {
> +			if (WARN_ON(wm->sagv_wm0.min_ddb_alloc >
> +				    total[PLANE_CURSOR])) {
> +				blocks = U32_MAX;
> +				break;
> +			}
> +			continue;
> +		}
> +
> +		blocks += wm->sagv_wm0.min_ddb_alloc;
> +		if (blocks > alloc_size)
> +			return -ENOSPC;
> +	}
> +	return 0;
> +}
> +
>  static int
>  skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
>  		      struct skl_ddb_allocation *ddb /* out */)
> @@ -4641,6 +4827,7 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
>  
>  static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
>  				 int level,
> +				 u32 latency,
>  				 const struct skl_wm_params *wp,
>  				 const struct skl_wm_level *result_prev,
>  				 struct skl_wm_level *result /* out */)

It doesn't look like this latency parameter gets used (it still gets
shadowed by a local latency variable).


> @@ -4767,20 +4954,45 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
>  static void
>  skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
>  		      const struct skl_wm_params *wm_params,
> -		      struct skl_wm_level *levels)
> +		      struct skl_plane_wm *plane_wm,
> +		      bool yuv)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
>  	int level, max_level = ilk_wm_max_level(dev_priv);
> +	/*
> +	 * Check which kind of plane is it and based on that calculate
> +	 * correspondent WM levels.
> +	 */
> +	struct skl_wm_level *levels = yuv ? plane_wm->uv_wm : plane_wm->wm;
>  	struct skl_wm_level *result_prev = &levels[0];
>  
>  	for (level = 0; level <= max_level; level++) {
>  		struct skl_wm_level *result = &levels[level];
> +		u32 latency = dev_priv->wm.skl_latency[level];
>  
> -		skl_compute_plane_wm(crtc_state, level, wm_params,
> -				     result_prev, result);
> +		skl_compute_plane_wm(crtc_state, level, latency,
> +				     wm_params, result_prev, result);
>  
>  		result_prev = result;
>  	}
> +	/*
> +	 * For Gen12 if it is an L0 we need to also
> +	 * consider sagv_block_time when calculating
> +	 * L0 watermark - we will need that when making
> +	 * a decision whether enable SAGV or not.
> +	 * For older gens we agreed to copy L0 value for
> +	 * compatibility.
> +	 */
> +	if ((INTEL_GEN(dev_priv) >= 12)) {
> +		u32 latency = dev_priv->wm.skl_latency[0];
> +
> +		latency += dev_priv->sagv_block_time_us;
> +		skl_compute_plane_wm(crtc_state, 0, latency,
> +		     wm_params, &levels[0],
> +		    &plane_wm->sagv_wm0);
> +	} else
> +		memcpy(&plane_wm->sagv_wm0, &levels[0],
> +			sizeof(struct skl_wm_level));
>  }
>  
>  static u32
> @@ -4873,7 +5085,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
>  	if (ret)
>  		return ret;
>  
> -	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
> +	skl_compute_wm_levels(crtc_state, &wm_params, wm, false);
>  	skl_compute_transition_wm(crtc_state, &wm_params, wm);
>  
>  	return 0;
> @@ -4895,7 +5107,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
>  	if (ret)
>  		return ret;
>  
> -	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
> +	skl_compute_wm_levels(crtc_state, &wm_params, wm, true);
>  
>  	return 0;
>  }
> @@ -5167,6 +5379,8 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
>  	return 0;
>  }
>  
> +static void tgl_set_sagv_wm0(struct intel_atomic_state *state);
> +
>  static int
>  skl_compute_ddb(struct intel_atomic_state *state)
>  {
> @@ -5177,6 +5391,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
>  	struct intel_crtc *crtc;
>  	int ret, i;
>  
> +	/* For Gen12+ for SAGV we have a special L0 wm values */
> +	if (INTEL_GEN(dev_priv) >= 12)
> +		if (intel_can_enable_sagv(state))
> +			tgl_set_sagv_wm0(state);
> +
>  	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
>  
>  	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
> @@ -5443,6 +5662,56 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
>  	return 0;
>  }
>  
> +void tgl_set_sagv_wm0(struct intel_atomic_state *state)
> +{
> +	struct intel_crtc *crtc;
> +	struct intel_crtc_state *new_crtc_state;
> +	struct intel_crtc_state *old_crtc_state;
> +	struct drm_device *dev = state->base.dev;
> +	const struct drm_i915_private *dev_priv = to_i915(dev);
> +	int i;
> +
> +	/*
> +	 * If we determined that we can actually enable SAGV, then
> +	 * actually use those levels tgl_check_pipe_fits_sagv_wm
> +	 * has already taken care of checking if L0 + sagv block time
> +	 * fits into ddb.
> +	 */
> +	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
> +				    new_crtc_state, i) {
> +		struct intel_plane *plane;
> +
> +		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
> +			enum plane_id plane_id = plane->id;
> +
> +			struct skl_plane_wm *plane_wm = \
> +			    &new_crtc_state->wm.skl.optimal.planes[plane_id];
> +			struct skl_wm_level *sagv_wm0 = &plane_wm->sagv_wm0;
> +			struct skl_wm_level *l0_wm0 = &plane_wm->wm[0];
> +
> +			memcpy(l0_wm0, sagv_wm0, sizeof(struct skl_wm_level));
> +		}
> +	}
> +}
> +
> +static void tgl_set_sagv_mask(struct intel_atomic_state *state)
> +{
> +	struct intel_crtc *crtc;
> +	struct intel_crtc_state *new_crtc_state;
> +	struct intel_crtc_state *old_crtc_state;
> +	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
> +	int ret, i;
> +
> +	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
> +					    new_crtc_state, i) {
> +		ret = tgl_check_pipe_fits_sagv_wm(new_crtc_state, ddb);
> +		if (!ret) {
> +			int pipe_bit = BIT(crtc->pipe);
> +			state->crtc_sagv_mask |= pipe_bit;
> +		}
> +	}
> +}
> +
>  static int
>  skl_compute_wm(struct intel_atomic_state *state)
>  {
> @@ -5455,6 +5724,9 @@ skl_compute_wm(struct intel_atomic_state *state)
>  	/* Clear all dirty flags */
>  	results->dirty_pipes = 0;
>  
> +	/* If we exit before check is done */
> +	state->crtc_sagv_mask = 0;
> +
>  	ret = skl_ddb_add_affected_pipes(state);
>  	if (ret)
>  		return ret;
> -- 
> 2.17.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
Stanislav Lisovskiy Nov. 12, 2019, 4:04 p.m. UTC | #2
On Mon, 2019-11-11 at 16:15 -0800, Matt Roper wrote:
> On Thu, Nov 07, 2019 at 05:30:36PM +0200, Stanislav Lisovskiy wrote:
> > Currently intel_can_enable_sagv function contains
> > a mix of workarounds for different platforms
> > some of them are not valid for gens >= 11 already,
> > so lets split it into separate functions.
> > 
> > v2:
> >     - Rework watermark calculation algorithm to
> >       attempt to calculate Level 0 watermark
> >       with added sagv block time latency and
> >       check if it fits in DBuf in order to
> >       determine if SAGV can be enabled already
> >       at this stage, just as BSpec 49325 states.
> >       if that fails rollback to usual Level 0
> >       latency and disable SAGV.
> >     - Remove unneeded tabs(James Ausmus)
> > 
> > v3: Rebased the patch
> > 
> > v4: - Added back interlaced check for Gen12 and
> >       added separate function for TGL SAGV check
> >       (thanks to James Ausmus for spotting)
> >     - Removed unneeded gen check
> >     - Extracted Gen12 SAGV decision making code
> >       to a separate function from skl_compute_wm
> > 
> > v5: - Added SAGV global state to dev_priv, because
> >       we need to track all pipes, not only those
> >       in atomic state. Each pipe has now correspondent
> >       bit mask reflecting, whether it can tolerate
> >       SAGV or not(thanks to Ville Syrjala for suggestions).
> >     - Now using active flag instead of enable in crc
> >       usage check.
> > 
> > v6: - Fixed rebase conflicts
> > 
> > Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> > Cc: Ville Syrjälä <ville.syrjala@intel.com>
> > Cc: James Ausmus <james.ausmus@intel.com>
> > ---
> >  drivers/gpu/drm/i915/display/intel_display.c  |   4 +
> >  .../drm/i915/display/intel_display_types.h    |   9 +
> >  drivers/gpu/drm/i915/i915_drv.h               |   6 +
> >  drivers/gpu/drm/i915/intel_pm.c               | 296
> > +++++++++++++++++-
> >  4 files changed, 303 insertions(+), 12 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/display/intel_display.c
> > b/drivers/gpu/drm/i915/display/intel_display.c
> > index 876fc25968bf..7ea1e7518ab6 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display.c
> > +++ b/drivers/gpu/drm/i915/display/intel_display.c
> > @@ -14855,6 +14855,10 @@ static void
> > intel_atomic_commit_tail(struct intel_atomic_state *state)
> >  		if (dev_priv->display.optimize_watermarks)
> >  			dev_priv->display.optimize_watermarks(state,
> >  							      new_crtc_
> > state);
> > +		if (state->crtc_sagv_mask & BIT(crtc->pipe))
> > +			dev_priv->crtc_sagv_mask |= BIT(crtc->pipe);
> > +		else
> > +			dev_priv->crtc_sagv_mask &= ~BIT(crtc->pipe);
> >  	}
> >  
> >  	for_each_oldnew_intel_crtc_in_state(state, crtc,
> > old_crtc_state, new_crtc_state, i) {
> > diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h
> > b/drivers/gpu/drm/i915/display/intel_display_types.h
> > index fadd9853f966..fb274538af23 100644
> > --- a/drivers/gpu/drm/i915/display/intel_display_types.h
> > +++ b/drivers/gpu/drm/i915/display/intel_display_types.h
> > @@ -490,6 +490,14 @@ struct intel_atomic_state {
> >  	 */
> >  	u8 active_pipe_changes;
> >  
> > +	/*
> > +	 * Contains a mask which reflects whether correspondent pipe
> > +	 * can tolerate SAGV or not, so that we can make a decision
> > +	 * at atomic_commit_tail stage, whether we enable it or not
> > +	 * based on global state in dev_priv.
> > +	 */
> > +	u32 crtc_sagv_mask;
> > +
> >  	u8 active_pipes;
> >  	/* minimum acceptable cdclk for each pipe */
> >  	int min_cdclk[I915_MAX_PIPES];
> > @@ -670,6 +678,7 @@ struct skl_plane_wm {
> >  	struct skl_wm_level wm[8];
> >  	struct skl_wm_level uv_wm[8];
> >  	struct skl_wm_level trans_wm;
> > +	struct skl_wm_level sagv_wm0;
> >  	bool is_planar;
> >  };
> >  
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h
> > b/drivers/gpu/drm/i915/i915_drv.h
> > index 7e0f67babe20..4f4e2e839513 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -1176,6 +1176,12 @@ struct drm_i915_private {
> >  
> >  	u32 sagv_block_time_us;
> >  
> > +	/*
> > +	 * Contains a bit mask, whether correspondent
> > +	 * pipe allows SAGV or not.
> > +	 */
> > +	u32 crtc_sagv_mask;
> > +
> >  	struct {
> >  		/*
> >  		 * Raw watermark latency values:
> > diff --git a/drivers/gpu/drm/i915/intel_pm.c
> > b/drivers/gpu/drm/i915/intel_pm.c
> > index 2d389e437e87..c792dd168742 100644
> > --- a/drivers/gpu/drm/i915/intel_pm.c
> > +++ b/drivers/gpu/drm/i915/intel_pm.c
> > @@ -3740,7 +3740,7 @@ intel_disable_sagv(struct drm_i915_private
> > *dev_priv)
> >  	return 0;
> >  }
> >  
> > -bool intel_can_enable_sagv(struct intel_atomic_state *state)
> > +static void skl_set_sagv_mask(struct intel_atomic_state *state)
> >  {
> >  	struct drm_device *dev = state->base.dev;
> >  	struct drm_i915_private *dev_priv = to_i915(dev);
> > @@ -3750,21 +3750,23 @@ bool intel_can_enable_sagv(struct
> > intel_atomic_state *state)
> >  	enum pipe pipe;
> >  	int level, latency;
> >  
> > +	state->crtc_sagv_mask = 0;
> > +
> >  	if (!intel_has_sagv(dev_priv))
> > -		return false;
> > +		return;
> >  
> >  	/*
> >  	 * If there are no active CRTCs, no additional checks need be
> > performed
> >  	 */
> >  	if (hweight8(state->active_pipes) == 0)
> > -		return true;
> > +		return;
> >  
> >  	/*
> >  	 * SKL+ workaround: bspec recommends we disable SAGV when we
> > have
> >  	 * more then one pipe enabled
> >  	 */
> >  	if (hweight8(state->active_pipes) > 1)
> > -		return false;
> > +		return;
> >  
> >  	/* Since we're now guaranteed to only have one active CRTC...
> > */
> >  	pipe = ffs(state->active_pipes) - 1;
> > @@ -3772,7 +3774,7 @@ bool intel_can_enable_sagv(struct
> > intel_atomic_state *state)
> >  	crtc_state = to_intel_crtc_state(crtc->base.state);
> >  
> >  	if (crtc_state->hw.adjusted_mode.flags &
> > DRM_MODE_FLAG_INTERLACE)
> > -		return false;
> > +		return;
> >  
> >  	for_each_intel_plane_on_crtc(dev, crtc, plane) {
> >  		struct skl_plane_wm *wm =
> > @@ -3800,9 +3802,127 @@ bool intel_can_enable_sagv(struct
> > intel_atomic_state *state)
> >  		 * can't enable SAGV.
> >  		 */
> >  		if (latency < dev_priv->sagv_block_time_us)
> > -			return false;
> > +			return;
> >  	}
> >  
> > +	state->crtc_sagv_mask |= BIT(crtc->pipe);
> > +}
> > +
> > +static void tgl_set_sagv_mask(struct intel_atomic_state *state);
> > +
> > +static void icl_set_sagv_mask(struct intel_atomic_state *state)
> > +{
> > +	struct drm_device *dev = state->base.dev;
> > +	struct drm_i915_private *dev_priv = to_i915(dev);
> > +	struct intel_crtc *crtc;
> > +	struct intel_crtc_state *new_crtc_state;
> > +	int level, latency;
> > +	int i;
> > +	int plane_id;
> > +
> > +	state->crtc_sagv_mask = 0;
> > +
> > +	if (!intel_has_sagv(dev_priv))
> > +		return;
> > +
> > +	/*
> > +	 * If there are no active CRTCs, no additional checks need be
> > performed
> > +	 */
> > +	if (hweight8(state->active_pipes) == 0)
> > +		return;
> > +
> > +	for_each_new_intel_crtc_in_state(state, crtc,
> > +					     new_crtc_state, i) {
> > +		unsigned int flags = crtc->base.state-
> > >adjusted_mode.flags;
> > +		bool can_sagv;
> > +
> > +		if (flags & DRM_MODE_FLAG_INTERLACE)
> > +			continue;
> > +
> > +		if (!new_crtc_state->base.active)
> > +			continue;
> > +
> > +		can_sagv = true;
> > +		for_each_plane_id_on_crtc(crtc, plane_id) {
> > +			struct skl_plane_wm *wm =
> > +				&new_crtc_state-
> > >wm.skl.optimal.planes[plane_id];
> > +
> > +			/* Skip this plane if it's not enabled */
> > +			if (!wm->wm[0].plane_en)
> > +				continue;
> > +
> > +			/* Find the highest enabled wm level for this
> > plane */
> > +			for (level = ilk_wm_max_level(dev_priv);
> > +			     !wm->wm[level].plane_en; --level) {
> > +			}
> > +
> > +			latency = dev_priv->wm.skl_latency[level];
> > +
> > +			/*
> > +			 * If any of the planes on this pipe don't
> > enable
> > +			 * wm levels that incur memory latencies higher
> > than
> > +			 * sagv_block_time_us we can't enable SAGV.
> > +			 */
> > +			if (latency < dev_priv->sagv_block_time_us) {
> > +				can_sagv = false;
> > +				break;
> > +			}
> 
> I find the wording of the bspec ("if any enabled plane will not be
> able
> to enable watermarks for memory latency >= SAGV block time") in this
> area somewhat ambiguous.  To me that wording sounds like they want us
> to
> calculate the watermarks one more time, but using the SAGV blocking
> time
> rather than any of the 8 latency values we received from the pcode --
> -
> if the calculated watermark value for that "sagv level" fits within
> the
> DDB allocation then we can enable SAGV, otherwise we can't.
> 
> Your approach here somewhat approximates that.  If the highest
> watermark
> level we enabled had a latency higher than the SAGV blocking time,
> then
> we automatically know we also would have had a valid watermark value
> for
> a lower sagv latency.  But if the highest latency we enabled has a
> lower
> latency, we can't say for certain whether the SAGV's blocking time
> would
> have led to valid or invalid watermarks.  If the first watermark
> level
> we failed on also had a lower latency than the SAGV time then we can
> conclude that the SAGV can't be enabled.  But if the next level up
> had a
> latency higher than the blocking time (i.e., good < SAGV < bad), we
> can't really tell whether SAGV was possible without actually doing
> the
> extra watermark calculation.
> 
> But even given the above, the bspec suggestion seems somewhat
> surprising
> to me.  Intuitively it seems like SAGV would be introducing an
> additional delay on top of the existing memory fetch latency, not
> replacing the latency entirely.  Intuitively the algorithm suggested
> for
> TGL makes sense to me (i.e., add the SAGV's extra delay to the WM0
> latency to ensure that regular latency plus an extra SAGV delay
> doesn't
> lead us to run dry), but that's not what the bspec calls for on ICL.
> I'm not really sure whether that's truly an intentional behavior
> change
> between platforms or whether the TGL bspec section does just a better
> job of explaining what was supposed to be done and clarifying the
> language.
> 
> Anyway, we should probably trust the bspec for now, so it seems to me
> like we should add a "fake" watermark level associated with the SAGV
> block time and explicitly calculate that as enabled/disabled anytime
> we
> have a good < SAGV < bad situation.

I totally agree — I had the same questions during implementation;
however, I simply stuck to what has been defined in the spec and also
done for previous platforms, as this is basically the same as how it's
done now. What I did is mostly just splitting the code to get rid of
some SKL-specific ugly workarounds there.
Regarding the "fake" watermark level: as you might have noticed, I would
now be storing both the sagv_wm and the "normal" wm level to let
intel_can_enable_sagv make the decision later based on that (mostly the
decision is now simply dictated by whether we could fit this into the dbuf or not).
Regarding intel_can_enable_sagv implementation I will reply inline
also below.

> 
> > +		}
> > +		if (can_sagv)
> > +			state->crtc_sagv_mask |= BIT(crtc->pipe);
> > +	}
> > +}
> > +
> > +bool intel_can_enable_sagv(struct intel_atomic_state *state)
> > +{
> > +	struct drm_device *dev = state->base.dev;
> > +	struct drm_i915_private *dev_priv = to_i915(dev);
> > +	enum pipe pipe;
> > +
> > +	if (INTEL_GEN(dev_priv) >= 12)
> > +		tgl_set_sagv_mask(state);
> > +	else if (INTEL_GEN(dev_priv) == 11)
> > +		icl_set_sagv_mask(state);
> > +	else
> > +		skl_set_sagv_mask(state);
> > +
> > +	/*
> > +	 * For SAGV we need to account all the pipes,
> > +	 * not only the ones which are in state currently.
> > +	 */
> > +	for_each_pipe(dev_priv, pipe) {
> > +		unsigned int active_pipes;
> > +		/*
> > +		 * Figure out if we are changing active pipes here
> > +		 * then after commit dev_priv->active_pipes will
> > +		 * anyway be assigned to state->active_pipes.
> > +		 */
> > +		if (state->active_pipe_changes)
> > +			active_pipes = state->active_pipes;
> > +		else
> > +			active_pipes = dev_priv->active_pipes;
> > +
> > +		/* Skip if pipe is inactive */
> > +		if (!(BIT(pipe) & active_pipes))
> > +			continue;
> > +
> > +		/*
> > +		 * Pipe can be active in this state or in dev_priv
> > +		 * as we haven't committed thise changes yet(and we
> > shouldn't)
> > +		 * - we need to check both.
> > +		 */
> > +		if (state->active_pipe_changes & BIT(pipe)) {
> > +			bool state_sagv_masked = \
> > +				(BIT(pipe) & state->crtc_sagv_mask) ==
> > 0;
> > +			if (state_sagv_masked)
> > +				return false;
> > +		} else {
> > +			bool sagv_masked = \
> > +				(BIT(pipe) & dev_priv->crtc_sagv_mask)
> > == 0;
> 
> If we're not changing which pipes are active, then we didn't globally
> lock everything at the beginning of this atomic transaction; we can
> have
> racing commits against different CRTC's.  So when you look at
> dev_priv->crtc_sagv_mask here, the value might change immediately
> afterward if a commit on a different CRTC completed in the meantime.
> 
> I don't think we want to look at other CRTC's outside our current
> transaction here.  We should just figure out whether our own CRTC's
> are
> okay with SAGV or not.  Then in the commit phase we'd need to grab
> some
> kind of SAGV lock, combine our local "SAGV okay" with the global
> "SAGV
> okay" and enable/disable as necessary.
> 
> Some kind of reference-counting mechanism might make this
> simpler...is
> there any way we could tie this in with power domains (e.g., adding a
> "SAGV off" power domain and power well that we grab during commit
> when
> we've calculated that our own crtcs can't cope with SAGV latency)?
> 

Yes, however the problem is that we need to know whether we can enable
SAGV or not already on calculation stage(i.e during
intel_atomic_check). Bandwidth checking code calls this as according to
BSpec if SAGV can't be enabled - it should stick to the highest
bandwidth point only. Also during ddb/wm calculation we call this check
to understand which wm level we should fit into dbuf.
And we can't make this decision based on this state only as it might
not contain all the crtcs - however if any of the pipes, even those
which are not in this state can't tolerate SAGV - then we can't enable
it anyway, even if those crtc in this state can.
That is why I have to iterate through all of the crtcs. However it is
good that you pointed out that those might change on the fly, I guess
we need to protect those from being changed by other commits.
We already do something similar by using the serialized global state from
Ville, i.e. once we detect that we are changing some dev_priv global
state variables, we mark that this commit should be serialized by adding
all the crtcs to the state.

> 
> > +			if (sagv_masked)
> > +				return false;
> > +		}
> > +	}
> >  	return true;
> >  }
> >  
> > @@ -3925,6 +4045,7 @@ static int skl_compute_wm_params(const struct
> > intel_crtc_state *crtc_state,
> >  				 int color_plane);
> >  static void skl_compute_plane_wm(const struct intel_crtc_state
> > *crtc_state,
> >  				 int level,
> > +				 u32 latency,
> >  				 const struct skl_wm_params *wp,
> >  				 const struct skl_wm_level
> > *result_prev,
> >  				 struct skl_wm_level *result /* out
> > */);
> > @@ -3947,7 +4068,10 @@ skl_cursor_allocation(const struct
> > intel_crtc_state *crtc_state,
> >  	WARN_ON(ret);
> >  
> >  	for (level = 0; level <= max_level; level++) {
> > -		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
> > +		u32 latency = dev_priv->wm.skl_latency[level];
> > +
> > +		skl_compute_plane_wm(crtc_state, level, latency, &wp,
> > &wm, &wm);
> > +
> >  		if (wm.min_ddb_alloc == U16_MAX)
> >  			break;
> >  
> > @@ -4212,6 +4336,68 @@ icl_get_total_relative_data_rate(struct
> > intel_crtc_state *crtc_state,
> >  	return total_data_rate;
> >  }
> >  
> > +static int
> > +tgl_check_pipe_fits_sagv_wm(struct intel_crtc_state *crtc_state,
> > +		      struct skl_ddb_allocation *ddb /* out */)
> > +{
> > +	struct drm_crtc *crtc = crtc_state->base.crtc;
> > +	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
> > +	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> > +	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
> > +	u16 alloc_size;
> > +	u16 total[I915_MAX_PLANES] = {};
> > +	u64 total_data_rate;
> > +	enum plane_id plane_id;
> > +	int num_active;
> > +	u64 plane_data_rate[I915_MAX_PLANES] = {};
> > +	u32 blocks;
> > +
> > +	/*
> > +	 * No need to check gen here, we call this only for gen12
> > +	 */
> > +	total_data_rate =
> > +		icl_get_total_relative_data_rate(crtc_state,
> > +						 plane_data_rate);
> > +
> > +	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
> > +					   total_data_rate,
> > +					   ddb, alloc, &num_active);
> > +	alloc_size = skl_ddb_entry_size(alloc);
> > +	if (alloc_size == 0)
> > +		return -ENOSPC;
> > +
> > +	/* Allocate fixed number of blocks for cursor. */
> > +	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state,
> > num_active);
> > +	alloc_size -= total[PLANE_CURSOR];
> > +	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
> > +		alloc->end - total[PLANE_CURSOR];
> > +	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
> > +
> > +	/*
> > +	 * Do check if we can fit L0 + sagv_block_time and
> > +	 * disable SAGV if we can't.
> > +	 */
> > +	blocks = 0;
> > +	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
> > +		const struct skl_plane_wm *wm =
> > +			&crtc_state->wm.skl.optimal.planes[plane_id];
> > +
> > +		if (plane_id == PLANE_CURSOR) {
> > +			if (WARN_ON(wm->sagv_wm0.min_ddb_alloc >
> > +				    total[PLANE_CURSOR])) {
> > +				blocks = U32_MAX;
> > +				break;
> > +			}
> > +			continue;
> > +		}
> > +
> > +		blocks += wm->sagv_wm0.min_ddb_alloc;
> > +		if (blocks > alloc_size)
> > +			return -ENOSPC;
> > +	}
> > +	return 0;
> > +}
> > +
> >  static int
> >  skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
> >  		      struct skl_ddb_allocation *ddb /* out */)
> > @@ -4641,6 +4827,7 @@ static bool skl_wm_has_lines(struct
> > drm_i915_private *dev_priv, int level)
> >  
> >  static void skl_compute_plane_wm(const struct intel_crtc_state
> > *crtc_state,
> >  				 int level,
> > +				 u32 latency,
> >  				 const struct skl_wm_params *wp,
> >  				 const struct skl_wm_level
> > *result_prev,
> >  				 struct skl_wm_level *result /* out */)
> 
> It doesn't look like this latency parameter gets used (it gets masked
> by
> a local latency variable still.

Actually no, there are no other latency variables declared in
skl_compute_plane_wm (did I accidentally fix this somehow?) as far as I can see.
Anyway, thank you for a really precise review.

> 
> 
> > @@ -4767,20 +4954,45 @@ static void skl_compute_plane_wm(const
> > struct intel_crtc_state *crtc_state,
> >  static void
> >  skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
> >  		      const struct skl_wm_params *wm_params,
> > -		      struct skl_wm_level *levels)
> > +		      struct skl_plane_wm *plane_wm,
> > +		      bool yuv)
> >  {
> >  	struct drm_i915_private *dev_priv = to_i915(crtc_state-
> > >uapi.crtc->dev);
> >  	int level, max_level = ilk_wm_max_level(dev_priv);
> > +	/*
> > +	 * Check which kind of plane is it and based on that calculate
> > +	 * correspondent WM levels.
> > +	 */
> > +	struct skl_wm_level *levels = yuv ? plane_wm->uv_wm : plane_wm-
> > >wm;
> >  	struct skl_wm_level *result_prev = &levels[0];
> >  
> >  	for (level = 0; level <= max_level; level++) {
> >  		struct skl_wm_level *result = &levels[level];
> > +		u32 latency = dev_priv->wm.skl_latency[level];
> >  
> > -		skl_compute_plane_wm(crtc_state, level, wm_params,
> > -				     result_prev, result);
> > +		skl_compute_plane_wm(crtc_state, level, latency,
> > +				     wm_params, result_prev, result);
> >  
> >  		result_prev = result;
> >  	}
> > +	/*
> > +	 * For Gen12 if it is an L0 we need to also
> > +	 * consider sagv_block_time when calculating
> > +	 * L0 watermark - we will need that when making
> > +	 * a decision whether enable SAGV or not.
> > +	 * For older gens we agreed to copy L0 value for
> > +	 * compatibility.
> > +	 */
> > +	if ((INTEL_GEN(dev_priv) >= 12)) {
> > +		u32 latency = dev_priv->wm.skl_latency[0];
> > +
> > +		latency += dev_priv->sagv_block_time_us;
> > +		skl_compute_plane_wm(crtc_state, 0, latency,
> > +		     wm_params, &levels[0],
> > +		    &plane_wm->sagv_wm0);
> > +	} else
> > +		memcpy(&plane_wm->sagv_wm0, &levels[0],
> > +			sizeof(struct skl_wm_level));
> >  }
> >  
> >  static u32
> > @@ -4873,7 +5085,7 @@ static int skl_build_plane_wm_single(struct
> > intel_crtc_state *crtc_state,
> >  	if (ret)
> >  		return ret;
> >  
> > -	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
> > +	skl_compute_wm_levels(crtc_state, &wm_params, wm, false);
> >  	skl_compute_transition_wm(crtc_state, &wm_params, wm);
> >  
> >  	return 0;
> > @@ -4895,7 +5107,7 @@ static int skl_build_plane_wm_uv(struct
> > intel_crtc_state *crtc_state,
> >  	if (ret)
> >  		return ret;
> >  
> > -	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
> > +	skl_compute_wm_levels(crtc_state, &wm_params, wm, true);
> >  
> >  	return 0;
> >  }
> > @@ -5167,6 +5379,8 @@ skl_ddb_add_affected_planes(const struct
> > intel_crtc_state *old_crtc_state,
> >  	return 0;
> >  }
> >  
> > +static void tgl_set_sagv_wm0(struct intel_atomic_state *state);
> > +
> >  static int
> >  skl_compute_ddb(struct intel_atomic_state *state)
> >  {
> > @@ -5177,6 +5391,11 @@ skl_compute_ddb(struct intel_atomic_state
> > *state)
> >  	struct intel_crtc *crtc;
> >  	int ret, i;
> >  
> > +	/* For Gen12+ for SAGV we have a special L0 wm values */
> > +	if (INTEL_GEN(dev_priv) >= 12)
> > +		if (intel_can_enable_sagv(state))
> > +			tgl_set_sagv_wm0(state);
> > +
> >  	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
> >  
> >  	for_each_oldnew_intel_crtc_in_state(state, crtc,
> > old_crtc_state,
> > @@ -5443,6 +5662,56 @@ static int skl_wm_add_affected_planes(struct
> > intel_atomic_state *state,
> >  	return 0;
> >  }
> >  
> > +void tgl_set_sagv_wm0(struct intel_atomic_state *state)
> > +{
> > +	struct intel_crtc *crtc;
> > +	struct intel_crtc_state *new_crtc_state;
> > +	struct intel_crtc_state *old_crtc_state;
> > +	struct drm_device *dev = state->base.dev;
> > +	const struct drm_i915_private *dev_priv = to_i915(dev);
> > +	int i;
> > +
> > +	/*
> > +	 * If we determined that we can actually enable SAGV, then
> > +	 * actually use those levels tgl_check_pipe_fits_sagv_wm
> > +	 * has already taken care of checking if L0 + sagv block time
> > +	 * fits into ddb.
> > +	 */
> > +	for_each_oldnew_intel_crtc_in_state(state, crtc,
> > old_crtc_state,
> > +				    new_crtc_state, i) {
> > +		struct intel_plane *plane;
> > +
> > +		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc,
> > plane) {
> > +			enum plane_id plane_id = plane->id;
> > +
> > +			struct skl_plane_wm *plane_wm = \
> > +			    &new_crtc_state-
> > >wm.skl.optimal.planes[plane_id];
> > +			struct skl_wm_level *sagv_wm0 = &plane_wm-
> > >sagv_wm0;
> > +			struct skl_wm_level *l0_wm0 = &plane_wm->wm[0];
> > +
> > +			memcpy(l0_wm0, sagv_wm0, sizeof(struct
> > skl_wm_level));
> > +		}
> > +	}
> > +}
> > +
> > +static void tgl_set_sagv_mask(struct intel_atomic_state *state)
> > +{
> > +	struct intel_crtc *crtc;
> > +	struct intel_crtc_state *new_crtc_state;
> > +	struct intel_crtc_state *old_crtc_state;
> > +	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
> > +	int ret, i;
> > +
> > +	for_each_oldnew_intel_crtc_in_state(state, crtc,
> > old_crtc_state,
> > +					    new_crtc_state, i) {
> > +		ret = tgl_check_pipe_fits_sagv_wm(new_crtc_state, ddb);
> > +		if (!ret) {
> > +			int pipe_bit = BIT(crtc->pipe);
> > +			state->crtc_sagv_mask |= pipe_bit;
> > +		}
> > +	}
> > +}
> > +
> >  static int
> >  skl_compute_wm(struct intel_atomic_state *state)
> >  {
> > @@ -5455,6 +5724,9 @@ skl_compute_wm(struct intel_atomic_state
> > *state)
> >  	/* Clear all dirty flags */
> >  	results->dirty_pipes = 0;
> >  
> > +	/* If we exit before check is done */
> > +	state->crtc_sagv_mask = 0;
> > +
> >  	ret = skl_ddb_add_affected_pipes(state);
> >  	if (ret)
> >  		return ret;
> > -- 
> > 2.17.1
> > 
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx@lists.freedesktop.org
> > https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
>

Patch
diff mbox series

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 876fc25968bf..7ea1e7518ab6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -14855,6 +14855,10 @@  static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 		if (dev_priv->display.optimize_watermarks)
 			dev_priv->display.optimize_watermarks(state,
 							      new_crtc_state);
+		if (state->crtc_sagv_mask & BIT(crtc->pipe))
+			dev_priv->crtc_sagv_mask |= BIT(crtc->pipe);
+		else
+			dev_priv->crtc_sagv_mask &= ~BIT(crtc->pipe);
 	}
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index fadd9853f966..fb274538af23 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -490,6 +490,14 @@  struct intel_atomic_state {
 	 */
 	u8 active_pipe_changes;
 
+	/*
+	 * Contains a mask which reflects whether the corresponding pipe
+	 * can tolerate SAGV or not, so that we can make a decision
+	 * at atomic_commit_tail stage, whether we enable it or not
+	 * based on global state in dev_priv.
+	 */
+	u32 crtc_sagv_mask;
+
 	u8 active_pipes;
 	/* minimum acceptable cdclk for each pipe */
 	int min_cdclk[I915_MAX_PIPES];
@@ -670,6 +678,7 @@  struct skl_plane_wm {
 	struct skl_wm_level wm[8];
 	struct skl_wm_level uv_wm[8];
 	struct skl_wm_level trans_wm;
+	struct skl_wm_level sagv_wm0;
 	bool is_planar;
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7e0f67babe20..4f4e2e839513 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1176,6 +1176,12 @@  struct drm_i915_private {
 
 	u32 sagv_block_time_us;
 
+	/*
+	 * Contains a bit mask indicating whether the corresponding
+	 * pipe allows SAGV or not.
+	 */
+	u32 crtc_sagv_mask;
+
 	struct {
 		/*
 		 * Raw watermark latency values:
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2d389e437e87..c792dd168742 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3740,7 +3740,7 @@  intel_disable_sagv(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-bool intel_can_enable_sagv(struct intel_atomic_state *state)
+static void skl_set_sagv_mask(struct intel_atomic_state *state)
 {
 	struct drm_device *dev = state->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3750,21 +3750,23 @@  bool intel_can_enable_sagv(struct intel_atomic_state *state)
 	enum pipe pipe;
 	int level, latency;
 
+	state->crtc_sagv_mask = 0;
+
 	if (!intel_has_sagv(dev_priv))
-		return false;
+		return;
 
 	/*
 	 * If there are no active CRTCs, no additional checks need be performed
 	 */
 	if (hweight8(state->active_pipes) == 0)
-		return true;
+		return;
 
 	/*
 	 * SKL+ workaround: bspec recommends we disable SAGV when we have
 	 * more then one pipe enabled
 	 */
 	if (hweight8(state->active_pipes) > 1)
-		return false;
+		return;
 
 	/* Since we're now guaranteed to only have one active CRTC... */
 	pipe = ffs(state->active_pipes) - 1;
@@ -3772,7 +3774,7 @@  bool intel_can_enable_sagv(struct intel_atomic_state *state)
 	crtc_state = to_intel_crtc_state(crtc->base.state);
 
 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
-		return false;
+		return;
 
 	for_each_intel_plane_on_crtc(dev, crtc, plane) {
 		struct skl_plane_wm *wm =
@@ -3800,9 +3802,127 @@  bool intel_can_enable_sagv(struct intel_atomic_state *state)
 		 * can't enable SAGV.
 		 */
 		if (latency < dev_priv->sagv_block_time_us)
-			return false;
+			return;
 	}
 
+	state->crtc_sagv_mask |= BIT(crtc->pipe);
+}
+
+static void tgl_set_sagv_mask(struct intel_atomic_state *state);
+
+static void icl_set_sagv_mask(struct intel_atomic_state *state)
+{
+	struct drm_device *dev = state->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *new_crtc_state;
+	int level, latency;
+	int i;
+	int plane_id;
+
+	state->crtc_sagv_mask = 0;
+
+	if (!intel_has_sagv(dev_priv))
+		return;
+
+	/*
+	 * If there are no active CRTCs, no additional checks need be performed
+	 */
+	if (hweight8(state->active_pipes) == 0)
+		return;
+
+	for_each_new_intel_crtc_in_state(state, crtc,
+					     new_crtc_state, i) {
+		unsigned int flags = crtc->base.state->adjusted_mode.flags;
+		bool can_sagv;
+
+		if (flags & DRM_MODE_FLAG_INTERLACE)
+			continue;
+
+		if (!new_crtc_state->base.active)
+			continue;
+
+		can_sagv = true;
+		for_each_plane_id_on_crtc(crtc, plane_id) {
+			struct skl_plane_wm *wm =
+				&new_crtc_state->wm.skl.optimal.planes[plane_id];
+
+			/* Skip this plane if it's not enabled */
+			if (!wm->wm[0].plane_en)
+				continue;
+
+			/* Find the highest enabled wm level for this plane */
+			for (level = ilk_wm_max_level(dev_priv);
+			     !wm->wm[level].plane_en; --level) {
+			}
+
+			latency = dev_priv->wm.skl_latency[level];
+
+			/*
+			 * If any of the planes on this pipe don't enable
+			 * wm levels that incur memory latencies higher than
+			 * sagv_block_time_us we can't enable SAGV.
+			 */
+			if (latency < dev_priv->sagv_block_time_us) {
+				can_sagv = false;
+				break;
+			}
+		}
+		if (can_sagv)
+			state->crtc_sagv_mask |= BIT(crtc->pipe);
+	}
+}
+
+bool intel_can_enable_sagv(struct intel_atomic_state *state)
+{
+	struct drm_device *dev = state->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	enum pipe pipe;
+
+	if (INTEL_GEN(dev_priv) >= 12)
+		tgl_set_sagv_mask(state);
+	else if (INTEL_GEN(dev_priv) == 11)
+		icl_set_sagv_mask(state);
+	else
+		skl_set_sagv_mask(state);
+
+	/*
+	 * For SAGV we need to account all the pipes,
+	 * not only the ones which are in state currently.
+	 */
+	for_each_pipe(dev_priv, pipe) {
+		unsigned int active_pipes;
+		/*
+		 * Figure out if we are changing active pipes here
+		 * then after commit dev_priv->active_pipes will
+		 * anyway be assigned to state->active_pipes.
+		 */
+		if (state->active_pipe_changes)
+			active_pipes = state->active_pipes;
+		else
+			active_pipes = dev_priv->active_pipes;
+
+		/* Skip if pipe is inactive */
+		if (!(BIT(pipe) & active_pipes))
+			continue;
+
+		/*
+		 * Pipe can be active in this state or in dev_priv
+		 * as we haven't committed these changes yet (and we shouldn't)
+		 * - we need to check both.
+		 */
+		if (state->active_pipe_changes & BIT(pipe)) {
+			bool state_sagv_masked = \
+				(BIT(pipe) & state->crtc_sagv_mask) == 0;
+			if (state_sagv_masked)
+				return false;
+		} else {
+			bool sagv_masked = \
+				(BIT(pipe) & dev_priv->crtc_sagv_mask) == 0;
+			if (sagv_masked)
+				return false;
+		}
+	}
 	return true;
 }
 
@@ -3925,6 +4045,7 @@  static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 				 int color_plane);
 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
 				 int level,
+				 u32 latency,
 				 const struct skl_wm_params *wp,
 				 const struct skl_wm_level *result_prev,
 				 struct skl_wm_level *result /* out */);
@@ -3947,7 +4068,10 @@  skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
 	WARN_ON(ret);
 
 	for (level = 0; level <= max_level; level++) {
-		skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+		u32 latency = dev_priv->wm.skl_latency[level];
+
+		skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm);
+
 		if (wm.min_ddb_alloc == U16_MAX)
 			break;
 
@@ -4212,6 +4336,68 @@  icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
 	return total_data_rate;
 }
 
+static int
+tgl_check_pipe_fits_sagv_wm(struct intel_crtc_state *crtc_state,
+		      struct skl_ddb_allocation *ddb /* out */)
+{
+	struct drm_crtc *crtc = crtc_state->base.crtc;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
+	u16 alloc_size;
+	u16 total[I915_MAX_PLANES] = {};
+	u64 total_data_rate;
+	enum plane_id plane_id;
+	int num_active;
+	u64 plane_data_rate[I915_MAX_PLANES] = {};
+	u32 blocks;
+
+	/*
+	 * No need to check gen here, we call this only for gen12
+	 */
+	total_data_rate =
+		icl_get_total_relative_data_rate(crtc_state,
+						 plane_data_rate);
+
+	skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state,
+					   total_data_rate,
+					   ddb, alloc, &num_active);
+	alloc_size = skl_ddb_entry_size(alloc);
+	if (alloc_size == 0)
+		return -ENOSPC;
+
+	/* Allocate fixed number of blocks for cursor. */
+	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
+	alloc_size -= total[PLANE_CURSOR];
+	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+		alloc->end - total[PLANE_CURSOR];
+	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+
+	/*
+	 * Do check if we can fit L0 + sagv_block_time and
+	 * disable SAGV if we can't.
+	 */
+	blocks = 0;
+	for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+		const struct skl_plane_wm *wm =
+			&crtc_state->wm.skl.optimal.planes[plane_id];
+
+		if (plane_id == PLANE_CURSOR) {
+			if (WARN_ON(wm->sagv_wm0.min_ddb_alloc >
+				    total[PLANE_CURSOR])) {
+				blocks = U32_MAX;
+				break;
+			}
+			continue;
+		}
+
+		blocks += wm->sagv_wm0.min_ddb_alloc;
+		if (blocks > alloc_size)
+			return -ENOSPC;
+	}
+	return 0;
+}
+
 static int
 skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
 		      struct skl_ddb_allocation *ddb /* out */)
@@ -4641,6 +4827,7 @@  static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
 
 static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
 				 int level,
+				 u32 latency,
 				 const struct skl_wm_params *wp,
 				 const struct skl_wm_level *result_prev,
 				 struct skl_wm_level *result /* out */)
@@ -4767,20 +4954,45 @@  static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
 static void
 skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
 		      const struct skl_wm_params *wm_params,
-		      struct skl_wm_level *levels)
+		      struct skl_plane_wm *plane_wm,
+		      bool yuv)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
 	int level, max_level = ilk_wm_max_level(dev_priv);
+	/*
+	 * Check which kind of plane is it and based on that calculate
+	 * correspondent WM levels.
+	 */
+	struct skl_wm_level *levels = yuv ? plane_wm->uv_wm : plane_wm->wm;
 	struct skl_wm_level *result_prev = &levels[0];
 
 	for (level = 0; level <= max_level; level++) {
 		struct skl_wm_level *result = &levels[level];
+		u32 latency = dev_priv->wm.skl_latency[level];
 
-		skl_compute_plane_wm(crtc_state, level, wm_params,
-				     result_prev, result);
+		skl_compute_plane_wm(crtc_state, level, latency,
+				     wm_params, result_prev, result);
 
 		result_prev = result;
 	}
+	/*
+	 * For Gen12 if it is an L0 we need to also
+	 * consider sagv_block_time when calculating
+	 * L0 watermark - we will need that when making
+	 * a decision whether to enable SAGV or not.
+	 * For older gens we agreed to copy L0 value for
+	 * compatibility.
+	 */
+	if ((INTEL_GEN(dev_priv) >= 12)) {
+		u32 latency = dev_priv->wm.skl_latency[0];
+
+		latency += dev_priv->sagv_block_time_us;
+		skl_compute_plane_wm(crtc_state, 0, latency,
+		     wm_params, &levels[0],
+		    &plane_wm->sagv_wm0);
+	} else
+		memcpy(&plane_wm->sagv_wm0, &levels[0],
+			sizeof(struct skl_wm_level));
 }
 
 static u32
@@ -4873,7 +5085,7 @@  static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
+	skl_compute_wm_levels(crtc_state, &wm_params, wm, false);
 	skl_compute_transition_wm(crtc_state, &wm_params, wm);
 
 	return 0;
@@ -4895,7 +5107,7 @@  static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
 	if (ret)
 		return ret;
 
-	skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
+	skl_compute_wm_levels(crtc_state, &wm_params, wm, true);
 
 	return 0;
 }
@@ -5167,6 +5379,8 @@  skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
 	return 0;
 }
 
+static void tgl_set_sagv_wm0(struct intel_atomic_state *state);
+
 static int
 skl_compute_ddb(struct intel_atomic_state *state)
 {
@@ -5177,6 +5391,11 @@  skl_compute_ddb(struct intel_atomic_state *state)
 	struct intel_crtc *crtc;
 	int ret, i;
 
+	/* For Gen12+ for SAGV we have a special L0 wm values */
+	if (INTEL_GEN(dev_priv) >= 12)
+		if (intel_can_enable_sagv(state))
+			tgl_set_sagv_wm0(state);
+
 	memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
@@ -5443,6 +5662,56 @@  static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
 	return 0;
 }
 
+void tgl_set_sagv_wm0(struct intel_atomic_state *state)
+{
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *new_crtc_state;
+	struct intel_crtc_state *old_crtc_state;
+	struct drm_device *dev = state->base.dev;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
+	int i;
+
+	/*
+	 * If we determined that we can actually enable SAGV, then
+	 * actually use those levels; tgl_check_pipe_fits_sagv_wm
+	 * has already taken care of checking if L0 + sagv block time
+	 * fits into ddb.
+	 */
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+				    new_crtc_state, i) {
+		struct intel_plane *plane;
+
+		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+			enum plane_id plane_id = plane->id;
+
+			struct skl_plane_wm *plane_wm = \
+			    &new_crtc_state->wm.skl.optimal.planes[plane_id];
+			struct skl_wm_level *sagv_wm0 = &plane_wm->sagv_wm0;
+			struct skl_wm_level *l0_wm0 = &plane_wm->wm[0];
+
+			memcpy(l0_wm0, sagv_wm0, sizeof(struct skl_wm_level));
+		}
+	}
+}
+
+static void tgl_set_sagv_mask(struct intel_atomic_state *state)
+{
+	struct intel_crtc *crtc;
+	struct intel_crtc_state *new_crtc_state;
+	struct intel_crtc_state *old_crtc_state;
+	struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+	int ret, i;
+
+	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+					    new_crtc_state, i) {
+		ret = tgl_check_pipe_fits_sagv_wm(new_crtc_state, ddb);
+		if (!ret) {
+			int pipe_bit = BIT(crtc->pipe);
+			state->crtc_sagv_mask |= pipe_bit;
+		}
+	}
+}
+
 static int
 skl_compute_wm(struct intel_atomic_state *state)
 {
@@ -5455,6 +5724,9 @@  skl_compute_wm(struct intel_atomic_state *state)
 	/* Clear all dirty flags */
 	results->dirty_pipes = 0;
 
+	/* If we exit before check is done */
+	state->crtc_sagv_mask = 0;
+
 	ret = skl_ddb_add_affected_pipes(state);
 	if (ret)
 		return ret;