| Message ID | 20201027203955.28032-4-ville.syrjala@linux.intel.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | drm/i915: Remainder of dbuf state stuff |
On Tue, Oct 27, 2020 at 10:39:50PM +0200, Ville Syrjala wrote:
> From: Ville Syrjälä <ville.syrjala@linux.intel.com>
>
> Put the code into a function with a descriptive name. Also relocate
> the code a bit to help future work.
>
> Cc: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
> Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>

Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>

> ---
>  drivers/gpu/drm/i915/intel_pm.c | 36 +++++++++++++++++++--------------
>  drivers/gpu/drm/i915/intel_pm.h |  1 -
>  2 files changed, 21 insertions(+), 16 deletions(-)
```diff
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 260d3cf24db3..8083785237ba 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4024,6 +4024,24 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
 	return 0;
 }
 
+static int intel_dbuf_size(struct drm_i915_private *dev_priv)
+{
+	int ddb_size = INTEL_INFO(dev_priv)->ddb_size;
+
+	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
+
+	if (INTEL_GEN(dev_priv) < 11)
+		return ddb_size - 4; /* 4 blocks for bypass path allocation */
+
+	return ddb_size;
+}
+
+static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
+{
+	return intel_dbuf_size(dev_priv) /
+		INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+}
+
 /*
  * Calculate initial DBuf slice offset, based on slice size
  * and mask(i.e if slice size is 1024 and second slice is enabled
@@ -4045,22 +4063,11 @@ icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
 	return offset;
 }
 
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
-{
-	u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
-	drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
-
-	if (INTEL_GEN(dev_priv) < 11)
-		return ddb_size - 4; /* 4 blocks for bypass path allocation */
-
-	return ddb_size;
-}
-
 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
 			    const struct skl_ddb_entry *entry)
 {
 	u32 slice_mask = 0;
-	u16 ddb_size = intel_get_ddb_size(dev_priv);
+	u16 ddb_size = intel_dbuf_size(dev_priv);
 	u16 num_supported_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
 	u16 slice_size = ddb_size / num_supported_slices;
 	u16 start_slice;
@@ -4142,9 +4149,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
 		return 0;
 	}
 
-	ddb_size = intel_get_ddb_size(dev_priv);
-
-	slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+	ddb_size = intel_dbuf_size(dev_priv);
+	slice_size = intel_dbuf_slice_size(dev_priv);
 
 	/*
 	 * If the state doesn't change the active CRTC's or there is no
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index eab83e251dd5..00910bc01407 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -40,7 +40,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 			       struct skl_ddb_entry *ddb_y,
 			       struct skl_ddb_entry *ddb_uv);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
-u16 intel_get_ddb_size(struct drm_i915_private *dev_priv);
 u32 skl_ddb_dbuf_slice_mask(struct drm_i915_private *dev_priv,
 			    const struct skl_ddb_entry *entry);
 void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
```
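
For readers skimming the patch, the following is a minimal userspace sketch of what the two new helpers compute; it is not kernel code. The struct name, field layout and the platform numbers are illustrative assumptions (only the "4 blocks for bypass path" rule below gen 11 and the size/num_slices division come from the patch itself; the 2048-block, 2-slice example is chosen to match the "slice size is 1024" comment in the diff).

```c
/*
 * Standalone sketch of the refactored DBuf size helpers.
 * Everything here is illustrative: struct fake_device_info and the
 * example values stand in for INTEL_INFO(dev_priv) fields.
 */
#include <assert.h>
#include <stdio.h>

struct fake_device_info {
	int gen;			/* display generation */
	int ddb_size;			/* total DBuf size in blocks */
	int num_supported_dbuf_slices;	/* DBuf slices on this platform */
};

static int dbuf_size(const struct fake_device_info *info)
{
	assert(info->ddb_size != 0);	/* stands in for drm_WARN_ON() */

	if (info->gen < 11)
		return info->ddb_size - 4; /* 4 blocks reserved for the bypass path */

	return info->ddb_size;
}

static int dbuf_slice_size(const struct fake_device_info *info)
{
	return dbuf_size(info) / info->num_supported_dbuf_slices;
}

int main(void)
{
	/* Illustrative gen11-style platform: 2048 blocks split across 2 slices. */
	struct fake_device_info icl_like = {
		.gen = 11, .ddb_size = 2048, .num_supported_dbuf_slices = 2,
	};
	/* Illustrative gen9-style platform: one slice, bypass blocks subtracted. */
	struct fake_device_info skl_like = {
		.gen = 9, .ddb_size = 896, .num_supported_dbuf_slices = 1,
	};

	printf("gen11-like slice size: %d\n", dbuf_slice_size(&icl_like)); /* 1024 */
	printf("gen9-like  slice size: %d\n", dbuf_slice_size(&skl_like)); /* 892 */
	return 0;
}
```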