Message ID | 20241010224311.50133-5-matthew.s.atwood@intel.com (mailing list archive)
---|---
State | New
Series | Add xe3lpd edp enabling
On Thu, 2024-10-10 at 15:43 -0700, Matt Atwood wrote:
> From: Matt Roper <matthew.d.roper@intel.com>
> 
> There are some minor changes to pmdemand handling on Xe3:
> - Active scalers are no longer tracked. We can simply skip the readout
>   and programming of this field.
> - Active dbuf slices are no longer tracked. We should skip the readout
>   and programming of this field and also make sure that it stays 0 in
>   our software bookkeeping so that we won't erroneously return true
>   from intel_pmdemand_needs_update() due to mismatches.
> - Even though there aren't enough pipes to utilize them, the size of
>   the 'active pipes' field has expanded to four bits, taking over the
>   register bits previously used for dbuf slices. Since the lower bits
>   of the mask have moved, we need to update our reads/writes to handle
>   this properly.
> 
> v2: active pipes is no longer always max 3, add in the ability to go to
> 4 for PTL.
> 
> Bspec: 68883, 69125
> Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
> Signed-off-by: Matt Atwood <matthew.s.atwood@intel.com>
> ---
>  drivers/gpu/drm/i915/display/intel_pmdemand.c | 65 +++++++++++++------
>  drivers/gpu/drm/i915/display/intel_pmdemand.h |  4 +-
>  drivers/gpu/drm/i915/i915_reg.h               |  1 +
>  3 files changed, 48 insertions(+), 22 deletions(-)

[snip]

> @@ -327,10 +333,13 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
>  	if (IS_ERR(new_dbuf_state))
>  		return PTR_ERR(new_dbuf_state);
>  
> +	if (DISPLAY_VER(i915) >= 30) max_active_pipes = 4;

Trailing statements should be on the next line.

With that fixed,

Reviewed-by: Vinod Govindapillai <vinod.govindapillai@intel.com>
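For reference, the fix Vinod is asking for is simply moving the trailing statement onto its own line; a minimal sketch of the corrected hunk (not necessarily the exact diff that was merged):

	if (DISPLAY_VER(i915) >= 30)
		max_active_pipes = 4;

	new_pmdemand_state->params.active_pipes =
		min_t(u8, hweight8(new_dbuf_state->active_pipes), max_active_pipes);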
Quoting Matt Atwood (2024-10-10 19:43:05-03:00)
>From: Matt Roper <matthew.d.roper@intel.com>
>
>There are some minor changes to pmdemand handling on Xe3:

[snip]

>@@ -327,10 +333,13 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
> 	if (IS_ERR(new_dbuf_state))
> 		return PTR_ERR(new_dbuf_state);
> 
>+	if (DISPLAY_VER(i915) >= 30) max_active_pipes = 4;

We should be using INTEL_NUM_PIPES(i915) to get the correct runtime
value (to account for the case of any pipe fused-off).

-- 
Gustavo Sousa
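A minimal sketch of what that suggestion could look like, assuming INTEL_NUM_PIPES() already reflects fused-off pipes at runtime (the final form is up to the patch author):

	/* cap against the pipes this particular part actually exposes */
	new_pmdemand_state->params.active_pipes =
		min_t(u8, hweight8(new_dbuf_state->active_pipes),
		      INTEL_NUM_PIPES(i915));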
On Thu, 10 Oct 2024, Matt Atwood <matthew.s.atwood@intel.com> wrote:
> From: Matt Roper <matthew.d.roper@intel.com>
> 
> There are some minor changes to pmdemand handling on Xe3:

[snip]

> @@ -258,6 +258,7 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
>  
>  static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
>  {
> +	struct drm_i915_private *i915 = to_i915(state->base.dev);

Hmmh, we really shouldn't be adding new struct drm_i915_private
variables or parameters in display code anymore. struct intel_display
instead.

BR,
Jani.
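A rough sketch of the direction Jani is pointing at, assuming the to_intel_display() helper and the struct intel_display flavour of DISPLAY_VER() used by recent display code (the exact conversion is left to the author):

	static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
	{
		/* hypothetical: derive struct intel_display instead of drm_i915_private */
		struct intel_display *display = to_intel_display(state->base.dev);

		...

		if (DISPLAY_VER(display) < 30) {
			/* dbuf slice bookkeeping only exists on pre-Xe3 */
		}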
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index ceaf9e3147da..3a820dd53b13 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -258,6 +258,7 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state)
 
 static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
 {
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
 	const struct intel_bw_state *new_bw_state, *old_bw_state;
 	const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
 	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
@@ -274,12 +275,16 @@ static bool intel_pmdemand_needs_update(struct intel_atomic_state *state)
 	new_dbuf_state = intel_atomic_get_new_dbuf_state(state);
 	old_dbuf_state = intel_atomic_get_old_dbuf_state(state);
 	if (new_dbuf_state &&
-	    (new_dbuf_state->active_pipes !=
-	     old_dbuf_state->active_pipes ||
-	     new_dbuf_state->enabled_slices !=
-	     old_dbuf_state->enabled_slices))
+	    new_dbuf_state->active_pipes != old_dbuf_state->active_pipes)
 		return true;
 
+	if (DISPLAY_VER(i915) < 30) {
+		if (new_dbuf_state &&
+		    new_dbuf_state->enabled_slices !=
+		    old_dbuf_state->enabled_slices)
+			return true;
+	}
+
 	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
 	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
 	if (new_cdclk_state &&
@@ -304,6 +309,7 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
 	const struct intel_cdclk_state *new_cdclk_state;
 	const struct intel_dbuf_state *new_dbuf_state;
 	struct intel_pmdemand_state *new_pmdemand_state;
+	int max_active_pipes = 3;
 
 	if (DISPLAY_VER(i915) < 14)
 		return 0;
@@ -327,10 +333,13 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
 	if (IS_ERR(new_dbuf_state))
 		return PTR_ERR(new_dbuf_state);
 
+	if (DISPLAY_VER(i915) >= 30) max_active_pipes = 4;
 	new_pmdemand_state->params.active_pipes =
-		min_t(u8, hweight8(new_dbuf_state->active_pipes), 3);
-	new_pmdemand_state->params.active_dbufs =
-		min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
+		min_t(u8, hweight8(new_dbuf_state->active_pipes), max_active_pipes);
+
+	if (DISPLAY_VER(i915) < 30)
+		new_pmdemand_state->params.active_dbufs =
+			min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3);
 
 	new_cdclk_state = intel_atomic_get_cdclk_state(state);
 	if (IS_ERR(new_cdclk_state))
@@ -395,27 +404,32 @@ intel_pmdemand_init_pmdemand_params(struct drm_i915_private *i915,
 
 	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
 
-	/* Set 1*/
 	pmdemand_state->params.qclk_gv_bw =
 		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_BW_MASK, reg1);
 	pmdemand_state->params.voltage_index =
 		REG_FIELD_GET(XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK, reg1);
 	pmdemand_state->params.qclk_gv_index =
 		REG_FIELD_GET(XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK, reg1);
-	pmdemand_state->params.active_pipes =
-		REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
-	pmdemand_state->params.active_dbufs =
-		REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
 	pmdemand_state->params.active_phys =
 		REG_FIELD_GET(XELPDP_PMDEMAND_PHYS_MASK, reg1);
 
-	/* Set 2*/
 	pmdemand_state->params.cdclk_freq_mhz =
 		REG_FIELD_GET(XELPDP_PMDEMAND_CDCLK_FREQ_MASK, reg2);
 	pmdemand_state->params.ddiclk_max =
 		REG_FIELD_GET(XELPDP_PMDEMAND_DDICLK_FREQ_MASK, reg2);
-	pmdemand_state->params.scalers =
-		REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
+
+	if (DISPLAY_VER(i915) >= 30) {
+		pmdemand_state->params.active_pipes =
+			REG_FIELD_GET(XE3_PMDEMAND_PIPES_MASK, reg1);
+	} else {
+		pmdemand_state->params.active_pipes =
+			REG_FIELD_GET(XELPDP_PMDEMAND_PIPES_MASK, reg1);
+		pmdemand_state->params.active_dbufs =
+			REG_FIELD_GET(XELPDP_PMDEMAND_DBUFS_MASK, reg1);
+
+		pmdemand_state->params.scalers =
+			REG_FIELD_GET(XELPDP_PMDEMAND_SCALERS_MASK, reg2);
+	}
 
 unlock:
 	mutex_unlock(&i915->display.pmdemand.lock);
@@ -442,6 +456,10 @@ void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
 {
 	u32 dbufs = min_t(u32, hweight8(dbuf_slices), 3);
 
+	/* PM Demand only tracks active dbufs on pre-Xe3 platforms */
+	if (DISPLAY_VER(i915) >= 30)
+		return;
+
 	mutex_lock(&i915->display.pmdemand.lock);
 	if (drm_WARN_ON(&i915->drm,
 			!intel_pmdemand_check_prev_transaction(i915)))
@@ -460,7 +478,8 @@ void intel_pmdemand_program_dbuf(struct drm_i915_private *i915,
 }
 
 static void
-intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
+intel_pmdemand_update_params(struct drm_i915_private *i915,
+			     const struct intel_pmdemand_state *new,
 			     const struct intel_pmdemand_state *old,
 			     u32 *reg1, u32 *reg2, bool serialized)
 {
@@ -495,16 +514,22 @@ intel_pmdemand_update_params(const struct intel_pmdemand_state *new,
 	update_reg(reg1, qclk_gv_bw, XELPDP_PMDEMAND_QCLK_GV_BW_MASK);
 	update_reg(reg1, voltage_index, XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK);
 	update_reg(reg1, qclk_gv_index, XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK);
-	update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
-	update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
 	update_reg(reg1, active_phys, XELPDP_PMDEMAND_PHYS_MASK);
 
 	/* Set 2*/
 	update_reg(reg2, cdclk_freq_mhz, XELPDP_PMDEMAND_CDCLK_FREQ_MASK);
 	update_reg(reg2, ddiclk_max, XELPDP_PMDEMAND_DDICLK_FREQ_MASK);
-	update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
 	update_reg(reg2, plls, XELPDP_PMDEMAND_PLLS_MASK);
 
+	if (DISPLAY_VER(i915) >= 30) {
+		update_reg(reg1, active_pipes, XE3_PMDEMAND_PIPES_MASK);
+	} else {
+		update_reg(reg1, active_pipes, XELPDP_PMDEMAND_PIPES_MASK);
+		update_reg(reg1, active_dbufs, XELPDP_PMDEMAND_DBUFS_MASK);
+
+		update_reg(reg2, scalers, XELPDP_PMDEMAND_SCALERS_MASK);
+	}
+
 #undef update_reg
 }
 
@@ -529,7 +554,7 @@ intel_pmdemand_program_params(struct drm_i915_private *i915,
 	reg2 = intel_de_read(i915, XELPDP_INITIATE_PMDEMAND_REQUEST(1));
 	mod_reg2 = reg2;
 
-	intel_pmdemand_update_params(new, old, &mod_reg1, &mod_reg2,
+	intel_pmdemand_update_params(i915, new, old, &mod_reg1, &mod_reg2,
 				     serialized);
 
 	if (reg1 != mod_reg1) {
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.h b/drivers/gpu/drm/i915/display/intel_pmdemand.h
index 128fd61f8f14..a1c49efdc493 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.h
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.h
@@ -20,14 +20,14 @@ struct pmdemand_params {
 	u8 voltage_index;
 	u8 qclk_gv_index;
 	u8 active_pipes;
-	u8 active_dbufs;
+	u8 active_dbufs; /* pre-Xe3 only */
 	/* Total number of non type C active phys from active_phys_mask */
 	u8 active_phys;
 	u8 plls;
 	u16 cdclk_freq_mhz;
 	/* max from ddi_clocks[] */
 	u16 ddiclk_max;
-	u8 scalers;
+	u8 scalers; /* pre-Xe3 only */
 };
 
 struct intel_pmdemand_state {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 818142f5a10c..d30459f8d1cb 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2705,6 +2705,7 @@
 #define XELPDP_PMDEMAND_QCLK_GV_BW_MASK		REG_GENMASK(31, 16)
 #define XELPDP_PMDEMAND_VOLTAGE_INDEX_MASK	REG_GENMASK(14, 12)
 #define XELPDP_PMDEMAND_QCLK_GV_INDEX_MASK	REG_GENMASK(11, 8)
+#define XE3_PMDEMAND_PIPES_MASK			REG_GENMASK(7, 4)
 #define XELPDP_PMDEMAND_PIPES_MASK		REG_GENMASK(7, 6)
 #define XELPDP_PMDEMAND_DBUFS_MASK		REG_GENMASK(5, 4)
 #define XELPDP_PMDEMAND_PHYS_MASK		REG_GENMASK(2, 0)
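For readers comparing the two layouts: the Xe3 mask simply widens the active-pipes field downward over the bits that previously carried the dbuf-slice count, while the phys field is untouched. An illustrative sketch of the packing implied by the #defines above (bit positions taken from this patch; register naming glossed over, not an excerpt of the driver):

	/*
	 * PM demand request dword 0 (reg1 in the code above), low byte:
	 *
	 *   pre-Xe3:  bits 7:6 active pipes, bits 5:4 active dbufs,
	 *             bits 2:0 active phys
	 *   Xe3+:     bits 7:4 active pipes, bits 2:0 active phys
	 */
	if (DISPLAY_VER(i915) >= 30)
		reg1 |= REG_FIELD_PREP(XE3_PMDEMAND_PIPES_MASK, active_pipes);
	else
		reg1 |= REG_FIELD_PREP(XELPDP_PMDEMAND_PIPES_MASK, active_pipes) |
			REG_FIELD_PREP(XELPDP_PMDEMAND_DBUFS_MASK, active_dbufs);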