Message ID: 20240314-dpu-perf-rework-v3-3-79fa4e065574@linaro.org
State: Not Applicable
Series: drm/msm/dpu: rework debugfs interface of dpu_core_perf
On 3/13/2024 6:10 PM, Dmitry Baryshkov wrote:
> Move perf mode handling for the bandwidth to
> _dpu_core_perf_crtc_update_bus() rather than overriding per-CRTC data
> and then aggregating known values.
>
> Note, this changes the fix_core_ab_vote. Previously it would be
> multiplied per the CRTC number, now it will be used directly for
> interconnect voting.
>
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> ---
>  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 39 +++++++++++++--------------
>  1 file changed, 19 insertions(+), 20 deletions(-)
>
> [...]
>
> +	} else {
> +		dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);

Where is this function dpu_core_perf_aggregate() defined? I don't see it
in msm-next.

> +
> +		avg_bw = div_u64(perf.bw_ctl, 1000); /*Bps_to_icc*/
> +		peak_bw = perf.max_per_pipe_ib;
> +	}
>
> [...]
On Tue, 19 Mar 2024 at 22:34, Abhinav Kumar <quic_abhinavk@quicinc.com> wrote:
> On 3/13/2024 6:10 PM, Dmitry Baryshkov wrote:
> > Move perf mode handling for the bandwidth to
> > _dpu_core_perf_crtc_update_bus() rather than overriding per-CRTC data
> > and then aggregating known values.
> >
> > [...]
> >
> > +	} else {
> > +		dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
>
> Where is this function dpu_core_perf_aggregate() defined? I don't see it
> in msm-next.

In the previous patch.
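For readers without the rest of the series at hand: the helper in question aggregates the per-CRTC performance numbers. The sketch below shows the rough shape of such a helper, reconstructed from the aggregation loop that previously lived in _dpu_core_perf_crtc_update_bus(); it is illustrative only and not necessarily the exact code added by the previous patch.

#include <linux/minmax.h>
#include <drm/drm_crtc.h>

#include "dpu_crtc.h"
#include "dpu_core_perf.h"

/*
 * Sketch: walk all enabled CRTCs of the same client type, sum their
 * average-bandwidth votes and keep the largest per-pipe instantaneous
 * vote.
 */
static void dpu_core_perf_aggregate(struct drm_device *ddev,
				    enum dpu_crtc_client_type curr_client_type,
				    struct dpu_core_perf_params *perf)
{
	struct dpu_crtc_state *dpu_cstate;
	struct drm_crtc *tmp_crtc;

	drm_for_each_crtc(tmp_crtc, ddev) {
		if (tmp_crtc->enabled &&
		    curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);

			perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
						    dpu_cstate->new_perf.max_per_pipe_ib);

			perf->bw_ctl += dpu_cstate->new_perf.bw_ctl;
		}
	}
}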
On 3/19/2024 1:43 PM, Dmitry Baryshkov wrote:
> On Tue, 19 Mar 2024 at 22:34, Abhinav Kumar <quic_abhinavk@quicinc.com> wrote:
>> On 3/13/2024 6:10 PM, Dmitry Baryshkov wrote:
>>> Move perf mode handling for the bandwidth to
>>> _dpu_core_perf_crtc_update_bus() rather than overriding per-CRTC data
>>> and then aggregating known values.
>>>
>>> Note, this changes the fix_core_ab_vote. Previously it would be
>>> multiplied per the CRTC number, now it will be used directly for
>>> interconnect voting.
>>>
>>> [...]
>>>
>>>  	struct dpu_core_perf_params perf = { 0 };
>>>  	int i, ret = 0;
>>> -	u64 avg_bw;
>>> +	u32 avg_bw;
>>> +	u32 peak_bw;

Why were avg_bw and peak_bw values brought down to u32?

I think we might go higher, so u64 was better.

>>>
>>>  	if (!kms->num_paths)
>>>  		return 0;
>>>
>>> -	dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
>>> +	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
>>> +		avg_bw = 0;
>>> +		peak_bw = 0;
>>> +	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
>>> +		avg_bw = kms->perf.fix_core_ab_vote;
>>> +		peak_bw = kms->perf.fix_core_ib_vote;

Instead of changing the value of avg_bw like mentioned in the commit text,
why can't we do avg_bw = fix_core_ab * (drm_mode_config::num_crtc)?

Any reason you want to change it from "per CRTC fixed" to just "fixed"?

Now, the user who wants to hard-code this also needs to first account
for the number of CRTCs from the dri state and then program the fixed
value using debugfs. That's not convenient.

>>> +	} else {
>>> +		dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
>>
>> Where is this function dpu_core_perf_aggregate() defined? I don't see it
>> in msm-next.
>
> In the previous patch.

Sorry, my bad. I thought it had a different name in the prev patch :/
On Tue, 19 Mar 2024 at 23:35, Abhinav Kumar <quic_abhinavk@quicinc.com> wrote:
> On 3/19/2024 1:43 PM, Dmitry Baryshkov wrote:
> > On Tue, 19 Mar 2024 at 22:34, Abhinav Kumar <quic_abhinavk@quicinc.com> wrote:
> >> On 3/13/2024 6:10 PM, Dmitry Baryshkov wrote:
> >>> Move perf mode handling for the bandwidth to
> >>> _dpu_core_perf_crtc_update_bus() rather than overriding per-CRTC data
> >>> and then aggregating known values.
> >>>
> >>> [...]
> >>>
> >>>  	struct dpu_core_perf_params perf = { 0 };
> >>>  	int i, ret = 0;
> >>> -	u64 avg_bw;
> >>> +	u32 avg_bw;
> >>> +	u32 peak_bw;
>
> Why were avg_bw and peak_bw values brought down to u32?
>
> I think we might go higher, so u64 was better.

First of all, icc_set_bw() takes u32, not u64. The unit is 1000 bps, not
1 bps, so sensible values fit into u32.

> >>>
> >>>  	if (!kms->num_paths)
> >>>  		return 0;
> >>>
> >>> -	dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
> >>> +	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
> >>> +		avg_bw = 0;
> >>> +		peak_bw = 0;
> >>> +	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
> >>> +		avg_bw = kms->perf.fix_core_ab_vote;
> >>> +		peak_bw = kms->perf.fix_core_ib_vote;
>
> Instead of changing the value of avg_bw like mentioned in the commit text,
> why can't we do avg_bw = fix_core_ab * (drm_mode_config::num_crtc)?
>
> Any reason you want to change it from "per CRTC fixed" to just "fixed"?
>
> Now, the user who wants to hard-code this also needs to first account
> for the number of CRTCs from the dri state and then program the fixed
> value using debugfs. That's not convenient.

Different CRTCs have different bandwidth values, so programming a
value-per-CRTC is not efficient. In the end we care about the overall
bandwidth, so one would have to calculate the expected value and then
divide it per num_crtc.

> >>> +	} else {
> >>> +		dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
> >>
> >> Where is this function dpu_core_perf_aggregate() defined? I don't see it
> >> in msm-next.
> >
> > In the previous patch.
>
> Sorry, my bad. I thought it had a different name in the prev patch :/

No problems.
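For reference, icc_set_bw() takes its average and peak votes as u32 in the interconnect framework's units (see Bps_to_icc() in include/linux/interconnect.h, which divides a bytes-per-second figure by 1000). A minimal sketch of the conversion being discussed; the function and variable names here are illustrative, not driver code:

#include <linux/interconnect.h>
#include <linux/math64.h>

/*
 * Minimal sketch: convert a bytes-per-second bandwidth figure to the
 * u32 vote that icc_set_bw() expects (1 kBps units).
 */
static void example_icc_vote(struct icc_path *path, u64 bw_ctl_bps, u32 peak_ib)
{
	u32 avg_bw = div_u64(bw_ctl_bps, 1000);	/* Bps -> icc units */

	icc_set_bw(path, avg_bw, peak_ib);
}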
On 3/19/2024 3:25 PM, Dmitry Baryshkov wrote:
> On Tue, 19 Mar 2024 at 23:35, Abhinav Kumar <quic_abhinavk@quicinc.com> wrote:
>> On 3/19/2024 1:43 PM, Dmitry Baryshkov wrote:
>>> On Tue, 19 Mar 2024 at 22:34, Abhinav Kumar <quic_abhinavk@quicinc.com> wrote:
>>>> On 3/13/2024 6:10 PM, Dmitry Baryshkov wrote:
>>>>> Move perf mode handling for the bandwidth to
>>>>> _dpu_core_perf_crtc_update_bus() rather than overriding per-CRTC data
>>>>> and then aggregating known values.
>>>>>
>>>>> [...]
>>>>>
>>>>>  	struct dpu_core_perf_params perf = { 0 };
>>>>>  	int i, ret = 0;
>>>>> -	u64 avg_bw;
>>>>> +	u32 avg_bw;
>>>>> +	u32 peak_bw;
>>
>> Why were avg_bw and peak_bw values brought down to u32?
>>
>> I think we might go higher, so u64 was better.
>
> First of all, icc_set_bw() takes u32, not u64. The unit is 1000 bps, not
> 1 bps, so sensible values fit into u32.
>

True, and agreed. It would have been better to send this update as a
separate patch, perhaps even with a Fixes tag, so that it's clear why
you are doing this downgrade instead of it being hidden in this cleanup.

>>>>>
>>>>>  	if (!kms->num_paths)
>>>>>  		return 0;
>>>>>
>>>>> -	dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
>>>>> +	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
>>>>> +		avg_bw = 0;
>>>>> +		peak_bw = 0;
>>>>> +	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
>>>>> +		avg_bw = kms->perf.fix_core_ab_vote;
>>>>> +		peak_bw = kms->perf.fix_core_ib_vote;
>>
>> Instead of changing the value of avg_bw like mentioned in the commit text,
>> why can't we do avg_bw = fix_core_ab * (drm_mode_config::num_crtc)?
>>
>> Any reason you want to change it from "per CRTC fixed" to just "fixed"?
>>
>> Now, the user who wants to hard-code this also needs to first account
>> for the number of CRTCs from the dri state and then program the fixed
>> value using debugfs. That's not convenient.
>
> Different CRTCs have different bandwidth values, so programming a
> value-per-CRTC is not efficient. In the end we care about the overall
> bandwidth, so one would have to calculate the expected value and then
> divide it per num_crtc.
>

Yes, different CRTCs will have different bandwidth values, as each CRTC
might be driving a different resolution. So you are expecting the user
to program the total bandwidth they expect (the sum across CRTCs). Then
why would they have to divide it per num_crtc? After this change, I
think you are expecting the overall bandwidth, right?

I think some --help documentation for this debugfs node should be
written, now or later, to explain what to do with it.
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 87b892069526..ff2942a6a678 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -118,21 +118,9 @@ static void _dpu_core_perf_calc_crtc(const struct dpu_core_perf *core_perf,
 		return;
 	}
 
-	memset(perf, 0, sizeof(struct dpu_core_perf_params));
-
-	if (core_perf->perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
-		perf->bw_ctl = 0;
-		perf->max_per_pipe_ib = 0;
-		perf->core_clk_rate = 0;
-	} else if (core_perf->perf_tune.mode == DPU_PERF_MODE_FIXED) {
-		perf->bw_ctl = core_perf->fix_core_ab_vote;
-		perf->max_per_pipe_ib = core_perf->fix_core_ib_vote;
-		perf->core_clk_rate = core_perf->fix_core_clk_rate;
-	} else {
-		perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
-		perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
-		perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
-	}
+	perf->bw_ctl = _dpu_core_perf_calc_bw(perf_cfg, crtc);
+	perf->max_per_pipe_ib = perf_cfg->min_dram_ib;
+	perf->core_clk_rate = _dpu_core_perf_calc_clk(perf_cfg, crtc, state);
 
 	DRM_DEBUG_ATOMIC(
 		"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu\n",
@@ -233,18 +221,29 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
 {
 	struct dpu_core_perf_params perf = { 0 };
 	int i, ret = 0;
-	u64 avg_bw;
+	u32 avg_bw;
+	u32 peak_bw;
 
 	if (!kms->num_paths)
 		return 0;
 
-	dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
+	if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+		avg_bw = 0;
+		peak_bw = 0;
+	} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+		avg_bw = kms->perf.fix_core_ab_vote;
+		peak_bw = kms->perf.fix_core_ib_vote;
+	} else {
+		dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
+
+		avg_bw = div_u64(perf.bw_ctl, 1000); /*Bps_to_icc*/
+		peak_bw = perf.max_per_pipe_ib;
+	}
 
-	avg_bw = perf.bw_ctl;
-	do_div(avg_bw, (kms->num_paths * 1000)); /*Bps_to_icc*/
+	avg_bw /= kms->num_paths;
 
 	for (i = 0; i < kms->num_paths; i++)
-		icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+		icc_set_bw(kms->path[i], avg_bw, peak_bw);
 
 	return ret;
 }
Move perf mode handling for the bandwidth to
_dpu_core_perf_crtc_update_bus() rather than overriding per-CRTC data
and then aggregating known values.

Note, this changes the fix_core_ab_vote. Previously it would be
multiplied per the CRTC number, now it will be used directly for
interconnect voting.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 39 +++++++++++++--------------
 1 file changed, 19 insertions(+), 20 deletions(-)