
[v4,02/11] drm/msm/dpu: core_perf: extract bandwidth aggregation function

Message ID 20230712221139.313729-3-dmitry.baryshkov@linaro.org (mailing list archive)
State Superseded
Series drm/msm/dpu: cleanup dpu_core_perf module

Commit Message

Dmitry Baryshkov July 12, 2023, 10:11 p.m. UTC
In preparation for refactoring the dpu_core_perf debugfs interface,
extract the bandwidth aggregation function from
_dpu_core_perf_crtc_update_bus().

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 39 +++++++++++--------
 1 file changed, 22 insertions(+), 17 deletions(-)
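
For illustration, extracting the loop into dpu_core_perf_aggregate() makes the
aggregation callable outside of _dpu_core_perf_crtc_update_bus(). A hypothetical
debugfs-style consumer (the refactor this prepares for is not part of this patch)
could look roughly like the sketch below; apart from dpu_core_perf_aggregate()
and struct dpu_core_perf_params, the names here are illustrative assumptions:

/* Hypothetical sketch: report the bandwidth aggregated over all enabled
 * real-time CRTCs. The function name, the seq_file plumbing and the use of
 * RT_CLIENT and kms->dev are assumptions, not code from this series.
 */
static int dpu_core_perf_bw_show(struct seq_file *s, void *unused)
{
	struct dpu_kms *kms = s->private;
	struct dpu_core_perf_params perf = { 0 };

	/* Aggregate bw_ctl / max_per_pipe_ib over all enabled RT CRTCs */
	dpu_core_perf_aggregate(kms->dev, RT_CLIENT, &perf);

	seq_printf(s, "bw_ctl: %llu\n", perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n", perf.max_per_pipe_ib);

	return 0;
}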

Comments

Abhinav Kumar July 17, 2023, 6:45 p.m. UTC | #1
On 7/12/2023 3:11 PM, Dmitry Baryshkov wrote:
> In preparation for refactoring the dpu_core_perf debugfs interface,
> extract the bandwidth aggregation function from
> _dpu_core_perf_crtc_update_bus().
> 
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> ---

Drop the core_perf tag from the subject line.

The debugfs refactor was dropped from this series, if that's what you are
referring to here.

So even this and the next patch don't serve any purpose in this series
and should be dropped, unless you have some reason for keeping them here.

>   drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 39 +++++++++++--------
>   1 file changed, 22 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> index 1d9d83d7b99e..333dcfe57800 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> @@ -206,33 +206,38 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
>   	return 0;
>   }
>   
> -static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
> -		struct drm_crtc *crtc)
> +static void dpu_core_perf_aggregate(struct drm_device *ddev,
> +				    enum dpu_crtc_client_type curr_client_type,
> +				    struct dpu_core_perf_params *perf)
>   {
> -	struct dpu_core_perf_params perf = { 0 };
> -	enum dpu_crtc_client_type curr_client_type
> -					= dpu_crtc_get_client_type(crtc);
> -	struct drm_crtc *tmp_crtc;
>   	struct dpu_crtc_state *dpu_cstate;
> -	int i, ret = 0;
> -	u64 avg_bw;
> +	struct drm_crtc *tmp_crtc;
>   
> -	drm_for_each_crtc(tmp_crtc, crtc->dev) {
> +	drm_for_each_crtc(tmp_crtc, ddev) {
>   		if (tmp_crtc->enabled &&
> -			curr_client_type ==
> -				dpu_crtc_get_client_type(tmp_crtc)) {
> +		    curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
>   			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
>   
> -			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
> -					dpu_cstate->new_perf.max_per_pipe_ib);
> +			perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
> +						    dpu_cstate->new_perf.max_per_pipe_ib);
>   
> -			perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
> +			perf->bw_ctl += dpu_cstate->new_perf.bw_ctl;
>   
> -			DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
> -				  tmp_crtc->base.id,
> -				  dpu_cstate->new_perf.bw_ctl, kms->num_paths);
> +			DRM_DEBUG_ATOMIC("crtc=%d bw=%llu\n",
> +					 tmp_crtc->base.id,
> +					 dpu_cstate->new_perf.bw_ctl);
>   		}
>   	}
> +}
> +
> +static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
> +		struct drm_crtc *crtc)
> +{
> +	struct dpu_core_perf_params perf = { 0 };
> +	int i, ret = 0;
> +	u64 avg_bw;
> +
> +	dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
>   
>   	if (!kms->num_paths)
>   		return 0;
Dmitry Baryshkov July 17, 2023, 6:56 p.m. UTC | #2
On Mon, 17 Jul 2023 at 21:45, Abhinav Kumar <quic_abhinavk@quicinc.com> wrote:
>
>
>
> On 7/12/2023 3:11 PM, Dmitry Baryshkov wrote:
> > In preparation for refactoring the dpu_core_perf debugfs interface,
> > extract the bandwidth aggregation function from
> > _dpu_core_perf_crtc_update_bus().
> >
> > Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> > ---
>
> Drop the core_perf tag from the subject line.

Ack.

>
> The debugfs refactor was dropped from this series, if that's what you are
> referring to here.
>
> So even this and the next patch don't serve any purpose in this series
> and should be dropped, unless you have some reason for keeping them here.

The next patch serves its purpose: it prevents recalculating bandwidth
if there are no ICC paths (useless math).

This patch separates the actual math from the control path. It was required
for debugfs, but it serves its own purpose too. I'll reflect that in
the commit message.
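
As a sketch, that optimisation presumably amounts to something like the
following (the assumed shape of the follow-up change, not its actual contents):

/* Sketch only: bail out before aggregating when there are no interconnect
 * paths to vote on, so the per-CRTC loop is skipped entirely.
 */
if (!kms->num_paths)
	return 0;

dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);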

>
> >   drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 39 +++++++++++--------
> >   1 file changed, 22 insertions(+), 17 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> > index 1d9d83d7b99e..333dcfe57800 100644
> > --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> > +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
> > @@ -206,33 +206,38 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
> >       return 0;
> >   }
> >
> > -static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
> > -             struct drm_crtc *crtc)
> > +static void dpu_core_perf_aggregate(struct drm_device *ddev,
> > +                                 enum dpu_crtc_client_type curr_client_type,
> > +                                 struct dpu_core_perf_params *perf)
> >   {
> > -     struct dpu_core_perf_params perf = { 0 };
> > -     enum dpu_crtc_client_type curr_client_type
> > -                                     = dpu_crtc_get_client_type(crtc);
> > -     struct drm_crtc *tmp_crtc;
> >       struct dpu_crtc_state *dpu_cstate;
> > -     int i, ret = 0;
> > -     u64 avg_bw;
> > +     struct drm_crtc *tmp_crtc;
> >
> > -     drm_for_each_crtc(tmp_crtc, crtc->dev) {
> > +     drm_for_each_crtc(tmp_crtc, ddev) {
> >               if (tmp_crtc->enabled &&
> > -                     curr_client_type ==
> > -                             dpu_crtc_get_client_type(tmp_crtc)) {
> > +                 curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
> >                       dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
> >
> > -                     perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
> > -                                     dpu_cstate->new_perf.max_per_pipe_ib);
> > +                     perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
> > +                                                 dpu_cstate->new_perf.max_per_pipe_ib);
> >
> > -                     perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
> > +                     perf->bw_ctl += dpu_cstate->new_perf.bw_ctl;
> >
> > -                     DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
> > -                               tmp_crtc->base.id,
> > -                               dpu_cstate->new_perf.bw_ctl, kms->num_paths);
> > +                     DRM_DEBUG_ATOMIC("crtc=%d bw=%llu\n",
> > +                                      tmp_crtc->base.id,
> > +                                      dpu_cstate->new_perf.bw_ctl);
> >               }
> >       }
> > +}
> > +
> > +static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
> > +             struct drm_crtc *crtc)
> > +{
> > +     struct dpu_core_perf_params perf = { 0 };
> > +     int i, ret = 0;
> > +     u64 avg_bw;
> > +
> > +     dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
> >
> >       if (!kms->num_paths)
> >               return 0;

Patch

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 1d9d83d7b99e..333dcfe57800 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -206,33 +206,38 @@  int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
 	return 0;
 }
 
-static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
-		struct drm_crtc *crtc)
+static void dpu_core_perf_aggregate(struct drm_device *ddev,
+				    enum dpu_crtc_client_type curr_client_type,
+				    struct dpu_core_perf_params *perf)
 {
-	struct dpu_core_perf_params perf = { 0 };
-	enum dpu_crtc_client_type curr_client_type
-					= dpu_crtc_get_client_type(crtc);
-	struct drm_crtc *tmp_crtc;
 	struct dpu_crtc_state *dpu_cstate;
-	int i, ret = 0;
-	u64 avg_bw;
+	struct drm_crtc *tmp_crtc;
 
-	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+	drm_for_each_crtc(tmp_crtc, ddev) {
 		if (tmp_crtc->enabled &&
-			curr_client_type ==
-				dpu_crtc_get_client_type(tmp_crtc)) {
+		    curr_client_type == dpu_crtc_get_client_type(tmp_crtc)) {
 			dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
 
-			perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
-					dpu_cstate->new_perf.max_per_pipe_ib);
+			perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+						    dpu_cstate->new_perf.max_per_pipe_ib);
 
-			perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+			perf->bw_ctl += dpu_cstate->new_perf.bw_ctl;
 
-			DRM_DEBUG_ATOMIC("crtc=%d bw=%llu paths:%d\n",
-				  tmp_crtc->base.id,
-				  dpu_cstate->new_perf.bw_ctl, kms->num_paths);
+			DRM_DEBUG_ATOMIC("crtc=%d bw=%llu\n",
+					 tmp_crtc->base.id,
+					 dpu_cstate->new_perf.bw_ctl);
 		}
 	}
+}
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct dpu_core_perf_params perf = { 0 };
+	int i, ret = 0;
+	u64 avg_bw;
+
+	dpu_core_perf_aggregate(crtc->dev, dpu_crtc_get_client_type(crtc), &perf);
 
 	if (!kms->num_paths)
 		return 0;