
[v2,05/22] drm/msm/dpu: move resource allocation to CRTC

Message ID 20240924-concurrent-wb-v2-5-7849f900e863@quicinc.com (mailing list archive)
State Superseded
Series drm/msm/dpu: Add Concurrent Writeback Support for DPU 10.x+

Commit Message

Jessica Zhang Sept. 24, 2024, 10:59 p.m. UTC
From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>

All resource allocation is centered around the LMs. The other blocks
(except the DSCs) are then allocated based on the selected LMs, and the
LM powers up the CRTC rather than the encoder.

Moreover, if at some point the driver supports encoder cloning,
allocating resources from the encoder will be incorrect, as all the
clones will have different encoder IDs, while the LMs are to be shared
by these encoders.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
[quic_abhinavk@quicinc.com: Refactored resource allocation for CDM]
Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
[quic_jesszhan@quicinc.com: Changed to grabbing existing global state]
Signed-off-by: Jessica Zhang <quic_jesszhan@quicinc.com>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c    |  86 ++++++++++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 201 +++++++++++-----------------
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h |  19 +++
 3 files changed, 183 insertions(+), 123 deletions(-)

Comments

Dmitry Baryshkov Sept. 24, 2024, 11:13 p.m. UTC | #1
On Tue, Sep 24, 2024 at 03:59:21PM GMT, Jessica Zhang wrote:
> From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> 
> All resource allocation is centered around the LMs. Then other blocks
> (except DSCs) are allocated basing on the LMs that was selected, and LM
> powers up the CRTC rather than the encoder.
> 
> Moreover if at some point the driver supports encoder cloning,
> allocating resources from the encoder will be incorrect, as all clones
> will have different encoder IDs, while LMs are to be shared by these
> encoders.
> 
> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> [quic_abhinavk@quicinc.com: Refactored resource allocation for CDM]
> Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
> [quic_jesszhan@quicinc.com: Changed to grabbing exising global state]
> Signed-off-by: Jessica Zhang <quic_jesszhan@quicinc.com>
> ---
>  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c    |  86 ++++++++++++
>  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 201 +++++++++++-----------------
>  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h |  19 +++
>  3 files changed, 183 insertions(+), 123 deletions(-)
> 
> @@ -544,159 +542,117 @@ void dpu_encoder_helper_split_config(
>  	}
>  }
>  
> -bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
> +void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
> +				 struct msm_display_topology *topology,
> +				 struct drm_atomic_state *state,
> +				 const struct drm_display_mode *adj_mode)
>  {
>  	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
> -	int i, intf_count = 0, num_dsc = 0;
> +	struct drm_connector *connector;
> +	struct drm_connector_state *conn_state;
> +	struct msm_display_info *disp_info;
> +	struct drm_framebuffer *fb;
> +	struct msm_drm_private *priv;
> +	int i;
>  
>  	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
>  		if (dpu_enc->phys_encs[i])
> -			intf_count++;
> +			topology->num_intf++;
>  
> -	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
> +	/* We only support 2 DSC mode (with 2 LM and 1 INTF) */
>  	if (dpu_enc->dsc)
> -		num_dsc = 2;
> +		topology->num_dsc += 2;
>  
> -	return (num_dsc > 0) && (num_dsc > intf_count);
> -}
> +	connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
> +	if (!connector)
> +		return;
> +	conn_state = drm_atomic_get_new_connector_state(state, connector);
> +	if (!conn_state)
> +		return;
>  
> -struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
> -{
> -	struct msm_drm_private *priv = drm_enc->dev->dev_private;
> -	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
> -	int index = dpu_enc->disp_info.h_tile_instance[0];
> +	disp_info = &dpu_enc->disp_info;
>  
> -	if (dpu_enc->disp_info.intf_type == INTF_DSI)
> -		return msm_dsi_get_dsc_config(priv->dsi[index]);
> +	priv = drm_enc->dev->dev_private;
>  
> -	return NULL;
> +	/*
> +	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
> +	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
> +	 * earlier.
> +	 */
> +	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
> +		fb = conn_state->writeback_job->fb;
> +
> +		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
> +			topology->needs_cdm = true;
> +	} else if (disp_info->intf_type == INTF_DP) {
> +		if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
> +			topology->needs_cdm = true;
> +	}

Just to note, the needs_cdm is not enough once you introduce CWB
support. E.g. DP/YUV420 + WB/YUV case requires two CDM blocks (which we
don't have), but this doesn't get reflected in the topology.
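(For illustration: with CWB both encoders end up updating the same
per-CRTC topology, so two independent CDM requests collapse into a
single bit and the RM never learns that two blocks were needed:)

	/* illustration only */
	topology->needs_cdm = true;	/* DP encoder, YUV420 output */
	topology->needs_cdm = true;	/* WB encoder, YUV writeback fb */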

>  }
>
Jessica Zhang Sept. 25, 2024, 8:38 p.m. UTC | #2
On 9/24/2024 4:13 PM, Dmitry Baryshkov wrote:
> On Tue, Sep 24, 2024 at 03:59:21PM GMT, Jessica Zhang wrote:
>> From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
>>
>> All resource allocation is centered around the LMs. Then other blocks
>> (except DSCs) are allocated basing on the LMs that was selected, and LM
>> powers up the CRTC rather than the encoder.
>>
>> Moreover if at some point the driver supports encoder cloning,
>> allocating resources from the encoder will be incorrect, as all clones
>> will have different encoder IDs, while LMs are to be shared by these
>> encoders.
>>
>> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
>> [quic_abhinavk@quicinc.com: Refactored resource allocation for CDM]
>> Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
>> [quic_jesszhan@quicinc.com: Changed to grabbing exising global state]
>> Signed-off-by: Jessica Zhang <quic_jesszhan@quicinc.com>
>> ---
>>   drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c    |  86 ++++++++++++
>>   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 201 +++++++++++-----------------
>>   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h |  19 +++
>>   3 files changed, 183 insertions(+), 123 deletions(-)
>>
>> @@ -544,159 +542,117 @@ void dpu_encoder_helper_split_config(
>>   	}
>>   }
>>   
>> -bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
>> +void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
>> +				 struct msm_display_topology *topology,
>> +				 struct drm_atomic_state *state,
>> +				 const struct drm_display_mode *adj_mode)
>>   {
>>   	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
>> -	int i, intf_count = 0, num_dsc = 0;
>> +	struct drm_connector *connector;
>> +	struct drm_connector_state *conn_state;
>> +	struct msm_display_info *disp_info;
>> +	struct drm_framebuffer *fb;
>> +	struct msm_drm_private *priv;
>> +	int i;
>>   
>>   	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
>>   		if (dpu_enc->phys_encs[i])
>> -			intf_count++;
>> +			topology->num_intf++;
>>   
>> -	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
>> +	/* We only support 2 DSC mode (with 2 LM and 1 INTF) */
>>   	if (dpu_enc->dsc)
>> -		num_dsc = 2;
>> +		topology->num_dsc += 2;
>>   
>> -	return (num_dsc > 0) && (num_dsc > intf_count);
>> -}
>> +	connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
>> +	if (!connector)
>> +		return;
>> +	conn_state = drm_atomic_get_new_connector_state(state, connector);
>> +	if (!conn_state)
>> +		return;
>>   
>> -struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
>> -{
>> -	struct msm_drm_private *priv = drm_enc->dev->dev_private;
>> -	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
>> -	int index = dpu_enc->disp_info.h_tile_instance[0];
>> +	disp_info = &dpu_enc->disp_info;
>>   
>> -	if (dpu_enc->disp_info.intf_type == INTF_DSI)
>> -		return msm_dsi_get_dsc_config(priv->dsi[index]);
>> +	priv = drm_enc->dev->dev_private;
>>   
>> -	return NULL;
>> +	/*
>> +	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
>> +	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
>> +	 * earlier.
>> +	 */
>> +	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
>> +		fb = conn_state->writeback_job->fb;
>> +
>> +		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
>> +			topology->needs_cdm = true;
>> +	} else if (disp_info->intf_type == INTF_DP) {
>> +		if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
>> +			topology->needs_cdm = true;
>> +	}
> 
> Just to note, the needs_cdm is not enough once you introduce CWB
> support. E.g. DP/YUV420 + WB/YUV case requires two CDM blocks (which we
> don't have), but this doesn't get reflected in the topology.

Hi Dmitry,

Ack. I can add something to make atomic_check fail if the input FB is
in a YUV format and CWB is enabled.
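Something along these lines, maybe (untested sketch;
dpu_encoder_in_clone_mode() is just a placeholder for however CWB
detection ends up being exposed):

	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
		fb = conn_state->writeback_job->fb;

		/* a second CDM would be needed here, and we only have one */
		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)) &&
		    dpu_encoder_in_clone_mode(drm_enc))
			return -EINVAL;
	}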

Thanks,

Jessica Zhang

> 
>>   }
>>   
> -- 
> With best wishes
> Dmitry
Dmitry Baryshkov Sept. 25, 2024, 9:11 p.m. UTC | #3
On Wed, 25 Sept 2024 at 22:39, Jessica Zhang <quic_jesszhan@quicinc.com> wrote:
>
>
>
> On 9/24/2024 4:13 PM, Dmitry Baryshkov wrote:
> > On Tue, Sep 24, 2024 at 03:59:21PM GMT, Jessica Zhang wrote:
> >> From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> >>
> >> All resource allocation is centered around the LMs. Then other blocks
> >> (except DSCs) are allocated basing on the LMs that was selected, and LM
> >> powers up the CRTC rather than the encoder.
> >>
> >> Moreover if at some point the driver supports encoder cloning,
> >> allocating resources from the encoder will be incorrect, as all clones
> >> will have different encoder IDs, while LMs are to be shared by these
> >> encoders.
> >>
> >> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> >> [quic_abhinavk@quicinc.com: Refactored resource allocation for CDM]
> >> Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
> >> [quic_jesszhan@quicinc.com: Changed to grabbing exising global state]
> >> Signed-off-by: Jessica Zhang <quic_jesszhan@quicinc.com>
> >> ---
> >>   drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c    |  86 ++++++++++++
> >>   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 201 +++++++++++-----------------
> >>   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h |  19 +++
> >>   3 files changed, 183 insertions(+), 123 deletions(-)
> >>
> >> @@ -544,159 +542,117 @@ void dpu_encoder_helper_split_config(
> >>      }
> >>   }
> >>
> >> -bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
> >> +void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
> >> +                             struct msm_display_topology *topology,
> >> +                             struct drm_atomic_state *state,
> >> +                             const struct drm_display_mode *adj_mode)
> >>   {
> >>      struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
> >> -    int i, intf_count = 0, num_dsc = 0;
> >> +    struct drm_connector *connector;
> >> +    struct drm_connector_state *conn_state;
> >> +    struct msm_display_info *disp_info;
> >> +    struct drm_framebuffer *fb;
> >> +    struct msm_drm_private *priv;
> >> +    int i;
> >>
> >>      for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
> >>              if (dpu_enc->phys_encs[i])
> >> -                    intf_count++;
> >> +                    topology->num_intf++;
> >>
> >> -    /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
> >> +    /* We only support 2 DSC mode (with 2 LM and 1 INTF) */
> >>      if (dpu_enc->dsc)
> >> -            num_dsc = 2;
> >> +            topology->num_dsc += 2;
> >>
> >> -    return (num_dsc > 0) && (num_dsc > intf_count);
> >> -}
> >> +    connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
> >> +    if (!connector)
> >> +            return;
> >> +    conn_state = drm_atomic_get_new_connector_state(state, connector);
> >> +    if (!conn_state)
> >> +            return;
> >>
> >> -struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
> >> -{
> >> -    struct msm_drm_private *priv = drm_enc->dev->dev_private;
> >> -    struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
> >> -    int index = dpu_enc->disp_info.h_tile_instance[0];
> >> +    disp_info = &dpu_enc->disp_info;
> >>
> >> -    if (dpu_enc->disp_info.intf_type == INTF_DSI)
> >> -            return msm_dsi_get_dsc_config(priv->dsi[index]);
> >> +    priv = drm_enc->dev->dev_private;
> >>
> >> -    return NULL;
> >> +    /*
> >> +     * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
> >> +     * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
> >> +     * earlier.
> >> +     */
> >> +    if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
> >> +            fb = conn_state->writeback_job->fb;
> >> +
> >> +            if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
> >> +                    topology->needs_cdm = true;
> >> +    } else if (disp_info->intf_type == INTF_DP) {
> >> +            if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
> >> +                    topology->needs_cdm = true;
> >> +    }
> >
> > Just to note, the needs_cdm is not enough once you introduce CWB
> > support. E.g. DP/YUV420 + WB/YUV case requires two CDM blocks (which we
> > don't have), but this doesn't get reflected in the topology.
>
> Hi Dmitry,
>
> Ack. I can add something to make atomic_check fail if the input FB is
> YUV format and CWB is enabled.

I'd prefer this to be more natural rather than just checking for
DP && DP_YUV420 && WB && WB_FMT_YUV. In the worst case, count CDM
requests and then check them against the catalog in the RM. But I had a
more logical (although more intrusive) implementation in mind:

struct msm_display_topology {
    struct {
      u32 num_intf;
      u32 num_wb;
      u32 num_dsc;
      bool needs_cdm;
    } outputs[MAX_OUTPUTS];
    u32 num_lm;
};

WDYT?
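
The RM could then validate the aggregated request against the catalog,
roughly like this (sketch only, the helper and its placement are
illustrative):

	static int dpu_rm_check_cdm(const struct msm_display_topology *topology)
	{
		u32 num_cdm = 0;
		int i;

		for (i = 0; i < MAX_OUTPUTS; i++)
			if (topology->outputs[i].needs_cdm)
				num_cdm++;

		/* current catalogs expose a single CDM block */
		return num_cdm <= 1 ? 0 : -EINVAL;
	}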

>
> Thanks,
>
> Jessica Zhang
>
> >
> >>   }
> >>
> > --
> > With best wishes
> > Dmitry
>
Abhinav Kumar Sept. 25, 2024, 9:49 p.m. UTC | #4
On 9/25/2024 2:11 PM, Dmitry Baryshkov wrote:
> On Wed, 25 Sept 2024 at 22:39, Jessica Zhang <quic_jesszhan@quicinc.com> wrote:
>>
>>
>>
>> On 9/24/2024 4:13 PM, Dmitry Baryshkov wrote:
>>> On Tue, Sep 24, 2024 at 03:59:21PM GMT, Jessica Zhang wrote:
>>>> From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
>>>>
>>>> All resource allocation is centered around the LMs. Then other blocks
>>>> (except DSCs) are allocated basing on the LMs that was selected, and LM
>>>> powers up the CRTC rather than the encoder.
>>>>
>>>> Moreover if at some point the driver supports encoder cloning,
>>>> allocating resources from the encoder will be incorrect, as all clones
>>>> will have different encoder IDs, while LMs are to be shared by these
>>>> encoders.
>>>>
>>>> Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
>>>> [quic_abhinavk@quicinc.com: Refactored resource allocation for CDM]
>>>> Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
>>>> [quic_jesszhan@quicinc.com: Changed to grabbing exising global state]
>>>> Signed-off-by: Jessica Zhang <quic_jesszhan@quicinc.com>
>>>> ---
>>>>    drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c    |  86 ++++++++++++
>>>>    drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 201 +++++++++++-----------------
>>>>    drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h |  19 +++
>>>>    3 files changed, 183 insertions(+), 123 deletions(-)
>>>>
>>>> @@ -544,159 +542,117 @@ void dpu_encoder_helper_split_config(
>>>>       }
>>>>    }
>>>>
>>>> -bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
>>>> +void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
>>>> +                             struct msm_display_topology *topology,
>>>> +                             struct drm_atomic_state *state,
>>>> +                             const struct drm_display_mode *adj_mode)
>>>>    {
>>>>       struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
>>>> -    int i, intf_count = 0, num_dsc = 0;
>>>> +    struct drm_connector *connector;
>>>> +    struct drm_connector_state *conn_state;
>>>> +    struct msm_display_info *disp_info;
>>>> +    struct drm_framebuffer *fb;
>>>> +    struct msm_drm_private *priv;
>>>> +    int i;
>>>>
>>>>       for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
>>>>               if (dpu_enc->phys_encs[i])
>>>> -                    intf_count++;
>>>> +                    topology->num_intf++;
>>>>
>>>> -    /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
>>>> +    /* We only support 2 DSC mode (with 2 LM and 1 INTF) */
>>>>       if (dpu_enc->dsc)
>>>> -            num_dsc = 2;
>>>> +            topology->num_dsc += 2;
>>>>
>>>> -    return (num_dsc > 0) && (num_dsc > intf_count);
>>>> -}
>>>> +    connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
>>>> +    if (!connector)
>>>> +            return;
>>>> +    conn_state = drm_atomic_get_new_connector_state(state, connector);
>>>> +    if (!conn_state)
>>>> +            return;
>>>>
>>>> -struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
>>>> -{
>>>> -    struct msm_drm_private *priv = drm_enc->dev->dev_private;
>>>> -    struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
>>>> -    int index = dpu_enc->disp_info.h_tile_instance[0];
>>>> +    disp_info = &dpu_enc->disp_info;
>>>>
>>>> -    if (dpu_enc->disp_info.intf_type == INTF_DSI)
>>>> -            return msm_dsi_get_dsc_config(priv->dsi[index]);
>>>> +    priv = drm_enc->dev->dev_private;
>>>>
>>>> -    return NULL;
>>>> +    /*
>>>> +     * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
>>>> +     * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
>>>> +     * earlier.
>>>> +     */
>>>> +    if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
>>>> +            fb = conn_state->writeback_job->fb;
>>>> +
>>>> +            if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
>>>> +                    topology->needs_cdm = true;
>>>> +    } else if (disp_info->intf_type == INTF_DP) {
>>>> +            if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
>>>> +                    topology->needs_cdm = true;
>>>> +    }
>>>
>>> Just to note, the needs_cdm is not enough once you introduce CWB
>>> support. E.g. DP/YUV420 + WB/YUV case requires two CDM blocks (which we
>>> don't have), but this doesn't get reflected in the topology.
>>
>> Hi Dmitry,
>>
>> Ack. I can add something to make atomic_check fail if the input FB is
>> YUV format and CWB is enabled.
> 
> I'd prefer for this to be more natural rather than just checking for
> the DP && DP_YUV420 && WB && WB_FMT_YUV. In the worst case, count CDM
> requests and then in RM check them against the catalog. But I had a
> more logical (although more intrusive) implementation on my mind:
> 
> struct msm_display_topology {
>      struct {
>        u32 num_intf;
>        u32 num_wb;
>        u32 num_dsc;
>        bool needs_cdm;
>      } outputs[MAX_OUTPUTS];
>      u32 num_lm;
> };
> 
> WDYT?
> 

struct msm_display_topology was originally designed as a per-encoder
struct (dpu_encoder_get_topology() indicates the same). Making it an
array of outputs was not needed, as there is expected to be one struct
msm_display_topology for each virt encoder's requested topology, and
the blocks inside it, other than the LM, are "encoder" hw blocks.

needs_cdm was made a boolean instead of a count like the other hardware
blocks because, up to the most recent chipset, we have only one CDM
block. Whenever we do get more CDM blocks, why wouldn't introducing
num_cdm to the topology struct solve your problem, rather than making
it an array of outputs?

Because then the RM will know that the request exceeds the maximum
number of blocks.

I think what you are trying to do now is make struct
msm_display_topology's encoder parts per-encoder and the rest, like
num_lm, per "RM session".

The thought is not wrong, but at the same time it seems a bit of an
overkill because it is mostly already like that. Apart from CDM, for
which I have no indication of another instance getting added, the rest
of the blocks are already aligned towards a per-encoder model and not
an "RM session" model.

Even if we end up doing it this way, most of the model ends up unused,
because each encoder will request its own topology anyway; there is
just no aggregation for CDM, which at this point is not needed as there
is no HW we are aware of that needs it.

I think the atomic_check validation is needed either way, because if
two encoders request CDM, we cannot do clone mode as there is only one
CDM block today. It's just that we are not tracking num_cdm today, for
the reasons explained above, but basically doing something like the
below seems right to me:

if (enc_is_in_clone_mode && needs_cdm)
	return -EOPNOTSUPP;

When we add more CDM blocks, we can drop this check, and making
needs_cdm a num_cdm count will make it fail naturally.
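i.e. roughly (sketch; num_cdm in the topology and a CDM count in the
catalog are both hypothetical at this point):

	if (topology.num_cdm > cdm_blocks_in_catalog)	/* hypothetical */
		return -EINVAL;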

>>
>> Thanks,
>>
>> Jessica Zhang
>>
>>>
>>>>    }
>>>>
>>> --
>>> With best wishes
>>> Dmitry
>>
> 
>
Dmitry Baryshkov Sept. 26, 2024, 7:12 a.m. UTC | #5
On Wed, Sep 25, 2024 at 02:49:48PM GMT, Abhinav Kumar wrote:
> On 9/25/2024 2:11 PM, Dmitry Baryshkov wrote:
> > On Wed, 25 Sept 2024 at 22:39, Jessica Zhang <quic_jesszhan@quicinc.com> wrote:
> > > On 9/24/2024 4:13 PM, Dmitry Baryshkov wrote:
> > > > On Tue, Sep 24, 2024 at 03:59:21PM GMT, Jessica Zhang wrote:
> > > > > From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> > > > > 
> > > > > All resource allocation is centered around the LMs. Then other blocks
> > > > > (except DSCs) are allocated basing on the LMs that was selected, and LM
> > > > > powers up the CRTC rather than the encoder.
> > > > > 
> > > > > Moreover if at some point the driver supports encoder cloning,
> > > > > allocating resources from the encoder will be incorrect, as all clones
> > > > > will have different encoder IDs, while LMs are to be shared by these
> > > > > encoders.
> > > > > 
> > > > > Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
> > > > > [quic_abhinavk@quicinc.com: Refactored resource allocation for CDM]
> > > > > Signed-off-by: Abhinav Kumar <quic_abhinavk@quicinc.com>
> > > > > [quic_jesszhan@quicinc.com: Changed to grabbing exising global state]
> > > > > Signed-off-by: Jessica Zhang <quic_jesszhan@quicinc.com>
> > > > > ---
> > > > >    drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c    |  86 ++++++++++++
> > > > >    drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 201 +++++++++++-----------------
> > > > >    drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h |  19 +++
> > > > >    3 files changed, 183 insertions(+), 123 deletions(-)
> > > > > 
> > > > > @@ -544,159 +542,117 @@ void dpu_encoder_helper_split_config(
> > > > >       }
> > > > >    }
> > > > > 
> > > > > -bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
> > > > > +void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
> > > > > +                             struct msm_display_topology *topology,
> > > > > +                             struct drm_atomic_state *state,
> > > > > +                             const struct drm_display_mode *adj_mode)
> > > > >    {
> > > > >       struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
> > > > > -    int i, intf_count = 0, num_dsc = 0;
> > > > > +    struct drm_connector *connector;
> > > > > +    struct drm_connector_state *conn_state;
> > > > > +    struct msm_display_info *disp_info;
> > > > > +    struct drm_framebuffer *fb;
> > > > > +    struct msm_drm_private *priv;
> > > > > +    int i;
> > > > > 
> > > > >       for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
> > > > >               if (dpu_enc->phys_encs[i])
> > > > > -                    intf_count++;
> > > > > +                    topology->num_intf++;
> > > > > 
> > > > > -    /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
> > > > > +    /* We only support 2 DSC mode (with 2 LM and 1 INTF) */
> > > > >       if (dpu_enc->dsc)
> > > > > -            num_dsc = 2;
> > > > > +            topology->num_dsc += 2;
> > > > > 
> > > > > -    return (num_dsc > 0) && (num_dsc > intf_count);
> > > > > -}
> > > > > +    connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
> > > > > +    if (!connector)
> > > > > +            return;
> > > > > +    conn_state = drm_atomic_get_new_connector_state(state, connector);
> > > > > +    if (!conn_state)
> > > > > +            return;
> > > > > 
> > > > > -struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
> > > > > -{
> > > > > -    struct msm_drm_private *priv = drm_enc->dev->dev_private;
> > > > > -    struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
> > > > > -    int index = dpu_enc->disp_info.h_tile_instance[0];
> > > > > +    disp_info = &dpu_enc->disp_info;
> > > > > 
> > > > > -    if (dpu_enc->disp_info.intf_type == INTF_DSI)
> > > > > -            return msm_dsi_get_dsc_config(priv->dsi[index]);
> > > > > +    priv = drm_enc->dev->dev_private;
> > > > > 
> > > > > -    return NULL;
> > > > > +    /*
> > > > > +     * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
> > > > > +     * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
> > > > > +     * earlier.
> > > > > +     */
> > > > > +    if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
> > > > > +            fb = conn_state->writeback_job->fb;
> > > > > +
> > > > > +            if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
> > > > > +                    topology->needs_cdm = true;
> > > > > +    } else if (disp_info->intf_type == INTF_DP) {
> > > > > +            if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
> > > > > +                    topology->needs_cdm = true;
> > > > > +    }
> > > > 
> > > > Just to note, the needs_cdm is not enough once you introduce CWB
> > > > support. E.g. DP/YUV420 + WB/YUV case requires two CDM blocks (which we
> > > > don't have), but this doesn't get reflected in the topology.
> > > 
> > > Hi Dmitry,
> > > 
> > > Ack. I can add something to make atomic_check fail if the input FB is
> > > YUV format and CWB is enabled.
> > 
> > I'd prefer for this to be more natural rather than just checking for
> > the DP && DP_YUV420 && WB && WB_FMT_YUV. In the worst case, count CDM
> > requests and then in RM check them against the catalog. But I had a
> > more logical (although more intrusive) implementation on my mind:
> > 
> > struct msm_display_topology {
> >      struct {
> >        u32 num_intf;
> >        u32 num_wb;
> >        u32 num_dsc;
> >        bool needs_cdm;
> >      } outputs[MAX_OUTPUTS];
> >      u32 num_lm;
> > };
> > 
> > WDYT?
> > 
> 
> the struct msm_display_topology was originally designed as a per-encoder
> struct (dpu_encoder_get_topology() indicates the same). Making this an array
> of outputs was not needed as there is expected to be one struct
> msm_display_topology for each virt encoder's requested topology and the
> blocks inside of it other than LM, are "encoder" hw blocks.
> 
> needs_cdm was made a boolean instead of a num_cdm_count like other hardware
> blocks because till the most recent chipset, we have only one CDM block.
> Whenever we do have more CDM blocks why will introducing num_cdm to the
> topology struct not solve your problem rather than making it an array of
> outputs?
> 
> Because then, RM will know that the request exceeds the max blocks.
> 
> I think what you are trying to do now is make struct msm_display_topology's
> encoder parts per-encoder and rest like num_lm per "RM session".
> 
> The thought is not wrong but at the same time seems a bit of an overkill
> because its mostly already like that. Apart from CDM for which I have no
> indication of another one getting added, rest of the blocks are already
> aligned towards a per-encoder model and not a "RM session" model.

But we should be leaning towards the RM session model.

> 
> Even if we end up doing it this way, most of the model is kind of unused
> really because each encoder will request its own topology anyway, there is
> just no aggregation for CDM which at this point is not needed as there is no
> HW we are aware of needing this.

With the resource allocation shifted to the CRTC, individual encoders
no longer request their own topology (it is now a property of the full
output pipeline, not just an encoder). Yes, CDM aggregation into
num_cdm seems unnecessary as there is just one CDM block.

> I think the atomic_check validation is needed either way because if two
> encoders request cdm, we cannot do clone mode as there is only one cdm block
> today. Its just that we are not tracking num_cdm today due to reasons
> explained above but basically doing something like below seems right to me:
> 
> if (enc_is_in_clone_mode && needs_cdm)
> 	return -ENOTSUPPORTED;

This check is incorrect in my opinion. The hardware should be able to
support DP/YUV420 + WB/RGB and DP/RGB + WB/YUV combinations. Please
correct me if I'm wrong.
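
A count-based check at the CRTC level would allow exactly those
combinations while still rejecting DP/YUV420 + WB/YUV, e.g. (sketch;
dpu_encoder_needs_cdm() is a hypothetical helper):

	u32 cdm_requests = 0;

	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
		if (dpu_encoder_needs_cdm(drm_enc, crtc_state->state))
			cdm_requests++;

	/* only one CDM block exists today */
	if (cdm_requests > 1)
		return -EINVAL;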

> When we add more cdm_blocks, we can drop this check and making needs_cdm a
> num_cdm will make it naturally fail.

Patch

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 4c1be2f0555f..b918c80d30b3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1163,6 +1163,78 @@  static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
 	return false;
 }
 
+#define MAX_HDISPLAY_SPLIT 1080
+
+static struct msm_display_topology dpu_crtc_get_topology(
+		struct drm_crtc *crtc,
+		struct dpu_kms *dpu_kms,
+		struct drm_crtc_state *crtc_state)
+{
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+	struct msm_display_topology topology = {0};
+	struct drm_encoder *drm_enc;
+
+	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
+		dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
+					    &crtc_state->adjusted_mode);
+
+	/*
+	 * Datapath topology selection
+	 *
+	 * Dual display
+	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
+	 *
+	 * Single display
+	 * 1 LM, 1 INTF
+	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
+	 *
+	 * Add dspps to the reservation requirements if ctm is requested
+	 */
+
+	if (topology.num_intf == 2)
+		topology.num_lm = 2;
+	else if (topology.num_dsc == 2)
+		topology.num_lm = 2;
+	else if (dpu_kms->catalog->caps->has_3d_merge)
+		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+	else
+		topology.num_lm = 1;
+
+	if (crtc_state->ctm)
+		topology.num_dspp = topology.num_lm;
+
+	return topology;
+}
+
+static int dpu_crtc_assign_resources(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
+{
+	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+	struct dpu_global_state *global_state;
+	struct msm_display_topology topology;
+	int ret;
+
+	/*
+	 * Release and Allocate resources on every modeset
+	 * Don't allocate when enable is false.
+	 */
+	global_state = dpu_kms_get_existing_global_state(dpu_kms);
+	if (IS_ERR(global_state))
+		return PTR_ERR(global_state);
+
+	dpu_rm_release(global_state, crtc);
+
+	if (!crtc_state->enable)
+		return 0;
+
+	topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state);
+	ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+			     crtc, &topology);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_atomic_state *state)
 {
@@ -1174,10 +1246,24 @@  static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	const struct drm_plane_state *pstate;
 	struct drm_plane *plane;
 
+	struct drm_encoder *drm_enc;
+
 	int rc = 0;
 
 	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
 
+	/* there might be cases where encoder needs a modeset too */
+	drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) {
+		if (dpu_encoder_needs_modeset(drm_enc, crtc_state->state))
+			crtc_state->mode_changed = true;
+	}
+
+	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+		rc = dpu_crtc_assign_resources(crtc, crtc_state);
+		if (rc < 0)
+			return rc;
+	}
+
 	if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
 		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
 				crtc->base.id, crtc_state->enable,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 4a9edcfbcaae..ada9119326ca 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -58,8 +58,6 @@ 
 
 #define IDLE_SHORT_TIMEOUT	1
 
-#define MAX_HDISPLAY_SPLIT 1080
-
 /* timeout in frames waiting for frame done */
 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
 
@@ -544,159 +542,117 @@  void dpu_encoder_helper_split_config(
 	}
 }
 
-bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+				 struct msm_display_topology *topology,
+				 struct drm_atomic_state *state,
+				 const struct drm_display_mode *adj_mode)
 {
 	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
-	int i, intf_count = 0, num_dsc = 0;
+	struct drm_connector *connector;
+	struct drm_connector_state *conn_state;
+	struct msm_display_info *disp_info;
+	struct drm_framebuffer *fb;
+	struct msm_drm_private *priv;
+	int i;
 
 	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
 		if (dpu_enc->phys_encs[i])
-			intf_count++;
+			topology->num_intf++;
 
-	/* See dpu_encoder_get_topology, we only support 2:2:1 topology */
+	/* We only support 2 DSC mode (with 2 LM and 1 INTF) */
 	if (dpu_enc->dsc)
-		num_dsc = 2;
+		topology->num_dsc += 2;
 
-	return (num_dsc > 0) && (num_dsc > intf_count);
-}
+	connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+	if (!connector)
+		return;
+	conn_state = drm_atomic_get_new_connector_state(state, connector);
+	if (!conn_state)
+		return;
 
-struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
-{
-	struct msm_drm_private *priv = drm_enc->dev->dev_private;
-	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
-	int index = dpu_enc->disp_info.h_tile_instance[0];
+	disp_info = &dpu_enc->disp_info;
 
-	if (dpu_enc->disp_info.intf_type == INTF_DSI)
-		return msm_dsi_get_dsc_config(priv->dsi[index]);
+	priv = drm_enc->dev->dev_private;
 
-	return NULL;
+	/*
+	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
+	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
+	 * earlier.
+	 */
+	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
+		fb = conn_state->writeback_job->fb;
+
+		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
+			topology->needs_cdm = true;
+	} else if (disp_info->intf_type == INTF_DP) {
+		if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
+			topology->needs_cdm = true;
+	}
 }
 
-static struct msm_display_topology dpu_encoder_get_topology(
-			struct dpu_encoder_virt *dpu_enc,
-			struct dpu_kms *dpu_kms,
-			struct drm_display_mode *mode,
-			struct drm_crtc_state *crtc_state,
-			struct drm_dsc_config *dsc)
+static bool dpu_encoder_needs_dsc_merge(struct drm_encoder *drm_enc)
 {
-	struct msm_display_topology topology = {0};
-	int i, intf_count = 0;
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+	u32 num_intf = 0;
+	u32 num_dsc = 0;
+	int i;
 
 	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
 		if (dpu_enc->phys_encs[i])
-			intf_count++;
-
-	/* Datapath topology selection
-	 *
-	 * Dual display
-	 * 2 LM, 2 INTF ( Split display using 2 interfaces)
-	 *
-	 * Single display
-	 * 1 LM, 1 INTF
-	 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
-	 *
-	 * Add dspps to the reservation requirements if ctm is requested
-	 */
-	if (intf_count == 2)
-		topology.num_lm = 2;
-	else if (!dpu_kms->catalog->caps->has_3d_merge)
-		topology.num_lm = 1;
-	else
-		topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
-
-	if (crtc_state->ctm)
-		topology.num_dspp = topology.num_lm;
-
-	topology.num_intf = intf_count;
+			num_intf++;
 
-	if (dsc) {
-		/*
-		 * In case of Display Stream Compression (DSC), we would use
-		 * 2 DSC encoders, 2 layer mixers and 1 interface
-		 * this is power optimal and can drive up to (including) 4k
-		 * screens
-		 */
-		topology.num_dsc = 2;
-		topology.num_lm = 2;
-		topology.num_intf = 1;
-	}
+	/* We only support 2 DSC mode (with 2 LM and 1 INTF) */
+	if (dpu_enc->dsc)
+		num_dsc += 2;
 
-	return topology;
+	return (num_dsc > 0) && (num_dsc > num_intf);
 }
 
-static int dpu_encoder_virt_atomic_check(
-		struct drm_encoder *drm_enc,
-		struct drm_crtc_state *crtc_state,
-		struct drm_connector_state *conn_state)
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state)
 {
-	struct dpu_encoder_virt *dpu_enc;
-	struct msm_drm_private *priv;
-	struct dpu_kms *dpu_kms;
-	struct drm_display_mode *adj_mode;
-	struct msm_display_topology topology;
-	struct msm_display_info *disp_info;
-	struct dpu_global_state *global_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *conn_state;
 	struct drm_framebuffer *fb;
-	struct drm_dsc_config *dsc;
-	int ret = 0;
-
-	if (!drm_enc || !crtc_state || !conn_state) {
-		DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
-				drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
-		return -EINVAL;
-	}
-
-	dpu_enc = to_dpu_encoder_virt(drm_enc);
-	DPU_DEBUG_ENC(dpu_enc, "\n");
-
-	priv = drm_enc->dev->dev_private;
-	disp_info = &dpu_enc->disp_info;
-	dpu_kms = to_dpu_kms(priv->kms);
-	adj_mode = &crtc_state->adjusted_mode;
-	global_state = dpu_kms_get_global_state(crtc_state->state);
-	if (IS_ERR(global_state))
-		return PTR_ERR(global_state);
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
 
-	trace_dpu_enc_atomic_check(DRMID(drm_enc));
+	if (!drm_enc || !state)
+		return false;
 
-	dsc = dpu_encoder_get_dsc_config(drm_enc);
+	connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+	if (!connector)
+		return false;
 
-	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
+	conn_state = drm_atomic_get_new_connector_state(state, connector);
 
-	/*
-	 * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it.
-	 * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check()
-	 * earlier.
-	 */
-	if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
+	if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
 		fb = conn_state->writeback_job->fb;
-
-		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
-			topology.needs_cdm = true;
-	} else if (disp_info->intf_type == INTF_DP) {
-		if (msm_dp_is_yuv_420_enabled(priv->dp[disp_info->h_tile_instance[0]], adj_mode))
-			topology.needs_cdm = true;
+		if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) {
+			if (!dpu_enc->cur_master->hw_cdm)
+				return true;
+		} else {
+			if (dpu_enc->cur_master->hw_cdm)
+				return true;
+		}
 	}
 
-	if (topology.needs_cdm && !dpu_enc->cur_master->hw_cdm)
-		crtc_state->mode_changed = true;
-	else if (!topology.needs_cdm && dpu_enc->cur_master->hw_cdm)
-		crtc_state->mode_changed = true;
-	/*
-	 * Release and Allocate resources on every modeset
-	 * Dont allocate when active is false.
-	 */
-	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
-		dpu_rm_release(global_state, crtc_state->crtc);
+	return false;
+}
 
-		if (!crtc_state->active_changed || crtc_state->enable)
-			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
-					crtc_state->crtc, &topology);
-	}
+struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
+{
+	struct msm_drm_private *priv = drm_enc->dev->dev_private;
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+	int index = dpu_enc->disp_info.h_tile_instance[0];
 
-	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
+	if (dpu_enc->disp_info.intf_type == INTF_DSI)
+		return msm_dsi_get_dsc_config(priv->dsi[index]);
 
-	return ret;
+	return NULL;
+}
+
+bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
+{
+	return dpu_encoder_needs_dsc_merge(drm_enc);
 }
 
 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
@@ -2449,7 +2405,6 @@  static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
 	.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
 	.atomic_disable = dpu_encoder_virt_atomic_disable,
 	.atomic_enable = dpu_encoder_virt_atomic_enable,
-	.atomic_check = dpu_encoder_virt_atomic_check,
 };
 
 static const struct drm_encoder_funcs dpu_encoder_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index f7465a1774aa..0d27e50384f0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -169,6 +169,25 @@  int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos);
  */
 bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc);
 
+/**
+ * dpu_encoder_update_topology - update topology with the requirements for the encoder
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ * @topology:   Topology to be updated
+ * @state:      Current DRM atomic state
+ * @adj_mode:   Current DRM display mode associated with the crtc
+ */
+void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
+				 struct msm_display_topology *topology,
+				 struct drm_atomic_state *state,
+				 const struct drm_display_mode *adj_mode);
+
+/**
+ * dpu_encoder_needs_modeset - check if the encoder requires a full modeset
+ * @drm_enc:    Pointer to previously created drm encoder structure
+ * @state:      Current DRM atomic state
+ */
+bool dpu_encoder_needs_modeset(struct drm_encoder *drm_enc, struct drm_atomic_state *state);
+
 /**
  * dpu_encoder_prepare_wb_job - prepare writeback job for the encoder.
  * @drm_enc:    Pointer to previously created drm encoder structure