[v1,11/14] drm/msm/disp/dpu1: add support for the new flush mechanism

Message ID 1674498274-6010-12-git-send-email-quic_khsieh@quicinc.com (mailing list archive)
State Not Applicable
Series: add display port DSC feature

Commit Message

Kuogee Hsieh Jan. 23, 2023, 6:24 p.m. UTC
A new flushing mechanism is introduced to decouple peripheral metadata
flushing from the timing-engine-related flush. This patch adds the
peripheral flushing functions.

Signed-off-by: Kuogee Hsieh <quic_khsieh@quicinc.com>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c        | 24 ++++++++++--
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h   |  2 +
 .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c   |  7 ++++
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c         | 43 ++++++++++++++++++++--
 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h         | 21 +++++++++++
 5 files changed, 91 insertions(+), 6 deletions(-)
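
[Editorial note: the following is a minimal sketch of how the decoupled
flush introduced by this patch is meant to work, condensed from the hunks
below; it is not part of the patch itself. The encoder records the DP
peripheral and DSC blocks in dedicated pending masks, and the v1 CTL flush
then writes those per-block flush registers before latching the top-level
CTL_FLUSH. Names and register offsets are taken from dpu_hw_ctl.c as
modified here.]

	/* Encoder side: for a DP interface driving DSC, queue the peripheral
	 * metadata flush; the DSC blocks are queued from the DSC pipe config.
	 */
	if (phys->hw_intf->cap->type == INTF_DP && phys->comp_ratio)
		ctl->ops.update_pending_flush_periph(ctl, phys->hw_intf->idx);
	if (ctl->ops.update_pending_flush_dsc)
		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);

	/* CTL side (dpu_hw_ctl_trigger_flush_v1): write the dedicated flush
	 * registers first, then CTL_FLUSH latches everything in one shot.
	 */
	if (ctx->pending_flush_mask & BIT(PERIPH_IDX))		/* bit 30 */
		DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,	/* 0x128 */
			      ctx->pending_periph_flush_mask);
	if (ctx->pending_flush_mask & BIT(DSC_IDX))		/* bit 22 */
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,		/* 0x104 */
			      ctx->pending_dsc_flush_mask);
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);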

Comments

Dmitry Baryshkov Jan. 23, 2023, 9:43 p.m. UTC | #1
On 23/01/2023 20:24, Kuogee Hsieh wrote:
> A new flushing mechanism is introduced to decouple peripheral metadata
> flushing from the timing-engine-related flush. This patch adds the
> peripheral flushing functions.
> 
> Signed-off-by: Kuogee Hsieh <quic_khsieh@quicinc.com>
> ---
>   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c        | 24 ++++++++++--
>   drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h   |  2 +
>   .../gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c   |  7 ++++
>   drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c         | 43 ++++++++++++++++++++--
>   drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h         | 21 +++++++++++
>   5 files changed, 91 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> index 901e317..d2625b3 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
> @@ -1472,6 +1472,12 @@ static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
>   	if (extra_flush_bits && ctl->ops.update_pending_flush)
>   		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
>   
> +	if (phys->hw_intf->cap->type == INTF_DP &&
> +		phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
> +						phys->comp_ratio) {

Do you really need to know comp_ratio here? And the comp_type?

> +		ctl->ops.update_pending_flush_periph(ctl, phys->hw_intf->idx);
> +	}
> +
>   	ctl->ops.trigger_flush(ctl);
>   
>   	if (ctl->ops.get_pending_flush)
> @@ -1814,12 +1820,18 @@ dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
>   	return DIV_ROUND_UP(total_pixels, dsc->slice_width);
>   }
>   
> -static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
> +static void dpu_encoder_dsc_pipe_cfg(struct dpu_encoder_virt *dpu_enc,
> +				     struct dpu_hw_dsc *hw_dsc,
>   				     struct dpu_hw_pingpong *hw_pp,
>   				     struct drm_dsc_config *dsc,
>   				     u32 common_mode,
>   				     u32 initial_lines)
>   {
> +	struct dpu_encoder_phys *cur_master = dpu_enc->cur_master;
> +	struct dpu_hw_ctl *ctl;
> +
> +	ctl = cur_master->hw_ctl;
> +
>   	if (hw_dsc->ops.dsc_config)
>   		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines, false);
>   
> @@ -1834,6 +1846,10 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
>   
>   	if (hw_pp->ops.enable_dsc)
>   		hw_pp->ops.enable_dsc(hw_pp);
> +
> +	if (ctl->ops.update_pending_flush_dsc)
> +		ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
> +
>   }
>   
>   static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
> @@ -1877,8 +1893,10 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
>   	enc_ip_w = intf_ip_w / 2;
>   	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
>   
> -	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
> -		dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
> +	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
> +		dpu_encoder_dsc_pipe_cfg(dpu_enc, hw_dsc[i], hw_pp[i], dsc,
> +					dsc_common_mode, initial_lines);
> +	}
>   }
>   
>   void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
> index 1d434b2..0569b36 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
> @@ -200,6 +200,8 @@ struct dpu_encoder_phys {
>   	atomic_t pending_kickoff_cnt;
>   	wait_queue_head_t pending_kickoff_wq;
>   	int irq[INTR_IDX_MAX];
> +	enum msm_display_compression_type comp_type;
> +	u32 comp_ratio;
>   };
>   
>   static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
> index 48c4810..2d864f9 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
> @@ -1,5 +1,6 @@
>   // SPDX-License-Identifier: GPL-2.0-only
>   /* Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
> + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
>    */
>   
>   #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
> @@ -427,6 +428,12 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
>   	if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
>   		ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);
>   
> +	if (phys_enc->hw_intf->cap->type == INTF_DP &&
> +		phys_enc->comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
> +					phys_enc->comp_ratio) {
> +		ctl->ops.update_pending_flush_periph(ctl, phys_enc->hw_intf->idx);
> +	}
> +
>   skip_flush:
>   	DPU_DEBUG_VIDENC(phys_enc,
>   		"update pending flush ctl %d intf %d\n",
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
> index b88a2f3..1891c57 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
> @@ -33,6 +33,7 @@
>   #define   CTL_DSC_FLUSH                0x104
>   #define   CTL_WB_FLUSH                  0x108
>   #define   CTL_INTF_FLUSH                0x110
> +#define   CTL_PERIPH_FLUSH              0x128
>   #define   CTL_INTF_MASTER               0x134
>   #define   CTL_FETCH_PIPE_ACTIVE         0x0FC
>   
> @@ -42,11 +43,13 @@
>   #define DPU_REG_RESET_TIMEOUT_US        2000
>   #define  MERGE_3D_IDX   23
>   #define  DSC_IDX        22
> +#define  PERIPH_IDX     30
>   #define  INTF_IDX       31
>   #define WB_IDX          16
>   #define CTL_INVALID_BIT                 0xffff
>   #define CTL_DEFAULT_GROUP_ID		0xf
>   
> +

No extra empty lines please.

>   static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
>   	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
>   	1, 2, 3, CTL_INVALID_BIT, CTL_INVALID_BIT};
> @@ -123,6 +126,7 @@ static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
>   	trace_dpu_hw_ctl_update_pending_flush(flushbits,
>   					      ctx->pending_flush_mask);
>   	ctx->pending_flush_mask |= flushbits;
> +
>   }
>   
>   static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
> @@ -142,6 +146,15 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
>   		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
>   				ctx->pending_wb_flush_mask);
>   
> +	if (ctx->pending_flush_mask & BIT(PERIPH_IDX))
> +		DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
> +				ctx->pending_periph_flush_mask);
> +
> +	if (ctx->pending_flush_mask & BIT(DSC_IDX)) {
> +		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
> +				ctx->pending_dsc_flush_mask);
> +	}
> +
>   	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
>   }
>   
> @@ -281,6 +294,13 @@ static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
>   	ctx->pending_flush_mask |= BIT(INTF_IDX);
>   }
>   
> +static void dpu_hw_ctl_update_pending_flush_periph(struct dpu_hw_ctl *ctx,
> +		enum dpu_intf intf)
> +{
> +	ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
> +	ctx->pending_flush_mask |= BIT(PERIPH_IDX);
> +}
> +
>   static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
>   		enum dpu_merge_3d merge_3d)
>   {
> @@ -288,6 +308,13 @@ static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
>   	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
>   }
>   
> +static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
> +		enum dpu_dsc dsc_num)
> +{
> +	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
> +	ctx->pending_flush_mask |= BIT(DSC_IDX);
> +}
> +
>   static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
>   	enum dpu_dspp dspp)
>   {
> @@ -472,6 +499,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
>   	u32 intf_active = 0;
>   	u32 wb_active = 0;
>   	u32 mode_sel = 0;
> +	u32 dsc_active = 0;
>   
>   	/* CTL_TOP[31:28] carries group_id to collate CTL paths
>   	 * per VM. Explicitly disable it until VM support is
> @@ -502,9 +530,11 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
>   	if (cfg->merge_3d)
>   		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
>   			      BIT(cfg->merge_3d - MERGE_3D_0));
> -	if (cfg->dsc) {
> -		DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, DSC_IDX);
> -		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
> +
> +	if (cfg->dsc_num) {
> +		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
> +		dsc_active |= BIT(cfg->dsc_num - DSC_0);
> +		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);

This deserves a separate patch with a Fixes tag.

>   	}
>   }
>   
> @@ -605,9 +635,16 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
>   		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
>   		ops->update_pending_flush_intf =
>   			dpu_hw_ctl_update_pending_flush_intf_v1;
> +
> +		ops->update_pending_flush_periph =
> +			dpu_hw_ctl_update_pending_flush_periph;
> +
>   		ops->update_pending_flush_merge_3d =
>   			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
>   		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
> +
> +		ops->update_pending_flush_dsc =
> +			dpu_hw_ctl_update_pending_flush_dsc_v1;
>   	} else {
>   		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
>   		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
> diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
> index 96c012e..d3faa0b1 100644
> --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
> +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
> @@ -48,6 +48,7 @@ struct dpu_hw_intf_cfg {
>   	enum dpu_3d_blend_mode mode_3d;
>   	enum dpu_merge_3d merge_3d;
>   	enum dpu_ctl_mode_sel intf_mode_sel;
> +	enum dpu_dsc dsc_num;
>   	int stream_sel;
>   	unsigned int dsc;
>   };
> @@ -121,6 +122,15 @@ struct dpu_hw_ctl_ops {
>   		enum dpu_intf blk);
>   
>   	/**
> +	 * OR in the given flushbits to the cached pending_(periph_)flush_mask
> +	 * No effect on hardware
> +	 * @ctx       : ctl path ctx pointer
> +	 * @blk       : interface block index
> +	 */
> +	void (*update_pending_flush_periph)(struct dpu_hw_ctl *ctx,
> +		enum dpu_intf blk);
> +
> +	/**
>   	 * OR in the given flushbits to the cached pending_(merge_3d_)flush_mask
>   	 * No effect on hardware
>   	 * @ctx       : ctl path ctx pointer
> @@ -156,6 +166,15 @@ struct dpu_hw_ctl_ops {
>   	void (*update_pending_flush_dspp)(struct dpu_hw_ctl *ctx,
>   		enum dpu_dspp blk);
>   	/**
> +	 * OR in the given flushbits to the cached pending_(dsc_)flush_mask
> +	 * No effect on hardware
> +	 * @ctx       : ctl path ctx pointer
> +	 * @blk       : interface block index
> +	 */
> +	void (*update_pending_flush_dsc)(struct dpu_hw_ctl *ctx,
> +		enum dpu_dsc blk);
> +
> +	/**
>   	 * Write the value of the pending_flush_mask to hardware
>   	 * @ctx       : ctl path ctx pointer
>   	 */
> @@ -241,7 +260,9 @@ struct dpu_hw_ctl {
>   	u32 pending_flush_mask;
>   	u32 pending_intf_flush_mask;
>   	u32 pending_wb_flush_mask;
> +	u32 pending_periph_flush_mask;
>   	u32 pending_merge_3d_flush_mask;
> +	u32 pending_dsc_flush_mask;
>   
>   	/* ops */
>   	struct dpu_hw_ctl_ops ops;