@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -23,10 +24,12 @@
#define CTL_SW_RESET 0x030
#define CTL_LAYER_EXTN_OFFSET 0x40
#define CTL_MERGE_3D_ACTIVE 0x0E4
+#define CTL_WB_ACTIVE 0x0EC
#define CTL_INTF_ACTIVE 0x0F4
#define CTL_MERGE_3D_FLUSH 0x100
#define CTL_DSC_ACTIVE 0x0E8
#define CTL_DSC_FLUSH 0x104
+#define CTL_WB_FLUSH 0x108
#define CTL_INTF_FLUSH 0x110
#define CTL_INTF_MASTER 0x134
#define CTL_FETCH_PIPE_ACTIVE 0x0FC
@@ -38,6 +41,7 @@
#define MERGE_3D_IDX 23
#define DSC_IDX 22
#define INTF_IDX 31
+#define WB_IDX 16
#define CTL_INVALID_BIT 0xffff
#define CTL_DEFAULT_GROUP_ID 0xf
@@ -135,6 +139,9 @@ static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
if (ctx->pending_flush_mask & BIT(INTF_IDX))
DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
ctx->pending_intf_flush_mask);
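+ /*
+  * Per-WB bits go through the detailed CTL_WB_FLUSH register; the
+  * aggregate WB bit is written out with CTL_FLUSH below, mirroring
+  * the interface handling above.
+  */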
+ if (ctx->pending_flush_mask & BIT(WB_IDX))
+ DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
+ ctx->pending_wb_flush_mask);
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
@@ -255,6 +262,26 @@ static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
}
}
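+/*
+ * Legacy (non-active) CTLs: all writeback blocks share the single WB
+ * bit in CTL_FLUSH, so only the aggregate pending_flush_mask is updated.
+ */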
+static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ switch (wb) {
+ case WB_0:
+ case WB_1:
+ case WB_2:
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+ break;
+ default:
+ break;
+ }
+}
+
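+/*
+ * Active (v1) CTLs additionally track the per-WB bit so that
+ * dpu_hw_ctl_trigger_flush_v1() can program CTL_WB_FLUSH before the
+ * main CTL_FLUSH write.
+ */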
+static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
+ enum dpu_wb wb)
+{
+ ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
+ ctx->pending_flush_mask |= BIT(WB_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
enum dpu_intf intf)
{
@@ -504,6 +531,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 wb_active = 0;
u32 mode_sel = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
@@ -520,10 +548,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
mode_sel |= BIT(17);
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
- intf_active |= BIT(cfg->intf - INTF_0);
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+
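+ /* only mark the blocks that were actually requested as active */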
+ if (cfg->intf)
+ intf_active |= BIT(cfg->intf - INTF_0);
+
+ if (cfg->wb)
+ wb_active |= BIT(cfg->wb - WB_0);
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+
if (cfg->merge_3d)
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
BIT(cfg->merge_3d - MERGE_3D_0));
@@ -546,6 +582,9 @@ static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
intf_cfg |= (cfg->mode_3d - 0x1) << 20;
}
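+ /*
+  * Writeback select for the legacy path: encode the WB index with a
+  * +2 offset into the low bits of the CTL_TOP value.
+  */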
+ if (cfg->wb)
+ intf_cfg |= (cfg->wb & 0x3) + 2;
+
switch (cfg->intf_mode_sel) {
case DPU_CTL_MODE_SEL_VID:
intf_cfg &= ~BIT(17);
@@ -568,12 +607,13 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 wb_active = 0;
u32 merge3d_active = 0;
/*
* This API resets each portion of the CTL path namely,
* clearing the sspps staged on the lm, merge_3d block,
- * interfaces etc to ensure clean teardown of the pipeline.
+ * interfaces, writeback etc. to ensure clean teardown of the pipeline.
* This will be used for writeback to begin with to have a
* proper teardown of the writeback session but upon further
* validation, this can be extended to all interfaces.
@@ -592,6 +632,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
intf_active &= ~BIT(cfg->intf - INTF_0);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}
+
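+ /* deactivate the writeback block as well so the WB session tears
+  * down cleanly
+  */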
+ if (cfg->wb) {
+ wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ wb_active &= ~BIT(cfg->wb - WB_0);
+ DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ }
}
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
@@ -622,11 +668,13 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
dpu_hw_ctl_update_pending_flush_intf_v1;
ops->update_pending_flush_merge_3d =
dpu_hw_ctl_update_pending_flush_merge_3d_v1;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
+ ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_HW_CTL_H
@@ -44,6 +45,7 @@ struct dpu_hw_stage_cfg {
*/
struct dpu_hw_intf_cfg {
enum dpu_intf intf;
+ enum dpu_wb wb;
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
@@ -102,6 +104,15 @@ struct dpu_hw_ctl_ops {
u32 flushbits);
/**
+ * OR in the given flushbits to the cached pending_(wb_)flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @blk : writeback block index
+ */
+ void (*update_pending_flush_wb)(struct dpu_hw_ctl *ctx,
+ enum dpu_wb blk);
+
+ /**
* OR in the given flushbits to the cached pending_(intf_)flush_mask
* No effect on hardware
* @ctx : ctl path ctx pointer
@@ -199,6 +210,7 @@ struct dpu_hw_ctl_ops {
* @mixer_hw_caps: mixer hardware capabilities
* @pending_flush_mask: storage for pending ctl_flush managed via ops
* @pending_intf_flush_mask: pending INTF flush
+ * @pending_wb_flush_mask: pending WB flush
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -212,6 +224,7 @@ struct dpu_hw_ctl {
const struct dpu_lm_cfg *mixer_hw_caps;
u32 pending_flush_mask;
u32 pending_intf_flush_mask;
+ u32 pending_wb_flush_mask;
u32 pending_merge_3d_flush_mask;
/* ops */