@@ -129,6 +129,59 @@ static const struct adreno_reglist a615_hwcg[] = {
{},
};

+static const struct adreno_reglist a620_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
static const struct adreno_reglist a630_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
@@ -490,7 +543,6 @@ static const u32 a630_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a630_protect, 32);

-/* These are for a620 and a650 */
static const u32 a650_protect_regs[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
@@ -774,6 +826,30 @@ static const struct adreno_info a6xx_gpus[] = {
{ 169, 2 },
{ 180, 1 },
),
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06020100),
+ .family = ADRENO_6XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a621_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a620_zap.mbn",
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a620_hwcg,
+ .protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00010000,
+ },
+ .address_space_size = SZ_16G,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 137, 1 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@@ -423,6 +423,20 @@ static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

+static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
+ /*
+ * GEMNoC can power collapse whilst the GPU is being powered down, resulting
+ * in the power down sequence not being fully executed. That in turn can
+ * prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
+ */
+ if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
+}
+
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
@@ -456,6 +470,8 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}

out:
+ a6xx_gemnoc_workaround(gmu);
+
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
@@ -945,6 +961,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Force off SPTP in case the GMU is managing it */
a6xx_sptprac_disable(gmu);

+ a6xx_gemnoc_workaround(gmu);
+
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);

@@ -523,6 +523,12 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;

+ if (adreno_is_a621(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 13;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ }
+
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;

@@ -384,6 +384,11 @@ static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}

+static inline int adreno_is_a621(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06020100;
+}
+
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);