@@ -17,6 +17,18 @@ static bool qcom_adreno_smmu_is_gpu_device(struct arm_smmu_domain *smmu_domain)
 	return of_device_is_compatible(smmu_domain->dev->of_node, "qcom,adreno");
 }
 
+static void qcom_adreno_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
+					       struct arm_smmu_cb *cb)
+{
+	/*
+	 * On the GPU device we want to process subsequent transactions after a
+	 * fault to keep the GPU from hanging
+	 */
+
+	if (qcom_adreno_smmu_is_gpu_device(smmu_domain))
+		cb->sctlr |= ARM_SMMU_SCTLR_HUPCF;
+}
+
 static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
 		struct io_pgtable_cfg *pgtbl_cfg)
 {
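
For reference: SCTLR.HUPCF ("Hit Under Previous Context Fault", bit 8 in the SMMUv2 SCTLR layout) lets the SMMU keep servicing client transactions while a context fault is outstanding; with it clear, everything queued behind the faulting access is terminated, which on Adreno tends to wedge the GPU rather than let it recover. The qcom_adreno_smmu_is_gpu_device() check confines this relaxed behaviour to context banks owned by the GPU, so other masters keep the default terminate-on-fault semantics.
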
@@ -92,6 +104,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
 	.init_context = qcom_adreno_smmu_init_context,
 	.def_domain_type = qcom_smmu_def_domain_type,
 	.reset = qcom_smmu500_reset,
+	.init_context_bank = qcom_adreno_smmu_init_context_bank,
 };
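
Note how the two hooks relate: .init_context runs early, while the domain is being initialized, and may adjust the io_pgtable configuration, whereas the new .init_context_bank runs after the core has filled in the context bank state and just before it is written to hardware. Roughly (a simplified sketch of the call order in arm_smmu_init_domain_context(), not verbatim code):

arm_smmu_init_domain_context()
    impl->init_context(smmu_domain, &pgtbl_cfg)        /* tweak the pgtable config */
    alloc_io_pgtable_ops(...)                          /* build the page tables */
    arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg)
        impl->init_context_bank(smmu_domain, cb)       /* tweak cb, e.g. cb->sctlr */
    arm_smmu_write_context_bank(smmu, cfg->cbndx)      /* flush the cb to hardware */
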
@@ -86,14 +86,6 @@ struct arm_smmu_smr {
 	bool valid;
 };
 
-struct arm_smmu_cb {
-	u64 ttbr[2];
-	u32 tcr[2];
-	u32 mair[2];
-	struct arm_smmu_cfg *cfg;
-	atomic_t aux;
-};
-
 struct arm_smmu_master_cfg {
 	struct arm_smmu_device *smmu;
 	s16 smendx[];
@@ -580,6 +572,18 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
 		}
 	}
+
+	cb->sctlr = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
+		    ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
+
+	if (stage1)
+		cb->sctlr |= ARM_SMMU_SCTLR_S1_ASIDPNE;
+	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		cb->sctlr |= ARM_SMMU_SCTLR_E;
+
+	/* Give the implementation a chance to adjust the configuration */
+	if (smmu_domain->smmu->impl && smmu_domain->smmu->impl->init_context_bank)
+		smmu_domain->smmu->impl->init_context_bank(smmu_domain, cb);
 }
 
 static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
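
As a worked example of the cached value (assuming the usual arm-smmu.h bit assignments, AFE = BIT(2), TRE = BIT(1), M = BIT(0)), a stage 1, little-endian Adreno context bank ends up with:

/*
 * CFIE | CFRE | AFE | TRE | M          -> 0x0067
 *   | S1_ASIDPNE (stage 1)             -> 0x1067
 *   | HUPCF (set by the Adreno hook)   -> 0x1167
 */
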
@@ -658,14 +662,7 @@ static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
 	}
 
 	/* SCTLR */
-	reg = ARM_SMMU_SCTLR_CFIE | ARM_SMMU_SCTLR_CFRE | ARM_SMMU_SCTLR_AFE |
-	      ARM_SMMU_SCTLR_TRE | ARM_SMMU_SCTLR_M;
-	if (stage1)
-		reg |= ARM_SMMU_SCTLR_S1_ASIDPNE;
-	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
-		reg |= ARM_SMMU_SCTLR_E;
-
-	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, cb->sctlr);
 }
 
 /*
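
A useful side effect of caching SCTLR in struct arm_smmu_cb: arm_smmu_write_context_bank() is also called when the SMMU itself is (re)initialized, as in this loop from arm_smmu_device_reset():

	/* Make sure all context banks are disabled and clear CB_FSR  */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		arm_smmu_write_context_bank(smmu, i);
		arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
	}

so implementation-specific bits such as HUPCF survive a reset/resume cycle without re-running the hook.
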
@@ -142,6 +142,7 @@ enum arm_smmu_cbar_type {
 
 #define ARM_SMMU_CB_SCTLR		0x0
 #define ARM_SMMU_SCTLR_S1_ASIDPNE	BIT(12)
+#define ARM_SMMU_SCTLR_HUPCF		BIT(8)
 #define ARM_SMMU_SCTLR_CFCFG		BIT(7)
 #define ARM_SMMU_SCTLR_CFIE		BIT(6)
 #define ARM_SMMU_SCTLR_CFRE		BIT(5)
@@ -349,6 +350,15 @@ struct arm_smmu_domain {
 	bool aux;
 };
 
+struct arm_smmu_cb {
+	u64 ttbr[2];
+	u32 tcr[2];
+	u32 mair[2];
+	u32 sctlr;
+	struct arm_smmu_cfg *cfg;
+	atomic_t aux;
+};
+
 static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg)
 {
 	u32 tcr = FIELD_PREP(ARM_SMMU_TCR_TG0, cfg->arm_lpae_s1_cfg.tcr.tg) |
@@ -403,6 +413,8 @@ struct arm_smmu_impl {
 	void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
 			 int status);
 	int (*def_domain_type)(struct device *dev);
+	void (*init_context_bank)(struct arm_smmu_domain *smmu_domain,
+				  struct arm_smmu_cb *cb);
 };
 
 static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
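
Moving struct arm_smmu_cb into the header is what makes this signature workable: implementation code outside arm-smmu.c can now see and adjust the cached state. A minimal sketch of how another implementation might use the hook (the foo_* names are invented for illustration; CFCFG switches a context bank from terminating faulting transactions to stalling them):

static void foo_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct arm_smmu_cb *cb)
{
	/* Hypothetical: stall faulting transactions instead of terminating them */
	cb->sctlr |= ARM_SMMU_SCTLR_CFCFG;
}

static const struct arm_smmu_impl foo_smmu_impl = {
	.init_context_bank = foo_smmu_init_context_bank,
};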