@@ -14,6 +14,16 @@
#define QCOM_DUMMY_VAL -1
+/*
+ * SMMU-500 TRM defines BIT(0) as CMTLB (Enable context caching in the
+ * macro TLB) and BIT(1) as CPRE (Enable context caching in the prefetch
+ * buffer). The remaining bits are implementation defined and vary across
+ * SoCs.
+ */
+
+#define CPRE (1 << 1)
+#define CMTLB (1 << 0)
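+
+/*
+ * Only CPRE is re-enabled by the qcom reset hook below; CMTLB is listed
+ * for reference.
+ */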
+
static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
{
return container_of(smmu, struct qcom_smmu, smmu);
@@ -379,11 +389,31 @@ static int qcom_smmu_def_domain_type(struct device *dev)
return match ? IOMMU_DOMAIN_IDENTITY : 0;
}
+static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
+{
+ int ret;
+ u32 val;
+ int i;
+
+ ret = arm_mmu500_reset(smmu);
+ if (ret)
+ return ret;
+
+ /*
+  * arm_mmu500_reset() clears CPRE in every context bank as a
+  * workaround for MMU-500 errata #841119 and #826419; re-enable
+  * prefetch context caching here.
+  */
+ for (i = 0; i < smmu->num_context_banks; ++i) {
+ val = arm_smmu_cb_read(smmu, i, ARM_SMMU_CB_ACTLR);
+ val |= CPRE;
+ arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_ACTLR, val);
+ }
+
+ return 0;
+}
+
static int qcom_sdm845_smmu500_reset(struct arm_smmu_device *smmu)
{
int ret;
- arm_mmu500_reset(smmu);
+ ret = qcom_smmu500_reset(smmu);
+ if (ret)
+ 	return ret;
/*
* To address performance degradation in non-real time clients,
@@ -410,7 +440,7 @@ static const struct arm_smmu_impl qcom_smmu_500_impl = {
.init_context = qcom_smmu_init_context,
.cfg_probe = qcom_smmu_cfg_probe,
.def_domain_type = qcom_smmu_def_domain_type,
- .reset = arm_mmu500_reset,
+ .reset = qcom_smmu500_reset,
.write_s2cr = qcom_smmu_write_s2cr,
.tlb_sync = qcom_smmu_tlb_sync,
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
@@ -443,7 +473,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_v2_impl = {
static const struct arm_smmu_impl qcom_adreno_smmu_500_impl = {
.init_context = qcom_adreno_smmu_init_context,
.def_domain_type = qcom_smmu_def_domain_type,
- .reset = arm_mmu500_reset,
+ .reset = qcom_smmu500_reset,
.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
.write_sctlr = qcom_adreno_smmu_write_sctlr,
.tlb_sync = qcom_smmu_tlb_sync,