@@ -215,10 +215,46 @@ static bool qcom_adreno_can_do_ttbr1(struct arm_smmu_device *smmu)
return true;
}

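+/*
+ * Program the context bank's ACTLR register when one of the device's
+ * stream mappings falls inside a SID/mask range from the ACTLR table.
+ */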
+static void qcom_smmu_set_actlr(struct device *dev, struct arm_smmu_device *smmu, int cbndx,
+ const struct actlr_config *actlrcfg, const size_t num_actlrcfg)
+{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_smr *smr;
+ u16 mask;
+ int idx;
+ u16 id;
+ int i;
+ int j;
+
+ for (i = 0; i < num_actlrcfg; i++) {
+ id = actlrcfg[i].sid;
+ mask = actlrcfg[i].mask;
+
+ for_each_cfg_sme(cfg, fwspec, j, idx) {
+ smr = &smmu->smrs[idx];
+ if (smr_is_subset(smr, id, mask)) {
+ arm_smmu_cb_write(smmu, cbndx, ARM_SMMU_CB_ACTLR,
+ actlrcfg[i].actlr);
+ break;
+ }
+ }
+ }
+}
+
static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct actlr_variant *actlrvar;
+ int cbndx = smmu_domain->cfg.cbndx;
struct adreno_smmu_priv *priv;
+ int i;

smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;

@@ -248,6 +280,18 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
priv->get_fault_info = qcom_adreno_smmu_get_fault_info;
priv->set_stall = qcom_adreno_smmu_set_stall;
priv->resume_translation = qcom_adreno_smmu_resume_translation;

+ actlrvar = qsmmu->data->actlrvar;
+ if (!actlrvar)
+ return 0;
+
+ for (i = 0; i < qsmmu->data->num_smmu; i++) {
+ if (actlrvar[i].io_start == smmu->ioaddr) {
+ qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar[i].actlrcfg,
+ actlrvar[i].num_actlrcfg);
+ break;
+ }
+ }
+
return 0;
}
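
For reference, the tables these lookup loops consume might be laid out as in the sketch below. Every SID, mask, MMIO address, and ACTLR value is invented purely to show the shape of the data; real entries would come from the SoC memory map and the ACTLR bit definitions of the SMMU implementation in question.

/* Illustrative only: all SIDs, masks, addresses and ACTLR values are made up. */
static const struct actlr_config example_actlrcfg[] = {
	/* Streams matching SID 0x0800 under mask 0x00e0 get this ACTLR value */
	{ .sid = 0x0800, .mask = 0x00e0, .actlr = 0x3 },
};

static const struct actlr_variant example_actlrvar[] = {
	{
		/* Compared against smmu->ioaddr to select this SMMU instance */
		.io_start = 0x15000000,
		.actlrcfg = example_actlrcfg,
		.num_actlrcfg = ARRAY_SIZE(example_actlrcfg),
	},
};
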
@@ -277,7 +321,25 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
static int qcom_smmu_init_context(struct arm_smmu_domain *smmu_domain,
struct io_pgtable_cfg *pgtbl_cfg, struct device *dev)
{
+ struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct qcom_smmu *qsmmu = to_qcom_smmu(smmu);
+ const struct actlr_variant *actlrvar;
+ int cbndx = smmu_domain->cfg.cbndx;
+ int i;
+
smmu_domain->cfg.flush_walk_prefer_tlbiasid = true;
+
+ actlrvar = qsmmu->data->actlrvar;
+ if (!actlrvar)
+ return 0;
+
+ for (i = 0; i < qsmmu->data->num_smmu; i++) {
+ if (actlrvar[i].io_start == smmu->ioaddr) {
+ qcom_smmu_set_actlr(dev, smmu, cbndx, actlrvar[i].actlrcfg,
+ actlrvar[i].num_actlrcfg);
+ break;
+ }
+ }

return 0;
}
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef _ARM_SMMU_QCOM_H
@@ -24,8 +24,24 @@ struct qcom_smmu_config {
const u32 *reg_offset;
};

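+/* Maps a SID/mask stream match onto the ACTLR value to program for it */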
+struct actlr_config {
+ u16 sid;
+ u16 mask;
+ u32 actlr;
+};
+
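+/* Per-SMMU-instance ACTLR table, keyed by the instance's MMIO base address */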
+struct actlr_variant {
+ const resource_size_t io_start;
+ const struct actlr_config * const actlrcfg;
+ const size_t num_actlrcfg;
+};
+
struct qcom_smmu_match_data {
+ const struct actlr_variant * const actlrvar;
const struct qcom_smmu_config *cfg;
+ const size_t num_smmu;
const struct arm_smmu_impl *impl;
const struct arm_smmu_impl *adreno_impl;
};
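
Tying the new fields together, a match-data instance would presumably be populated along the following lines; qcom_smmu_500_impl and qcom_adreno_smmu_500_impl are the existing impl names in arm-smmu-qcom.c, example_actlrvar is the hypothetical table sketched earlier, and num_smmu bounds the io_start lookup loops in the init_context paths above.

/* Hypothetical wiring of the ACTLR tables into the per-SoC match data. */
static const struct qcom_smmu_match_data example_match_data = {
	.impl = &qcom_smmu_500_impl,
	.adreno_impl = &qcom_adreno_smmu_500_impl,
	.actlrvar = example_actlrvar,
	.num_smmu = ARRAY_SIZE(example_actlrvar),
};
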
@@ -1003,9 +1003,8 @@ static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
* expect simply identical entries for this case, but there's
* no harm in accommodating the generalisation.
*/
- if ((mask & smrs[i].mask) == mask &&
-     !((id ^ smrs[i].id) & ~smrs[i].mask))
+ if (smr_is_subset(&smrs[i], id, mask))
return i;
/*
* If the new entry has any other overlap with an existing one,
* though, then there always exists at least one stream ID
@@ -503,6 +503,12 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

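+/* Return true if the stream match (id, mask) lies entirely within the SMR */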
+static inline bool smr_is_subset(struct arm_smmu_smr *smrs, u16 id, u16 mask)
+{
+ return (mask & smrs->mask) == mask && !((id ^ smrs->id) & ~smrs->mask);
+}
+
#define ARM_SMMU_GR0 0
#define ARM_SMMU_GR1 1
#define ARM_SMMU_CB(s, n) ((s)->numpage + (n))
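
To see what smr_is_subset() accepts and rejects, the standalone harness below (a simplified arm_smmu_smr carrying only the id/mask fields, with invented values) exercises both conditions: the caller's don't-care bits must be a subset of the SMR's don't-care bits, and the two IDs must agree on every bit the SMR still matches on.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's struct arm_smmu_smr */
struct arm_smmu_smr {
	uint16_t mask;
	uint16_t id;
};

static bool smr_is_subset(struct arm_smmu_smr *smrs, uint16_t id, uint16_t mask)
{
	return (mask & smrs->mask) == mask && !((id ^ smrs->id) & ~smrs->mask);
}

int main(void)
{
	struct arm_smmu_smr smr = { .mask = 0x00e0, .id = 0x0800 };

	/* 0x0840 only differs in bit 6, which the SMR mask already ignores */
	assert(smr_is_subset(&smr, 0x0840, 0x0020));
	/* 0x0900 differs in bit 8, a bit the SMR still matches on */
	assert(!smr_is_subset(&smr, 0x0900, 0x0000));
	/* mask 0x0100 ignores a bit the SMR cares about, so not a subset */
	assert(!smr_is_subset(&smr, 0x0800, 0x0100));
	return 0;
}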