--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -54,6 +54,29 @@ static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 }
 
+static void
+arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_master *master;
+	struct arm_smmu_cd target_cd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+		struct arm_smmu_cd *cdptr;
+
+		/* S1 domains only support RID attachment right now */
+		cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+		if (WARN_ON(!cdptr))
+			continue;
+
+		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+		arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+					&target_cd);
+	}
+	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+}
+
 /*
  * Check if the CPU ASID is available on the SMMU side. If a private context
  * descriptor is using it, try to replace it.
@@ -97,7 +120,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
 	 * be some overlap between use of both ASIDs, until we invalidate the
 	 * TLB.
 	 */
-	arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
+	arm_smmu_update_s1_domain_cd_entry(smmu_domain);
 
 	/* Invalidate TLB entries previously associated with that context */
 	arm_smmu_tlb_inv_asid(smmu, asid);
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1118,8 +1118,8 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
 	WRITE_ONCE(*dst, cpu_to_le64(val));
 }
 
-static struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
-					       u32 ssid)
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+					u32 ssid)
 {
 	__le64 *l1ptr;
 	unsigned int idx;
@@ -1181,9 +1181,9 @@ static bool arm_smmu_write_cd_step(struct arm_smmu_cd *cur,
 }
 
-static void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
-				    struct arm_smmu_cd *cdptr,
-				    const struct arm_smmu_cd *target)
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+			     struct arm_smmu_cd *cdptr,
+			     const struct arm_smmu_cd *target)
 {
 	struct arm_smmu_cd target_used;
@@ -1195,6 +1195,32 @@ static void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
 	}
 }
 
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+			 struct arm_smmu_master *master,
+			 struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
+
+	memset(target, 0, sizeof(*target));
+
+	target->data[0] = cpu_to_le64(
+		cd->tcr |
+#ifdef __BIG_ENDIAN
+		CTXDESC_CD_0_ENDI |
+#endif
+		CTXDESC_CD_0_V |
+		CTXDESC_CD_0_AA64 |
+		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
+		CTXDESC_CD_0_R |
+		CTXDESC_CD_0_A |
+		CTXDESC_CD_0_ASET |
+		FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
+		);
+
+	target->data[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
+	target->data[3] = cpu_to_le64(cd->mair);
+}
+
 void arm_smmu_clear_cd(struct arm_smmu_master *master, int ssid)
 {
 	struct arm_smmu_cd target = {};
@@ -2609,29 +2635,29 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
 	switch (smmu_domain->stage) {
-	case ARM_SMMU_DOMAIN_S1:
+	case ARM_SMMU_DOMAIN_S1: {
+		struct arm_smmu_cd target_cd;
+		struct arm_smmu_cd *cdptr;
+
 		if (!master->cd_table.cdtab) {
 			ret = arm_smmu_alloc_cd_tables(master);
 			if (ret)
 				goto out_list_del;
-		} else {
-			/*
-			 * arm_smmu_write_ctx_desc() relies on the entry being
-			 * invalid to work, clear any existing entry.
-			 */
-			ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID,
-						      NULL);
-			if (ret)
-				goto out_list_del;
 		}
 
-		ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
-		if (ret)
+		cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+		if (!cdptr) {
+			ret = -ENOMEM;
 			goto out_list_del;
+		}
 
+		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
+		arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+					&target_cd);
 		arm_smmu_make_cdtable_ste(&target, master, &master->cd_table);
 		arm_smmu_install_ste_for_dev(master, &target);
 		break;
+	}
 	case ARM_SMMU_DOMAIN_S2:
 		arm_smmu_make_s2_domain_ste(&target, master, smmu_domain);
 		arm_smmu_install_ste_for_dev(master, &target);
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -764,6 +764,14 @@ extern struct mutex arm_smmu_asid_lock;
 extern struct arm_smmu_ctx_desc quiet_cd;
 
 void arm_smmu_clear_cd(struct arm_smmu_master *master, int ssid);
+struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
+					u32 ssid);
+void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
+			 struct arm_smmu_master *master,
+			 struct arm_smmu_domain *smmu_domain);
+void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
+			     struct arm_smmu_cd *cdptr,
+			     const struct arm_smmu_cd *target);
 int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
 			    struct arm_smmu_ctx_desc *cd);
Introduce arm_smmu_make_s1_cd() to build the CD from the paging S1 domain,
and reorganize all the places programming S1 domain CD table entries to
call it.

Split arm_smmu_update_s1_domain_cd_entry() from
arm_smmu_update_ctx_desc_devices() so that the S1 path has its own call
chain separate from the unrelated SVA path.
arm_smmu_update_s1_domain_cd_entry() only works on S1 domains attached to
RIDs and refreshes all their CDs.

Remove the forced clear of the CD during S1 domain attach;
arm_smmu_write_cd_entry() will do this automatically if necessary.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 25 +++++++-
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 60 +++++++++++++------
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |  8 +++
 3 files changed, 75 insertions(+), 18 deletions(-)
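
A minimal sketch of the calling pattern the newly exported helpers
establish (the same sequence followed by the attach path and the SVA
refresh path above), assuming a valid master/smmu_domain pair and the
caller's existing locking:

	struct arm_smmu_cd target_cd;
	struct arm_smmu_cd *cdptr;

	/* Locate the CD table entry for the RID (SSID 0) */
	cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
	if (!cdptr)
		return -ENOMEM;

	/* Compose the CD contents on the stack from the S1 domain */
	arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);

	/*
	 * Program the entry. arm_smmu_write_cd_entry() sequences the
	 * update itself and clears the existing entry first only if
	 * necessary, so callers no longer need a forced clear.
	 */
	arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr, &target_cd);

Building the CD on the stack and handing it to arm_smmu_write_cd_entry()
keeps CD composition separate from the programming sequence, which is
what allows the forced clear in the attach path to go away.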