Message ID | 7-v2-de8b10590bf5+400-smmuv3_newapi_p1_jgg@nvidia.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Update SMMUv3 to the modern iommu API (part 1/3) | expand |
On Mon, Nov 13, 2023 at 01:53:14PM -0400, Jason Gunthorpe wrote: > This is preparation to move the STE calculation higher up in to the call > chain and remove arm_smmu_write_strtab_ent(). These new functions will be > called directly from attach_dev. > > Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Moritz Fischer <mdf@kernel.org> > --- > drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 115 +++++++++++--------- > 1 file changed, 63 insertions(+), 52 deletions(-) > > diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c > index 3fc8787db2dbc1..1c63fdebbda9d4 100644 > --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c > +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c > @@ -1463,13 +1463,70 @@ static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target) > FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING)); > } > > +static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, > + struct arm_smmu_master *master, > + struct arm_smmu_ctx_desc_cfg *cd_table) > +{ > + struct arm_smmu_device *smmu = master->smmu; > + > + memset(target, 0, sizeof(*target)); > + target->data[0] = cpu_to_le64( > + STRTAB_STE_0_V | > + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | > + FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) | > + (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | > + FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax)); > + > + target->data[1] = cpu_to_le64( > + FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | > + FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | > + FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | > + FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | > + ((smmu->features & ARM_SMMU_FEAT_STALLS && > + !master->stall_enabled) ? > + STRTAB_STE_1_S1STALLD : > + 0) | > + FIELD_PREP(STRTAB_STE_1_EATS, > + master->ats_enabled ? 
STRTAB_STE_1_EATS_TRANS : 0) | > + FIELD_PREP(STRTAB_STE_1_STRW, > + (smmu->features & ARM_SMMU_FEAT_E2H) ? > + STRTAB_STE_1_STRW_EL2 : > + STRTAB_STE_1_STRW_NSEL1)); > +} > + > +static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, > + struct arm_smmu_master *master, > + struct arm_smmu_domain *smmu_domain) > +{ > + struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg; > + > + memset(target, 0, sizeof(*target)); > + > + target->data[0] = cpu_to_le64( > + STRTAB_STE_0_V | > + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS)); > + > + target->data[1] |= cpu_to_le64( > + FIELD_PREP(STRTAB_STE_1_EATS, > + master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0)); > + > + target->data[2] = cpu_to_le64( > + FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | > + FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | > + STRTAB_STE_2_S2AA64 | > +#ifdef __BIG_ENDIAN > + STRTAB_STE_2_S2ENDI | > +#endif > + STRTAB_STE_2_S2PTW | > + STRTAB_STE_2_S2R); > + > + target->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); > +} > + > static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, > struct arm_smmu_ste *dst) > { > - u64 val; > struct arm_smmu_device *smmu = master->smmu; > - struct arm_smmu_ctx_desc_cfg *cd_table = NULL; > - struct arm_smmu_s2_cfg *s2_cfg = NULL; > struct arm_smmu_domain *smmu_domain = master->domain; > struct arm_smmu_ste target = {}; > > @@ -1484,61 +1541,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, > > switch (smmu_domain->stage) { > case ARM_SMMU_DOMAIN_S1: > - cd_table = &master->cd_table; > + arm_smmu_make_cdtable_ste(&target, master, &master->cd_table); > break; > case ARM_SMMU_DOMAIN_S2: > - s2_cfg = &smmu_domain->s2_cfg; > + arm_smmu_make_s2_domain_ste(&target, master, smmu_domain); > break; > case ARM_SMMU_DOMAIN_BYPASS: > arm_smmu_make_bypass_ste(&target); > - arm_smmu_write_ste(smmu, sid, dst, &target); > - return; > + break; > } > - > - /* Nuke the existing STE_0 
value, as we're going to rewrite it */ > - val = STRTAB_STE_0_V; > - > - if (cd_table) { > - u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ? > - STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1; > - > - target.data[1] = cpu_to_le64( > - FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | > - FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | > - FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | > - FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | > - FIELD_PREP(STRTAB_STE_1_STRW, strw)); > - > - if (smmu->features & ARM_SMMU_FEAT_STALLS && > - !master->stall_enabled) > - target.data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); > - > - val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | > - FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | > - FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) | > - FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt); > - } > - > - if (s2_cfg) { > - target.data[2] = cpu_to_le64( > - FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | > - FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | > -#ifdef __BIG_ENDIAN > - STRTAB_STE_2_S2ENDI | > -#endif > - STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 | > - STRTAB_STE_2_S2R); > - > - target.data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); > - > - val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS); > - } > - > - if (master->ats_enabled) > - target.data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS, > - STRTAB_STE_1_EATS_TRANS)); > - > - target.data[0] = cpu_to_le64(val); > arm_smmu_write_ste(smmu, sid, dst, &target); > } > > -- > 2.42.0 >
On Tue, Nov 14, 2023 at 1:53 AM Jason Gunthorpe <jgg@nvidia.com> wrote: > > This is preparation to move the STE calculation higher up in to the call > chain and remove arm_smmu_write_strtab_ent(). These new functions will be > called directly from attach_dev. > > Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Michael Shavit <mshavit@google.com> > --- > drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 115 +++++++++++--------- > 1 file changed, 63 insertions(+), 52 deletions(-) > > diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c > index 3fc8787db2dbc1..1c63fdebbda9d4 100644 > --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c > +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c > @@ -1463,13 +1463,70 @@ static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target) > FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING)); > } > > +static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, > + struct arm_smmu_master *master, > + struct arm_smmu_ctx_desc_cfg *cd_table) > +{ > + struct arm_smmu_device *smmu = master->smmu; > + > + memset(target, 0, sizeof(*target)); > + target->data[0] = cpu_to_le64( > + STRTAB_STE_0_V | > + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | > + FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) | > + (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | > + FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax)); > + > + target->data[1] = cpu_to_le64( > + FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | > + FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | > + FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | > + FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | > + ((smmu->features & ARM_SMMU_FEAT_STALLS && > + !master->stall_enabled) ? > + STRTAB_STE_1_S1STALLD : > + 0) | > + FIELD_PREP(STRTAB_STE_1_EATS, > + master->ats_enabled ? 
STRTAB_STE_1_EATS_TRANS : 0) | > + FIELD_PREP(STRTAB_STE_1_STRW, > + (smmu->features & ARM_SMMU_FEAT_E2H) ? > + STRTAB_STE_1_STRW_EL2 : > + STRTAB_STE_1_STRW_NSEL1)); > +} > + > +static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, > + struct arm_smmu_master *master, > + struct arm_smmu_domain *smmu_domain) > +{ > + struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg; > + > + memset(target, 0, sizeof(*target)); > + > + target->data[0] = cpu_to_le64( > + STRTAB_STE_0_V | > + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS)); > + > + target->data[1] |= cpu_to_le64( > + FIELD_PREP(STRTAB_STE_1_EATS, > + master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0)); > + > + target->data[2] = cpu_to_le64( > + FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | > + FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | > + STRTAB_STE_2_S2AA64 | > +#ifdef __BIG_ENDIAN > + STRTAB_STE_2_S2ENDI | > +#endif > + STRTAB_STE_2_S2PTW | > + STRTAB_STE_2_S2R); > + > + target->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); > +} > + > static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, > struct arm_smmu_ste *dst) > { > - u64 val; > struct arm_smmu_device *smmu = master->smmu; > - struct arm_smmu_ctx_desc_cfg *cd_table = NULL; > - struct arm_smmu_s2_cfg *s2_cfg = NULL; > struct arm_smmu_domain *smmu_domain = master->domain; > struct arm_smmu_ste target = {}; > > @@ -1484,61 +1541,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, > > switch (smmu_domain->stage) { > case ARM_SMMU_DOMAIN_S1: > - cd_table = &master->cd_table; > + arm_smmu_make_cdtable_ste(&target, master, &master->cd_table); > break; > case ARM_SMMU_DOMAIN_S2: > - s2_cfg = &smmu_domain->s2_cfg; > + arm_smmu_make_s2_domain_ste(&target, master, smmu_domain); > break; > case ARM_SMMU_DOMAIN_BYPASS: > arm_smmu_make_bypass_ste(&target); > - arm_smmu_write_ste(smmu, sid, dst, &target); > - return; > + break; > } > - > - /* Nuke the existing STE_0 
value, as we're going to rewrite it */ > - val = STRTAB_STE_0_V; > - > - if (cd_table) { > - u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ? > - STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1; > - > - target.data[1] = cpu_to_le64( > - FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | > - FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | > - FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | > - FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | > - FIELD_PREP(STRTAB_STE_1_STRW, strw)); > - > - if (smmu->features & ARM_SMMU_FEAT_STALLS && > - !master->stall_enabled) > - target.data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); > - > - val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | > - FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | > - FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) | > - FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt); > - } > - > - if (s2_cfg) { > - target.data[2] = cpu_to_le64( > - FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | > - FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | > -#ifdef __BIG_ENDIAN > - STRTAB_STE_2_S2ENDI | > -#endif > - STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 | > - STRTAB_STE_2_S2R); > - > - target.data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); > - > - val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS); > - } > - > - if (master->ats_enabled) > - target.data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS, > - STRTAB_STE_1_EATS_TRANS)); > - > - target.data[0] = cpu_to_le64(val); > arm_smmu_write_ste(smmu, sid, dst, &target); > } > > -- > 2.42.0 >
On Mon, Nov 13, 2023 at 01:53:14PM -0400, Jason Gunthorpe wrote: > This is preparation to move the STE calculation higher up in to the call > chain and remove arm_smmu_write_strtab_ent(). These new functions will be > called directly from attach_dev. > > Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> > +static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, > + struct arm_smmu_master *master, > + struct arm_smmu_ctx_desc_cfg *cd_table) > +{ > + struct arm_smmu_device *smmu = master->smmu; > + > + memset(target, 0, sizeof(*target)); > + target->data[0] = cpu_to_le64( Nit: can add a line in-between like arm_smmu_make_s2_domain_ste does? Reviewed-by: Nicolin Chen <nicolinc@nvidia.com> > +static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, > + struct arm_smmu_master *master, > + struct arm_smmu_domain *smmu_domain) > +{ > + struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg; > + > + memset(target, 0, sizeof(*target)); > + > + target->data[0] = cpu_to_le64(
On Mon, Dec 04, 2023 at 05:55:03PM -0800, Nicolin Chen wrote: > On Mon, Nov 13, 2023 at 01:53:14PM -0400, Jason Gunthorpe wrote: > > This is preparation to move the STE calculation higher up in to the call > > chain and remove arm_smmu_write_strtab_ent(). These new functions will be > > called directly from attach_dev. > > > > Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> > > > +static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, > > + struct arm_smmu_master *master, > > + struct arm_smmu_ctx_desc_cfg *cd_table) > > +{ > > + struct arm_smmu_device *smmu = master->smmu; > > + > > + memset(target, 0, sizeof(*target)); > > + target->data[0] = cpu_to_le64( > > Nit: can add a line in-between like arm_smmu_make_s2_domain_ste does? I removed the line since the arm_smmu_make_abort_ste/bypass functions also had no line Thanks, Jason
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 3fc8787db2dbc1..1c63fdebbda9d4 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1463,13 +1463,70 @@ static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target) FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING)); } +static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, + struct arm_smmu_ctx_desc_cfg *cd_table) +{ + struct arm_smmu_device *smmu = master->smmu; + + memset(target, 0, sizeof(*target)); + target->data[0] = cpu_to_le64( + STRTAB_STE_0_V | + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | + FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) | + (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | + FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax)); + + target->data[1] = cpu_to_le64( + FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | + FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | + FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | + FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | + ((smmu->features & ARM_SMMU_FEAT_STALLS && + !master->stall_enabled) ? + STRTAB_STE_1_S1STALLD : + 0) | + FIELD_PREP(STRTAB_STE_1_EATS, + master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) | + FIELD_PREP(STRTAB_STE_1_STRW, + (smmu->features & ARM_SMMU_FEAT_E2H) ? + STRTAB_STE_1_STRW_EL2 : + STRTAB_STE_1_STRW_NSEL1)); +} + +static void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, + struct arm_smmu_master *master, + struct arm_smmu_domain *smmu_domain) +{ + struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg; + + memset(target, 0, sizeof(*target)); + + target->data[0] = cpu_to_le64( + STRTAB_STE_0_V | + FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS)); + + target->data[1] |= cpu_to_le64( + FIELD_PREP(STRTAB_STE_1_EATS, + master->ats_enabled ? 
STRTAB_STE_1_EATS_TRANS : 0)); + + target->data[2] = cpu_to_le64( + FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | + FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | + STRTAB_STE_2_S2AA64 | +#ifdef __BIG_ENDIAN + STRTAB_STE_2_S2ENDI | +#endif + STRTAB_STE_2_S2PTW | + STRTAB_STE_2_S2R); + + target->data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); +} + static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, struct arm_smmu_ste *dst) { - u64 val; struct arm_smmu_device *smmu = master->smmu; - struct arm_smmu_ctx_desc_cfg *cd_table = NULL; - struct arm_smmu_s2_cfg *s2_cfg = NULL; struct arm_smmu_domain *smmu_domain = master->domain; struct arm_smmu_ste target = {}; @@ -1484,61 +1541,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid, switch (smmu_domain->stage) { case ARM_SMMU_DOMAIN_S1: - cd_table = &master->cd_table; + arm_smmu_make_cdtable_ste(&target, master, &master->cd_table); break; case ARM_SMMU_DOMAIN_S2: - s2_cfg = &smmu_domain->s2_cfg; + arm_smmu_make_s2_domain_ste(&target, master, smmu_domain); break; case ARM_SMMU_DOMAIN_BYPASS: arm_smmu_make_bypass_ste(&target); - arm_smmu_write_ste(smmu, sid, dst, &target); - return; + break; } - - /* Nuke the existing STE_0 value, as we're going to rewrite it */ - val = STRTAB_STE_0_V; - - if (cd_table) { - u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ? 
- STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1; - - target.data[1] = cpu_to_le64( - FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) | - FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) | - FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) | - FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) | - FIELD_PREP(STRTAB_STE_1_STRW, strw)); - - if (smmu->features & ARM_SMMU_FEAT_STALLS && - !master->stall_enabled) - target.data[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); - - val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) | - FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) | - FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) | - FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt); - } - - if (s2_cfg) { - target.data[2] = cpu_to_le64( - FIELD_PREP(STRTAB_STE_2_S2VMID, s2_cfg->vmid) | - FIELD_PREP(STRTAB_STE_2_VTCR, s2_cfg->vtcr) | -#ifdef __BIG_ENDIAN - STRTAB_STE_2_S2ENDI | -#endif - STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 | - STRTAB_STE_2_S2R); - - target.data[3] = cpu_to_le64(s2_cfg->vttbr & STRTAB_STE_3_S2TTB_MASK); - - val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S2_TRANS); - } - - if (master->ats_enabled) - target.data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_EATS, - STRTAB_STE_1_EATS_TRANS)); - - target.data[0] = cpu_to_le64(val); arm_smmu_write_ste(smmu, sid, dst, &target); }
This is preparation to move the STE calculation higher up into the call chain and remove arm_smmu_write_strtab_ent(). These new functions will be called directly from attach_dev. Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 115 +++++++++++--------- 1 file changed, 63 insertions(+), 52 deletions(-)