Message ID | 5-v2-de8b10590bf5+400-smmuv3_newapi_p1_jgg@nvidia.com |
---|---|
State | New, archived |
Series | Update SMMUv3 to the modern iommu API (part 1/3) |
On Tue, Nov 14, 2023 at 1:53 AM Jason Gunthorpe <jgg@nvidia.com> wrote:
>
> This allows writing the flow of arm_smmu_write_strtab_ent() around abort
> and bypass domains more naturally.
>
> Note that the core code no longer supplies NULL domains, though there is
> still a flow in the driver that ends up in arm_smmu_write_strtab_ent() with
> NULL. A later patch will remove it.
>
> Remove the duplicate calculation of the STE in arm_smmu_init_bypass_stes()
> and remove the force parameter. arm_smmu_rmr_install_bypass_ste() can now
> simply invoke arm_smmu_make_bypass_ste() directly.
>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Reviewed-by: Michael Shavit <mshavit@google.com>
On Mon, Nov 13, 2023 at 01:53:12PM -0400, Jason Gunthorpe wrote:
> This allows writing the flow of arm_smmu_write_strtab_ent() around abort
> and bypass domains more naturally.
>
> Note that the core code no longer supplies NULL domains, though there is
> still a flow in the driver that ends up in arm_smmu_write_strtab_ent() with
> NULL. A later patch will remove it.
>
> Remove the duplicate calculation of the STE in arm_smmu_init_bypass_stes()
> and remove the force parameter. arm_smmu_rmr_install_bypass_ste() can now
> simply invoke arm_smmu_make_bypass_ste() directly.
>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 6430a8d89cb471..13cdb959ec8f58 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1443,6 +1443,24 @@ static void arm_smmu_write_ste(struct arm_smmu_device *smmu, u32 sid,
         }
 }
 
+static void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
+{
+        memset(target, 0, sizeof(*target));
+        target->data[0] = cpu_to_le64(
+                STRTAB_STE_0_V |
+                FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
+}
+
+static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)
+{
+        memset(target, 0, sizeof(*target));
+        target->data[0] = cpu_to_le64(
+                STRTAB_STE_0_V |
+                FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));
+        target->data[1] = cpu_to_le64(
+                FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
+}
+
 static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                                       struct arm_smmu_ste *dst)
 {
@@ -1453,37 +1471,31 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
         struct arm_smmu_domain *smmu_domain = master->domain;
         struct arm_smmu_ste target = {};
 
-        if (smmu_domain) {
-                switch (smmu_domain->stage) {
-                case ARM_SMMU_DOMAIN_S1:
-                        cd_table = &master->cd_table;
-                        break;
-                case ARM_SMMU_DOMAIN_S2:
-                        s2_cfg = &smmu_domain->s2_cfg;
-                        break;
-                default:
-                        break;
-                }
+        if (!smmu_domain) {
+                if (disable_bypass)
+                        arm_smmu_make_abort_ste(&target);
+                else
+                        arm_smmu_make_bypass_ste(&target);
+                arm_smmu_write_ste(smmu, sid, dst, &target);
+                return;
+        }
+
+        switch (smmu_domain->stage) {
+        case ARM_SMMU_DOMAIN_S1:
+                cd_table = &master->cd_table;
+                break;
+        case ARM_SMMU_DOMAIN_S2:
+                s2_cfg = &smmu_domain->s2_cfg;
+                break;
+        case ARM_SMMU_DOMAIN_BYPASS:
+                arm_smmu_make_bypass_ste(&target);
+                arm_smmu_write_ste(smmu, sid, dst, &target);
+                return;
         }
 
         /* Nuke the existing STE_0 value, as we're going to rewrite it */
         val = STRTAB_STE_0_V;
 
-        /* Bypass/fault */
-        if (!smmu_domain || !(cd_table || s2_cfg)) {
-                if (!smmu_domain && disable_bypass)
-                        val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
-                else
-                        val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
-
-                target.data[0] = cpu_to_le64(val);
-                target.data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
-                                                        STRTAB_STE_1_SHCFG_INCOMING));
-                target.data[2] = 0; /* Nuke the VMID */
-                arm_smmu_write_ste(smmu, sid, dst, &target);
-                return;
-        }
-
         if (cd_table) {
                 u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
                         STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
@@ -1529,21 +1541,15 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
 }
 
 static void arm_smmu_init_bypass_stes(struct arm_smmu_ste *strtab,
-                                      unsigned int nent, bool force)
+                                      unsigned int nent)
 {
         unsigned int i;
-        u64 val = STRTAB_STE_0_V;
-
-        if (disable_bypass && !force)
-                val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
-        else
-                val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
 
         for (i = 0; i < nent; ++i) {
-                strtab->data[0] = cpu_to_le64(val);
-                strtab->data[1] = cpu_to_le64(FIELD_PREP(
-                        STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
-                strtab->data[2] = 0;
+                if (disable_bypass)
+                        arm_smmu_make_abort_ste(strtab);
+                else
+                        arm_smmu_make_bypass_ste(strtab);
                 strtab++;
         }
 }
@@ -1571,7 +1577,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
                 return -ENOMEM;
         }
 
-        arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT, false);
+        arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
         arm_smmu_write_strtab_l1_desc(strtab, desc);
         return 0;
 }
@@ -3193,7 +3199,7 @@ static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
         reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);
         cfg->strtab_base_cfg = reg;
 
-        arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents, false);
+        arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
         return 0;
 }
 
@@ -3904,7 +3910,6 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
         iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
 
         list_for_each_entry(e, &rmr_list, list) {
-                struct arm_smmu_ste *step;
                 struct iommu_iort_rmr_data *rmr;
                 int ret, i;
 
@@ -3917,8 +3922,8 @@ static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu)
                         continue;
                 }
 
-                step = arm_smmu_get_step_for_sid(smmu, rmr->sids[i]);
-                arm_smmu_init_bypass_stes(step, 1, true);
+                arm_smmu_make_bypass_ste(
+                        arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));
         }
 }
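For readers less familiar with the kernel's bitfield helpers, here is a minimal, self-contained user-space sketch of what the new arm_smmu_make_abort_ste() computes. The GENMASK_ULL()/FIELD_PREP() macros are simplified stand-ins for include/linux/bitfield.h (gcc/clang only, because of __builtin_ctzll()), struct ste stands in for struct arm_smmu_ste, and the cpu_to_le64() byte-swap done by the driver is omitted; the field positions are copied from the driver header as of this series, so treat the exact constants as illustrative rather than authoritative.

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Simplified user-space stand-ins for the kernel's bitfield helpers */
#define GENMASK_ULL(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
        (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* STE word 0 fields, mirroring the driver header for this series */
#define STRTAB_STE_0_V          (1ULL << 0)
#define STRTAB_STE_0_CFG        GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT  0

/* Eight 64-bit words per STE, as in struct arm_smmu_ste */
struct ste { uint64_t data[8]; };

/* Same shape as the new arm_smmu_make_abort_ste(), minus cpu_to_le64() */
static void make_abort_ste(struct ste *target)
{
        memset(target, 0, sizeof(*target));
        target->data[0] = STRTAB_STE_0_V |
                          FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
}

int main(void)
{
        struct ste ste;

        make_abort_ste(&ste);
        /* V=1 in bit 0, CFG=0 (abort) in bits 3:1, so word 0 is just 0x1 */
        assert(ste.data[0] == 0x1);
        return 0;
}

Compiled and run (for example, gcc -o ste ste.c && ./ste), this exits cleanly, showing that an abort STE is simply a valid STE whose CFG field is zero.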
This allows writing the flow of arm_smmu_write_strtab_ent() around abort
and bypass domains more naturally.

Note that the core code no longer supplies NULL domains, though there is
still a flow in the driver that ends up in arm_smmu_write_strtab_ent() with
NULL. A later patch will remove it.

Remove the duplicate calculation of the STE in arm_smmu_init_bypass_stes()
and remove the force parameter. arm_smmu_rmr_install_bypass_ste() can now
simply invoke arm_smmu_make_bypass_ste() directly.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 89 +++++++++++----------
 1 file changed, 47 insertions(+), 42 deletions(-)
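The commit message's point that arm_smmu_rmr_install_bypass_ste() can now call arm_smmu_make_bypass_ste() directly is easier to see with the bit pattern in hand. Below is a companion user-space sketch for the bypass case, under the same assumptions as the earlier abort sketch: simplified GENMASK_ULL()/FIELD_PREP() stand-ins, no cpu_to_le64(), and field positions copied from the driver header, so the exact constants are illustrative.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define GENMASK_ULL(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
        (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

#define STRTAB_STE_0_V                  (1ULL << 0)
#define STRTAB_STE_0_CFG                GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_BYPASS         4
#define STRTAB_STE_1_SHCFG              GENMASK_ULL(45, 44)
#define STRTAB_STE_1_SHCFG_INCOMING     1ULL

struct ste { uint64_t data[8]; };

/* Same shape as the new arm_smmu_make_bypass_ste(), minus cpu_to_le64() */
static void make_bypass_ste(struct ste *target)
{
        memset(target, 0, sizeof(*target));
        target->data[0] = STRTAB_STE_0_V |
                          FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS);
        target->data[1] = FIELD_PREP(STRTAB_STE_1_SHCFG,
                                     STRTAB_STE_1_SHCFG_INCOMING);
}

int main(void)
{
        struct ste ste;

        make_bypass_ste(&ste);
        assert(ste.data[0] == 0x9);          /* V=1, CFG=0b100 (bypass) */
        assert(ste.data[1] == (1ULL << 44)); /* SHCFG = use incoming */
        return 0;
}

The gain from the refactor is that every call site, including the RMR path that previously needed the force parameter, now obtains this exact bit pattern from one helper instead of open-coding it.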