[v2,18/19] iommu/arm-smmu-v3: Pass arm_smmu_domain and arm_smmu_device to finalize

Message ID 18-v2-de8b10590bf5+400-smmuv3_newapi_p1_jgg@nvidia.com (mailing list archive)
State New, archived
Series Update SMMUv3 to the modern iommu API (part 1/3)

Commit Message

Jason Gunthorpe Nov. 13, 2023, 5:53 p.m. UTC
Instead of putting container_of() casts in the internals, use the proper
type in this call chain. This makes it easier to check that the two global
static domains do not leak into call chains they should not reach.

Passing the smmu saves the only caller from having to set it and unset it
in the error path.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 34 ++++++++++-----------
 1 file changed, 17 insertions(+), 17 deletions(-)
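
At a glance, the shape of the change, as a condensed sketch of the callee
side (bodies elided; to_smmu_domain() is the driver's container_of()
helper):

    /* Before: the callee recovers the driver types itself. */
    static int arm_smmu_domain_finalise(struct iommu_domain *domain)
    {
            struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
            struct arm_smmu_device *smmu = smmu_domain->smmu;
            ...
    }

    /* After: the caller passes both driver types directly, and the smmu
     * pointer is published on the domain only once finalise succeeds
     * (see the last hunk of the patch below). */
    static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
                                        struct arm_smmu_device *smmu)
    {
            ...
            smmu_domain->smmu = smmu;
            return 0;
    }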

Comments

Michael Shavit Nov. 15, 2023, 4:02 p.m. UTC | #1
On Tue, Nov 14, 2023 at 1:53 AM Jason Gunthorpe <jgg@nvidia.com> wrote:
>
> Instead of putting container_of() casts in the internals, use the proper
> type in this call chain. This makes it easier to check that the two global
> static domains do not leak into call chains they should not reach.
>
> Passing the smmu saves the only caller from having to set it and unset it
> in the error path.
>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Michael Shavit <mshavit@google.com>
Nicolin Chen Dec. 5, 2023, 4:42 a.m. UTC | #2
On Mon, Nov 13, 2023 at 01:53:25PM -0400, Jason Gunthorpe wrote:
> Instead of putting container_of() casts in the internals, use the proper
> type in this call chain. This makes it easier to check that the two global
> static domains do not leak into call chains they should not reach.
>
> Passing the smmu saves the only caller from having to set it and unset it
> in the error path.
> 
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>

Patch

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 331568e086c70a..50c26792391b56 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -87,6 +87,8 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
 };
 
 static void arm_smmu_rmr_install_bypass_ste(struct arm_smmu_device *smmu);
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+				    struct arm_smmu_device *smmu);
 
 static void parse_driver_options(struct arm_smmu_device *smmu)
 {
@@ -2216,12 +2218,12 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 	kfree(smmu_domain);
 }
 
-static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
+				       struct arm_smmu_domain *smmu_domain,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
 	int ret;
 	u32 asid;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
 	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
@@ -2253,11 +2255,11 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
 	return ret;
 }
 
-static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
+static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
+				       struct arm_smmu_domain *smmu_domain,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
 	int vmid;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
 
 	/* Reserve VMID 0 for stage-2 bypass STEs */
@@ -2270,17 +2272,17 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
 	return 0;
 }
 
-static int arm_smmu_domain_finalise(struct iommu_domain *domain)
+static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
+				    struct arm_smmu_device *smmu)
 {
 	int ret;
 	unsigned long ias, oas;
 	enum io_pgtable_fmt fmt;
 	struct io_pgtable_cfg pgtbl_cfg;
 	struct io_pgtable_ops *pgtbl_ops;
-	int (*finalise_stage_fn)(struct arm_smmu_domain *,
-				 struct io_pgtable_cfg *);
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
+				 struct arm_smmu_domain *smmu_domain,
+				 struct io_pgtable_cfg *pgtbl_cfg);
 
 	/* Restrict the stage to what we can actually support */
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2319,17 +2321,18 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 	if (!pgtbl_ops)
 		return -ENOMEM;
 
-	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
-	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
-	domain->geometry.force_aperture = true;
+	smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+	smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
+	smmu_domain->domain.geometry.force_aperture = true;
 
-	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
+	ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
 	if (ret < 0) {
 		free_io_pgtable_ops(pgtbl_ops);
 		return ret;
 	}
 
 	smmu_domain->pgtbl_ops = pgtbl_ops;
+	smmu_domain->smmu = smmu;
 	return 0;
 }
 
@@ -2520,10 +2523,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	mutex_lock(&smmu_domain->init_mutex);
 
 	if (!smmu_domain->smmu) {
-		smmu_domain->smmu = smmu;
-		ret = arm_smmu_domain_finalise(domain);
-		if (ret)
-			smmu_domain->smmu = NULL;
+		ret = arm_smmu_domain_finalise(smmu_domain, smmu);
 	} else if (smmu_domain->smmu != smmu)
 		ret = -EINVAL;