[v3,9/9] iommu/arm-smmu-v3: Reorganize struct arm_smmu_ctx_desc_cfg

Message ID 9-v3-9fef8cdc2ff6+150d1-smmuv3_tidy_jgg@nvidia.com
State New, archived
Series Tidy some minor things in the stream table/cd table area

Commit Message

Jason Gunthorpe Aug. 6, 2024, 11:31 p.m. UTC
The members here are used for both the linear and the 2-level case, with
the meaning of each member differing slightly between the two cases.

Split it into a clean union where both cases have their own struct with
their own logical names and correct types.

Adjust all the users to detect linear/2lvl and use the right sub-structure
and types consistently.

Remove CTXDESC_CD_DWORDS by changing the last places to use
sizeof(struct arm_smmu_cd).

Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 119 +++++++++-----------
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h |  24 ++--
 2 files changed, 71 insertions(+), 72 deletions(-)
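
For orientation, a condensed before/after sketch of struct
arm_smmu_ctx_desc_cfg as reorganized by this patch (all names are taken
from the diff below; unrelated members are elided):

	/* Before: one set of members shared by the linear and 2-level cases */
	struct arm_smmu_ctx_desc_cfg {
		__le64				*cdtab;
		dma_addr_t			cdtab_dma;
		struct arm_smmu_l1_ctx_desc	*l1_desc;
		unsigned int			num_l1_ents;
		/* ... */
	};

	/* After: each format gets its own correctly typed view */
	struct arm_smmu_ctx_desc_cfg {
		union {
			struct {
				struct arm_smmu_cd *table;
				unsigned int num_ents;
			} linear;
			struct {
				struct arm_smmu_cdtab_l1 *l1tab;
				struct arm_smmu_cdtab_l2 **l2ptrs;
				unsigned int num_l1_ents;
			} l2;
		};
		dma_addr_t			cdtab_dma;
		/* ... */
	};

The union is untagged: cd_table->s1fmt (STRTAB_STE_0_S1FMT_LINEAR vs.
STRTAB_STE_0_S1FMT_64K_L2) acts as the discriminant throughout the patch.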

Comments

Will Deacon Sept. 6, 2024, 1:22 p.m. UTC | #1
On Tue, Aug 06, 2024 at 08:31:23PM -0300, Jason Gunthorpe wrote:
> @@ -1373,8 +1368,6 @@ void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
>  
>  static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
>  {
> -	int ret;
> -	size_t l1size;
>  	size_t max_contexts;
>  	struct arm_smmu_device *smmu = master->smmu;
>  	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
> @@ -1385,71 +1378,67 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
>  	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
>  	    max_contexts <= CTXDESC_L2_ENTRIES) {
>  		cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
> -		cd_table->num_l1_ents = max_contexts;
> +		cd_table->linear.num_ents = max_contexts;
>  
> -		l1size = max_contexts * sizeof(struct arm_smmu_cd);
> +		cd_table->linear.table = dma_alloc_coherent(
> +			smmu->dev, max_contexts * sizeof(struct arm_smmu_cd),
> +			&cd_table->cdtab_dma, GFP_KERNEL);
> +		if (!cd_table->linear.table)
> +			return -ENOMEM;
>  	} else {
>  		cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
> -		cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts,
> -						  CTXDESC_L2_ENTRIES);
> +		cd_table->l2.num_l1_ents =
> +			DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES);
>  
> -		cd_table->l1_desc = kcalloc(cd_table->num_l1_ents,
> -					    sizeof(*cd_table->l1_desc),
> -					    GFP_KERNEL);
> -		if (!cd_table->l1_desc)
> +		cd_table->l2.l2ptrs = kcalloc(cd_table->l2.num_l1_ents,
> +					     sizeof(*cd_table->l2.l2ptrs),
> +					     GFP_KERNEL);
> +		if (!cd_table->l2.l2ptrs)
>  			return -ENOMEM;
>  
> -		l1size = cd_table->num_l1_ents *
> -			 sizeof(struct arm_smmu_cdtab_l1);
> +		cd_table->l2.l1tab = dma_alloc_coherent(
> +			smmu->dev,
> +			cd_table->l2.num_l1_ents *
> +				sizeof(struct arm_smmu_cdtab_l1),
> +			&cd_table->cdtab_dma, GFP_KERNEL);
> +		if (!cd_table->l2.l1tab) {
> +			kfree(cd_table->l2.l2ptrs);
> +			cd_table->l2.l2ptrs = NULL;
> +			return -ENOMEM;
> +		}
>  	}
> -
> -	cd_table->cdtab = dma_alloc_coherent(smmu->dev, l1size,
> -					     &cd_table->cdtab_dma, GFP_KERNEL);
> -	if (!cd_table->cdtab) {
> -		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
> -		ret = -ENOMEM;
> -		goto err_free_l1;
> -	}
> -
>  	return 0;
> -
> -err_free_l1:
> -	if (cd_table->l1_desc) {
> -		kfree(cd_table->l1_desc);
> -		cd_table->l1_desc = NULL;
> -	}
> -	return ret;

Why inline the error path here? Sure, I get that it works both ways, but
it seems a little gratuitous to me.

Will
Jason Gunthorpe Sept. 6, 2024, 3:13 p.m. UTC | #2
On Fri, Sep 06, 2024 at 02:22:22PM +0100, Will Deacon wrote:
> On Tue, Aug 06, 2024 at 08:31:23PM -0300, Jason Gunthorpe wrote:
> > @@ -1373,8 +1368,6 @@ void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
> >  
> >  static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
> >  {
> > -	int ret;
> > -	size_t l1size;
> >  	size_t max_contexts;
> >  	struct arm_smmu_device *smmu = master->smmu;
> >  	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
> > @@ -1385,71 +1378,67 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
> >  	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
> >  	    max_contexts <= CTXDESC_L2_ENTRIES) {
> >  		cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
> > -		cd_table->num_l1_ents = max_contexts;
> > +		cd_table->linear.num_ents = max_contexts;
> >  
> > -		l1size = max_contexts * sizeof(struct arm_smmu_cd);
> > +		cd_table->linear.table = dma_alloc_coherent(
> > +			smmu->dev, max_contexts * sizeof(struct arm_smmu_cd),
> > +			&cd_table->cdtab_dma, GFP_KERNEL);
> > +		if (!cd_table->linear.table)
> > +			return -ENOMEM;
> >  	} else {
> >  		cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
> > -		cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts,
> > -						  CTXDESC_L2_ENTRIES);
> > +		cd_table->l2.num_l1_ents =
> > +			DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES);
> >  
> > -		cd_table->l1_desc = kcalloc(cd_table->num_l1_ents,
> > -					    sizeof(*cd_table->l1_desc),
> > -					    GFP_KERNEL);
> > -		if (!cd_table->l1_desc)
> > +		cd_table->l2.l2ptrs = kcalloc(cd_table->l2.num_l1_ents,
> > +					     sizeof(*cd_table->l2.l2ptrs),
> > +					     GFP_KERNEL);
> > +		if (!cd_table->l2.l2ptrs)
> >  			return -ENOMEM;
> >  
> > -		l1size = cd_table->num_l1_ents *
> > -			 sizeof(struct arm_smmu_cdtab_l1);
> > +		cd_table->l2.l1tab = dma_alloc_coherent(
> > +			smmu->dev,
> > +			cd_table->l2.num_l1_ents *
> > +				sizeof(struct arm_smmu_cdtab_l1),
> > +			&cd_table->cdtab_dma, GFP_KERNEL);
> > +		if (!cd_table->l2.l1tab) {
> > +			kfree(cd_table->l2.l2ptrs);
> > +			cd_table->l2.l2ptrs = NULL;
> > +			return -ENOMEM;
> > +		}
> >  	}
> > -
> > -	cd_table->cdtab = dma_alloc_coherent(smmu->dev, l1size,
> > -					     &cd_table->cdtab_dma, GFP_KERNEL);
> > -	if (!cd_table->cdtab) {
> > -		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
> > -		ret = -ENOMEM;
> > -		goto err_free_l1;
> > -	}
> > -
> >  	return 0;
> > -
> > -err_free_l1:
> > -	if (cd_table->l1_desc) {
> > -		kfree(cd_table->l1_desc);
> > -		cd_table->l1_desc = NULL;
> > -	}
> > -	return ret;
> 
> Why inline the error path here? Sure, I get that it works both ways, but
> it seems a little gratuitous to me.

With the change there is only one goto.

But sure, can leave it:

[..]
		if (!cd_table->l2.l2ptrs) {
			ret = -ENOMEM;
			goto err_free_l2ptrs;
		}
	}
	return 0;

err_free_l2ptrs:
	kfree(cd_table->l2.l2ptrs);
	cd_table->l2.l2ptrs = NULL;
	return ret;
}

Jason

Patch

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 70b37d7f0e245d..e5db5325f7eaed 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1196,19 +1196,19 @@  static dma_addr_t arm_smmu_cd_l1_get_desc(const struct arm_smmu_cdtab_l1 *src)
 struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
 					u32 ssid)
 {
-	struct arm_smmu_l1_ctx_desc *l1_desc;
+	struct arm_smmu_cdtab_l2 *l2;
 	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 
-	if (!cd_table->cdtab)
+	if (!arm_smmu_cdtab_allocated(cd_table))
 		return NULL;
 
 	if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
-		return &((struct arm_smmu_cd *)cd_table->cdtab)[ssid];
+		return &cd_table->linear.table[ssid];
 
-	l1_desc = &cd_table->l1_desc[arm_smmu_cdtab_l1_idx(ssid)];
-	if (!l1_desc->l2ptr)
+	l2 = cd_table->l2.l2ptrs[arm_smmu_cdtab_l1_idx(ssid)];
+	if (!l2)
 		return NULL;
-	return &l1_desc->l2ptr->cds[arm_smmu_cdtab_l2_idx(ssid)];
+	return &l2->cds[arm_smmu_cdtab_l2_idx(ssid)];
 }
 
 static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
@@ -1220,30 +1220,25 @@  static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
 	might_sleep();
 	iommu_group_mutex_assert(master->dev);
 
-	if (!cd_table->cdtab) {
+	if (!arm_smmu_cdtab_allocated(cd_table)) {
 		if (arm_smmu_alloc_cd_tables(master))
 			return NULL;
 	}
 
 	if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_64K_L2) {
 		unsigned int idx = arm_smmu_cdtab_l1_idx(ssid);
-		struct arm_smmu_l1_ctx_desc *l1_desc;
+		struct arm_smmu_cdtab_l2 **l2ptr = &cd_table->l2.l2ptrs[idx];
 
-		l1_desc = &cd_table->l1_desc[idx];
-		if (!l1_desc->l2ptr) {
+		if (!*l2ptr) {
 			dma_addr_t l2ptr_dma;
 
-			l1_desc->l2ptr = dma_alloc_coherent(
-				smmu->dev,
-				CTXDESC_L2_ENTRIES * sizeof(struct arm_smmu_cd),
-				&l2ptr_dma, GFP_KERNEL);
-			if (!l1_desc->l2ptr)
+			*l2ptr = dma_alloc_coherent(smmu->dev, sizeof(**l2ptr),
+						    &l2ptr_dma, GFP_KERNEL);
+			if (!*l2ptr)
 				return NULL;
 
-			arm_smmu_write_cd_l1_desc(
-				&((struct arm_smmu_cdtab_l1 *)
-					  cd_table->cdtab)[idx],
-				l2ptr_dma);
+			arm_smmu_write_cd_l1_desc(&cd_table->l2.l1tab[idx],
+						  l2ptr_dma);
 			/* An invalid L1CD can be cached */
 			arm_smmu_sync_cd(master, ssid, false);
 		}
@@ -1363,7 +1358,7 @@  void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
 	struct arm_smmu_cd target = {};
 	struct arm_smmu_cd *cdptr;
 
-	if (!master->cd_table.cdtab)
+	if (!arm_smmu_cdtab_allocated(&master->cd_table))
 		return;
 	cdptr = arm_smmu_get_cd_ptr(master, ssid);
 	if (WARN_ON(!cdptr))
@@ -1373,8 +1368,6 @@  void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
 
 static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
 {
-	int ret;
-	size_t l1size;
 	size_t max_contexts;
 	struct arm_smmu_device *smmu = master->smmu;
 	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
@@ -1385,71 +1378,67 @@  static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
 	if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
 	    max_contexts <= CTXDESC_L2_ENTRIES) {
 		cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
-		cd_table->num_l1_ents = max_contexts;
+		cd_table->linear.num_ents = max_contexts;
 
-		l1size = max_contexts * sizeof(struct arm_smmu_cd);
+		cd_table->linear.table = dma_alloc_coherent(
+			smmu->dev, max_contexts * sizeof(struct arm_smmu_cd),
+			&cd_table->cdtab_dma, GFP_KERNEL);
+		if (!cd_table->linear.table)
+			return -ENOMEM;
 	} else {
 		cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
-		cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts,
-						  CTXDESC_L2_ENTRIES);
+		cd_table->l2.num_l1_ents =
+			DIV_ROUND_UP(max_contexts, CTXDESC_L2_ENTRIES);
 
-		cd_table->l1_desc = kcalloc(cd_table->num_l1_ents,
-					    sizeof(*cd_table->l1_desc),
-					    GFP_KERNEL);
-		if (!cd_table->l1_desc)
+		cd_table->l2.l2ptrs = kcalloc(cd_table->l2.num_l1_ents,
+					     sizeof(*cd_table->l2.l2ptrs),
+					     GFP_KERNEL);
+		if (!cd_table->l2.l2ptrs)
 			return -ENOMEM;
 
-		l1size = cd_table->num_l1_ents *
-			 sizeof(struct arm_smmu_cdtab_l1);
+		cd_table->l2.l1tab = dma_alloc_coherent(
+			smmu->dev,
+			cd_table->l2.num_l1_ents *
+				sizeof(struct arm_smmu_cdtab_l1),
+			&cd_table->cdtab_dma, GFP_KERNEL);
+		if (!cd_table->l2.l1tab) {
+			kfree(cd_table->l2.l2ptrs);
+			cd_table->l2.l2ptrs = NULL;
+			return -ENOMEM;
+		}
 	}
-
-	cd_table->cdtab = dma_alloc_coherent(smmu->dev, l1size,
-					     &cd_table->cdtab_dma, GFP_KERNEL);
-	if (!cd_table->cdtab) {
-		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
-		ret = -ENOMEM;
-		goto err_free_l1;
-	}
-
 	return 0;
-
-err_free_l1:
-	if (cd_table->l1_desc) {
-		kfree(cd_table->l1_desc);
-		cd_table->l1_desc = NULL;
-	}
-	return ret;
 }
 
 static void arm_smmu_free_cd_tables(struct arm_smmu_master *master)
 {
 	int i;
-	size_t l1size;
 	struct arm_smmu_device *smmu = master->smmu;
 	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 
-	if (cd_table->l1_desc) {
-		for (i = 0; i < cd_table->num_l1_ents; i++) {
-			if (!cd_table->l1_desc[i].l2ptr)
+	if (cd_table->s1fmt != STRTAB_STE_0_S1FMT_LINEAR) {
+		for (i = 0; i < cd_table->l2.num_l1_ents; i++) {
+			if (!cd_table->l2.l2ptrs[i])
 				continue;
 
 			dma_free_coherent(smmu->dev,
-					  sizeof(*cd_table->l1_desc[i].l2ptr),
-					  cd_table->l1_desc[i].l2ptr,
-					  arm_smmu_cd_l1_get_desc(&(
-						  (struct arm_smmu_cdtab_l1 *)
-							  cd_table->cdtab)[i]));
+					  sizeof(*cd_table->l2.l2ptrs[i]),
+					  cd_table->l2.l2ptrs[i],
+					  arm_smmu_cd_l1_get_desc(
+						  &cd_table->l2.l1tab[i]));
 		}
-		kfree(cd_table->l1_desc);
+		kfree(cd_table->l2.l2ptrs);
 
-		l1size = cd_table->num_l1_ents *
-			 sizeof(struct arm_smmu_cdtab_l1);
+		dma_free_coherent(smmu->dev,
+				  cd_table->l2.num_l1_ents *
+					  sizeof(struct arm_smmu_cdtab_l1),
+				  cd_table->l2.l1tab, cd_table->cdtab_dma);
 	} else {
-		l1size = cd_table->num_l1_ents * sizeof(struct arm_smmu_cd);
+		dma_free_coherent(smmu->dev,
+				  cd_table->linear.num_ents *
+					  sizeof(struct arm_smmu_cd),
+				  cd_table->linear.table, cd_table->cdtab_dma);
 	}
-
-	dma_free_coherent(smmu->dev, l1size, cd_table->cdtab,
-			  cd_table->cdtab_dma);
 }
 
 /* Stream table manipulation functions */
@@ -3299,7 +3288,7 @@  static void arm_smmu_release_device(struct device *dev)
 
 	arm_smmu_disable_pasid(master);
 	arm_smmu_remove_master(master);
-	if (master->cd_table.cdtab)
+	if (arm_smmu_cdtab_allocated(&master->cd_table))
 		arm_smmu_free_cd_tables(master);
 	kfree(master);
 }
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 681804a3f86bec..8851a7abb5f0f3 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -619,15 +619,19 @@  struct arm_smmu_ctx_desc {
 	u16				asid;
 };
 
-struct arm_smmu_l1_ctx_desc {
-	struct arm_smmu_cdtab_l2	*l2ptr;
-};
-
 struct arm_smmu_ctx_desc_cfg {
-	__le64				*cdtab;
+	union {
+		struct {
+			struct arm_smmu_cd *table;
+			unsigned int num_ents;
+		} linear;
+		struct {
+			struct arm_smmu_cdtab_l1 *l1tab;
+			struct arm_smmu_cdtab_l2 **l2ptrs;
+			unsigned int num_l1_ents;
+		} l2;
+	};
 	dma_addr_t			cdtab_dma;
-	struct arm_smmu_l1_ctx_desc	*l1_desc;
-	unsigned int			num_l1_ents;
 	unsigned int			used_ssids;
 	u8				in_ste;
 	u8				s1fmt;
@@ -635,6 +639,12 @@  struct arm_smmu_ctx_desc_cfg {
 	u8				s1cdmax;
 };
 
+static inline bool
+arm_smmu_cdtab_allocated(struct arm_smmu_ctx_desc_cfg *cfg)
+{
+	return cfg->linear.table || cfg->l2.l1tab;
+}
+
 /* True if the cd table has SSIDS > 0 in use. */
 static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table)
 {