
[v2,7/8] xen/arm: Remove Linux-specific code that is not usable in Xen

Message ID 1d9da8ed4845aeb9e86a5ce6750b811bd7e2020e.1606406359.git.rahul.singh@arm.com (mailing list archive)
State Superseded
Series xen/arm: Add support for SMMUv3 driver

Commit Message

Rahul Singh Nov. 26, 2020, 5:02 p.m. UTC
The code related to struct io_pgtable_ops, struct io_pgtable_cfg,
struct iommu_flush_ops, and struct iommu_ops is Linux-specific.

Remove the code related to these structs, as it is dead code in Xen.

Signed-off-by: Rahul Singh <rahul.singh@arm.com>
---
 xen/drivers/passthrough/arm/smmu-v3.c | 457 --------------------------
 1 file changed, 457 deletions(-)

Comments

Stefano Stabellini Dec. 2, 2020, 1:48 a.m. UTC | #1
On Thu, 26 Nov 2020, Rahul Singh wrote:
> The code related to struct io_pgtable_ops, struct io_pgtable_cfg,
> struct iommu_flush_ops, and struct iommu_ops is Linux-specific.
> 
> Remove the code related to these structs, as it is dead code in Xen.

There are still instances of struct io_pgtable_cfg after applying this
patch in the following functions:
- arm_smmu_domain_finalise_s2
- arm_smmu_domain_finalise



> Signed-off-by: Rahul Singh <rahul.singh@arm.com>
> ---
>  xen/drivers/passthrough/arm/smmu-v3.c | 457 --------------------------
>  1 file changed, 457 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
> index 40e3890a58..55d1cba194 100644
> --- a/xen/drivers/passthrough/arm/smmu-v3.c
> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
> @@ -599,7 +593,6 @@ struct arm_smmu_domain {
>  	struct arm_smmu_device		*smmu;
>  	struct mutex			init_mutex; /* Protects smmu pointer */
>  
> -	struct io_pgtable_ops		*pgtbl_ops;
>  	bool				non_strict;
>  
>  	enum arm_smmu_domain_stage	stage;
> @@ -1297,74 +1290,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
>  	arm_smmu_cmdq_issue_sync(smmu);
>  }
>  
> -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
> -					  size_t granule, bool leaf, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -	struct arm_smmu_cmdq_ent cmd = {
> -		.tlbi = {
> -			.leaf	= leaf,
> -			.addr	= iova,
> -		},
> -	};
> -
> -	cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
> -	cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
> -
> -	do {
> -		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
> -		cmd.tlbi.addr += granule;
> -	} while (size -= granule);
> -}
> -
> -static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
> -					 unsigned long iova, size_t granule,
> -					 void *cookie)
> -{
> -	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
> -}
> -
> -static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
> -				  size_t granule, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -
> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
> -	arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
> -				  size_t granule, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -
> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
> -	arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static const struct iommu_flush_ops arm_smmu_flush_ops = {
> -	.tlb_flush_all	= arm_smmu_tlb_inv_context,
> -	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
> -	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
> -	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
> -};
> -
> -/* IOMMU API */
> -static bool arm_smmu_capable(enum iommu_cap cap)
> -{
> -	switch (cap) {
> -	case IOMMU_CAP_CACHE_COHERENCY:
> -		return true;
> -	case IOMMU_CAP_NOEXEC:
> -		return true;
> -	default:
> -		return false;
> -	}
> -}
> -
>  static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
>  {
>  	struct arm_smmu_domain *smmu_domain;
> @@ -1421,7 +1346,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>  	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
>  
>  	iommu_put_dma_cookie(domain);
> -	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
>  
>  	if (cfg->vmid)
>  		arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
> @@ -1429,7 +1353,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>  	kfree(smmu_domain);
>  }
>  
> -
>  static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>  				       struct arm_smmu_master *master,
>  				       struct io_pgtable_cfg *pgtbl_cfg)
> @@ -1437,7 +1360,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>  	int vmid;
>  	struct arm_smmu_device *smmu = smmu_domain->smmu;
>  	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
> -	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
>  
>  	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
>  	if (vmid < 0)
> @@ -1461,20 +1383,12 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>  {
>  	int ret;
>  	unsigned long ias, oas;
> -	enum io_pgtable_fmt fmt;
> -	struct io_pgtable_cfg pgtbl_cfg;
> -	struct io_pgtable_ops *pgtbl_ops;
>  	int (*finalise_stage_fn)(struct arm_smmu_domain *,
>  				 struct arm_smmu_master *,
>  				 struct io_pgtable_cfg *);
>  	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>  	struct arm_smmu_device *smmu = smmu_domain->smmu;
>  
> -	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
> -		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
> -		return 0;
> -	}
> -
>  	/* Restrict the stage to what we can actually support */
>  	smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
>  
> @@ -1483,40 +1397,17 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>  	case ARM_SMMU_DOMAIN_S2:
>  		ias = smmu->ias;
>  		oas = smmu->oas;
> -		fmt = ARM_64_LPAE_S2;
>  		finalise_stage_fn = arm_smmu_domain_finalise_s2;
>  		break;
>  	default:
>  		return -EINVAL;
>  	}
>  
> -	pgtbl_cfg = (struct io_pgtable_cfg) {
> -		.pgsize_bitmap	= smmu->pgsize_bitmap,
> -		.ias		= ias,
> -		.oas		= oas,
> -		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
> -		.tlb		= &arm_smmu_flush_ops,
> -		.iommu_dev	= smmu->dev,
> -	};
> -
> -	if (smmu_domain->non_strict)
> -		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
> -
> -	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
> -	if (!pgtbl_ops)
> -		return -ENOMEM;
> -
> -	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
> -	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
> -	domain->geometry.force_aperture = true;
> -
>  	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
>  	if (ret < 0) {
> -		free_io_pgtable_ops(pgtbl_ops);
>  		return ret;
>  	}
>  
> -	smmu_domain->pgtbl_ops = pgtbl_ops;
>  	return 0;
>  }
>  
> @@ -1626,71 +1517,6 @@ out_unlock:
>  	return ret;
>  }
>  
> -static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
> -			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
> -{
> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
> -
> -	if (!ops)
> -		return -ENODEV;
> -
> -	return ops->map(ops, iova, paddr, size, prot, gfp);
> -}
> -
> -static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
> -			     size_t size, struct iommu_iotlb_gather *gather)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
> -
> -	if (!ops)
> -		return 0;
> -
> -	return ops->unmap(ops, iova, size, gather);
> -}
> -
> -static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	if (smmu_domain->smmu)
> -		arm_smmu_tlb_inv_context(smmu_domain);
> -}
> -
> -static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
> -				struct iommu_iotlb_gather *gather)
> -{
> -	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
> -
> -	if (smmu)
> -		arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static phys_addr_t
> -arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
> -{
> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
> -
> -	if (domain->type == IOMMU_DOMAIN_IDENTITY)
> -		return iova;
> -
> -	if (!ops)
> -		return 0;
> -
> -	return ops->iova_to_phys(ops, iova);
> -}
> -
> -static struct platform_driver arm_smmu_driver;
> -
> -static
> -struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
> -{
> -	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
> -							  fwnode);
> -	put_device(dev);
> -	return dev ? dev_get_drvdata(dev) : NULL;
> -}
> -
>  static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>  {
>  	unsigned long limit = smmu->strtab_cfg.num_l1_ents;
> @@ -1701,206 +1527,6 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>  	return sid < limit;
>  }
>  
> -static struct iommu_ops arm_smmu_ops;
> -
> -static struct iommu_device *arm_smmu_probe_device(struct device *dev)
> -{
> -	int i, ret;
> -	struct arm_smmu_device *smmu;
> -	struct arm_smmu_master *master;
> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
> -
> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
> -		return ERR_PTR(-ENODEV);
> -
> -	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
> -		return ERR_PTR(-EBUSY);
> -
> -	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
> -	if (!smmu)
> -		return ERR_PTR(-ENODEV);
> -
> -	master = kzalloc(sizeof(*master), GFP_KERNEL);
> -	if (!master)
> -		return ERR_PTR(-ENOMEM);
> -
> -	master->dev = dev;
> -	master->smmu = smmu;
> -	master->sids = fwspec->ids;
> -	master->num_sids = fwspec->num_ids;
> -	dev_iommu_priv_set(dev, master);
> -
> -	/* Check the SIDs are in range of the SMMU and our stream table */
> -	for (i = 0; i < master->num_sids; i++) {
> -		u32 sid = master->sids[i];
> -
> -		if (!arm_smmu_sid_in_range(smmu, sid)) {
> -			ret = -ERANGE;
> -			goto err_free_master;
> -		}
> -
> -		/* Ensure l2 strtab is initialised */
> -		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
> -			ret = arm_smmu_init_l2_strtab(smmu, sid);
> -			if (ret)
> -				goto err_free_master;
> -		}
> -	}
> -
> -	return &smmu->iommu;
> -
> -err_free_master:
> -	kfree(master);
> -	dev_iommu_priv_set(dev, NULL);
> -	return ERR_PTR(ret);
> -}
> -
> -static void arm_smmu_release_device(struct device *dev)
> -{
> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
> -	struct arm_smmu_master *master;
> -
> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
> -		return;
> -
> -	master = dev_iommu_priv_get(dev);
> -	arm_smmu_detach_dev(master);
> -	kfree(master);
> -	iommu_fwspec_free(dev);
> -}
> -
> -static struct iommu_group *arm_smmu_device_group(struct device *dev)
> -{
> -	struct iommu_group *group;
> -
> -	/*
> -	 * We don't support devices sharing stream IDs other than PCI RID
> -	 * aliases, since the necessary ID-to-device lookup becomes rather
> -	 * impractical given a potential sparse 32-bit stream ID space.
> -	 */
> -	if (dev_is_pci(dev))
> -		group = pci_device_group(dev);
> -	else
> -		group = generic_device_group(dev);
> -
> -	return group;
> -}
> -
> -static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
> -				    enum iommu_attr attr, void *data)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	switch (domain->type) {
> -	case IOMMU_DOMAIN_UNMANAGED:
> -		switch (attr) {
> -		case DOMAIN_ATTR_NESTING:
> -			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
> -			return 0;
> -		default:
> -			return -ENODEV;
> -		}
> -		break;
> -	case IOMMU_DOMAIN_DMA:
> -		switch (attr) {
> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
> -			*(int *)data = smmu_domain->non_strict;
> -			return 0;
> -		default:
> -			return -ENODEV;
> -		}
> -		break;
> -	default:
> -		return -EINVAL;
> -	}
> -}
> -
> -static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
> -				    enum iommu_attr attr, void *data)
> -{
> -	int ret = 0;
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	mutex_lock(&smmu_domain->init_mutex);
> -
> -	switch (domain->type) {
> -	case IOMMU_DOMAIN_UNMANAGED:
> -		switch (attr) {
> -		case DOMAIN_ATTR_NESTING:
> -			if (smmu_domain->smmu) {
> -				ret = -EPERM;
> -				goto out_unlock;
> -			}
> -
> -			if (*(int *)data)
> -				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
> -			else
> -				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
> -			break;
> -		default:
> -			ret = -ENODEV;
> -		}
> -		break;
> -	case IOMMU_DOMAIN_DMA:
> -		switch(attr) {
> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
> -			smmu_domain->non_strict = *(int *)data;
> -			break;
> -		default:
> -			ret = -ENODEV;
> -		}
> -		break;
> -	default:
> -		ret = -EINVAL;
> -	}
> -
> -out_unlock:
> -	mutex_unlock(&smmu_domain->init_mutex);
> -	return ret;
> -}
> -
> -static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
> -{
> -	return iommu_fwspec_add_ids(dev, args->args, 1);
> -}
> -
> -static void arm_smmu_get_resv_regions(struct device *dev,
> -				      struct list_head *head)
> -{
> -	struct iommu_resv_region *region;
> -	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
> -
> -	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
> -					 prot, IOMMU_RESV_SW_MSI);
> -	if (!region)
> -		return;
> -
> -	list_add_tail(&region->list, head);
> -
> -	iommu_dma_get_resv_regions(dev, head);
> -}

Arguably this could have been removed previously as part of the MSI
patch, but that's OK either way.


> -static struct iommu_ops arm_smmu_ops = {
> -	.capable		= arm_smmu_capable,
> -	.domain_alloc		= arm_smmu_domain_alloc,
> -	.domain_free		= arm_smmu_domain_free,
> -	.attach_dev		= arm_smmu_attach_dev,
> -	.map			= arm_smmu_map,
> -	.unmap			= arm_smmu_unmap,
> -	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
> -	.iotlb_sync		= arm_smmu_iotlb_sync,
> -	.iova_to_phys		= arm_smmu_iova_to_phys,
> -	.probe_device		= arm_smmu_probe_device,
> -	.release_device		= arm_smmu_release_device,
> -	.device_group		= arm_smmu_device_group,
> -	.domain_get_attr	= arm_smmu_domain_get_attr,
> -	.domain_set_attr	= arm_smmu_domain_set_attr,
> -	.of_xlate		= arm_smmu_of_xlate,
> -	.get_resv_regions	= arm_smmu_get_resv_regions,
> -	.put_resv_regions	= generic_iommu_put_resv_regions,
> -	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
> -};
> -
>  /* Probing and initialisation functions */
>  static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
>  				   struct arm_smmu_queue *q,
> @@ -2515,21 +2139,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>  	default:
>  		dev_info(smmu->dev,
>  			"unknown output address size. Truncating to 48-bit\n");
> -		fallthrough;
>  	case IDR5_OAS_48_BIT:
>  		smmu->oas = 48;
>  	}
>  
> -	if (arm_smmu_ops.pgsize_bitmap == -1UL)
> -		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
> -	else
> -		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
> -
> -	/* Set the DMA mask for our table walker */
> -	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
> -		dev_warn(smmu->dev,
> -			 "failed to set DMA mask for table walker\n");
> -
>  	smmu->ias = max(smmu->ias, smmu->oas);
>  
>  	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
> @@ -2595,9 +2208,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
>  
>  	parse_driver_options(smmu);
>  
> -	if (of_dma_is_coherent(dev->of_node))
> -		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
> -

Why this change? The ARM_SMMU_FEAT_COHERENCY flag is still used in
arm_smmu_device_hw_probe.
Rahul Singh Dec. 2, 2020, 2:34 p.m. UTC | #2
Hello Stefano,

> On 2 Dec 2020, at 1:48 am, Stefano Stabellini <sstabellini@kernel.org> wrote:
> 
> On Thu, 26 Nov 2020, Rahul Singh wrote:
>> The code related to struct io_pgtable_ops, struct io_pgtable_cfg,
>> struct iommu_flush_ops, and struct iommu_ops is Linux-specific.
>> 
>> Remove the code related to these structs, as it is dead code in Xen.
> 
> There are still instances of struct io_pgtable_cfg after applying this
> patch in the following functions:
> - arm_smmu_domain_finalise_s2
> - arm_smmu_domain_finalise
> 

OK. I will remove the remaining instances.
> 
> 
>> Signed-off-by: Rahul Singh <rahul.singh@arm.com>
>> ---
>> xen/drivers/passthrough/arm/smmu-v3.c | 457 --------------------------
>> 1 file changed, 457 deletions(-)
>> 
>> diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
>> index 40e3890a58..55d1cba194 100644
>> --- a/xen/drivers/passthrough/arm/smmu-v3.c
>> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
>> @@ -599,7 +593,6 @@ struct arm_smmu_domain {
>> 	struct arm_smmu_device		*smmu;
>> 	struct mutex			init_mutex; /* Protects smmu pointer */
>> 
>> -	struct io_pgtable_ops		*pgtbl_ops;
>> 	bool				non_strict;
>> 
>> 	enum arm_smmu_domain_stage	stage;
>> @@ -1297,74 +1290,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
>> 	arm_smmu_cmdq_issue_sync(smmu);
>> }
>> 
>> -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
>> -					  size_t granule, bool leaf, void *cookie)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = cookie;
>> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
>> -	struct arm_smmu_cmdq_ent cmd = {
>> -		.tlbi = {
>> -			.leaf	= leaf,
>> -			.addr	= iova,
>> -		},
>> -	};
>> -
>> -	cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
>> -	cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
>> -
>> -	do {
>> -		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
>> -		cmd.tlbi.addr += granule;
>> -	} while (size -= granule);
>> -}
>> -
>> -static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
>> -					 unsigned long iova, size_t granule,
>> -					 void *cookie)
>> -{
>> -	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
>> -}
>> -
>> -static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
>> -				  size_t granule, void *cookie)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = cookie;
>> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
>> -
>> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
>> -	arm_smmu_cmdq_issue_sync(smmu);
>> -}
>> -
>> -static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
>> -				  size_t granule, void *cookie)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = cookie;
>> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
>> -
>> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
>> -	arm_smmu_cmdq_issue_sync(smmu);
>> -}
>> -
>> -static const struct iommu_flush_ops arm_smmu_flush_ops = {
>> -	.tlb_flush_all	= arm_smmu_tlb_inv_context,
>> -	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
>> -	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
>> -	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
>> -};
>> -
>> -/* IOMMU API */
>> -static bool arm_smmu_capable(enum iommu_cap cap)
>> -{
>> -	switch (cap) {
>> -	case IOMMU_CAP_CACHE_COHERENCY:
>> -		return true;
>> -	case IOMMU_CAP_NOEXEC:
>> -		return true;
>> -	default:
>> -		return false;
>> -	}
>> -}
>> -
>> static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
>> {
>> 	struct arm_smmu_domain *smmu_domain;
>> @@ -1421,7 +1346,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>> 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
>> 
>> 	iommu_put_dma_cookie(domain);
>> -	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
>> 
>> 	if (cfg->vmid)
>> 		arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
>> @@ -1429,7 +1353,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>> 	kfree(smmu_domain);
>> }
>> 
>> -
>> static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>> 				       struct arm_smmu_master *master,
>> 				       struct io_pgtable_cfg *pgtbl_cfg)
>> @@ -1437,7 +1360,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>> 	int vmid;
>> 	struct arm_smmu_device *smmu = smmu_domain->smmu;
>> 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
>> -	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
>> 
>> 	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
>> 	if (vmid < 0)
>> @@ -1461,20 +1383,12 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>> {
>> 	int ret;
>> 	unsigned long ias, oas;
>> -	enum io_pgtable_fmt fmt;
>> -	struct io_pgtable_cfg pgtbl_cfg;
>> -	struct io_pgtable_ops *pgtbl_ops;
>> 	int (*finalise_stage_fn)(struct arm_smmu_domain *,
>> 				 struct arm_smmu_master *,
>> 				 struct io_pgtable_cfg *);
>> 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> 	struct arm_smmu_device *smmu = smmu_domain->smmu;
>> 
>> -	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
>> -		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
>> -		return 0;
>> -	}
>> -
>> 	/* Restrict the stage to what we can actually support */
>> 	smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
>> 
>> @@ -1483,40 +1397,17 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>> 	case ARM_SMMU_DOMAIN_S2:
>> 		ias = smmu->ias;
>> 		oas = smmu->oas;
>> -		fmt = ARM_64_LPAE_S2;
>> 		finalise_stage_fn = arm_smmu_domain_finalise_s2;
>> 		break;
>> 	default:
>> 		return -EINVAL;
>> 	}
>> 
>> -	pgtbl_cfg = (struct io_pgtable_cfg) {
>> -		.pgsize_bitmap	= smmu->pgsize_bitmap,
>> -		.ias		= ias,
>> -		.oas		= oas,
>> -		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
>> -		.tlb		= &arm_smmu_flush_ops,
>> -		.iommu_dev	= smmu->dev,
>> -	};
>> -
>> -	if (smmu_domain->non_strict)
>> -		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
>> -
>> -	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
>> -	if (!pgtbl_ops)
>> -		return -ENOMEM;
>> -
>> -	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
>> -	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
>> -	domain->geometry.force_aperture = true;
>> -
>> 	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
>> 	if (ret < 0) {
>> -		free_io_pgtable_ops(pgtbl_ops);
>> 		return ret;
>> 	}
>> 
>> -	smmu_domain->pgtbl_ops = pgtbl_ops;
>> 	return 0;
>> }
>> 
>> @@ -1626,71 +1517,6 @@ out_unlock:
>> 	return ret;
>> }
>> 
>> -static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
>> -			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
>> -{
>> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
>> -
>> -	if (!ops)
>> -		return -ENODEV;
>> -
>> -	return ops->map(ops, iova, paddr, size, prot, gfp);
>> -}
>> -
>> -static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
>> -			     size_t size, struct iommu_iotlb_gather *gather)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> -	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
>> -
>> -	if (!ops)
>> -		return 0;
>> -
>> -	return ops->unmap(ops, iova, size, gather);
>> -}
>> -
>> -static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> -
>> -	if (smmu_domain->smmu)
>> -		arm_smmu_tlb_inv_context(smmu_domain);
>> -}
>> -
>> -static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
>> -				struct iommu_iotlb_gather *gather)
>> -{
>> -	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
>> -
>> -	if (smmu)
>> -		arm_smmu_cmdq_issue_sync(smmu);
>> -}
>> -
>> -static phys_addr_t
>> -arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
>> -{
>> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
>> -
>> -	if (domain->type == IOMMU_DOMAIN_IDENTITY)
>> -		return iova;
>> -
>> -	if (!ops)
>> -		return 0;
>> -
>> -	return ops->iova_to_phys(ops, iova);
>> -}
>> -
>> -static struct platform_driver arm_smmu_driver;
>> -
>> -static
>> -struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
>> -{
>> -	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
>> -							  fwnode);
>> -	put_device(dev);
>> -	return dev ? dev_get_drvdata(dev) : NULL;
>> -}
>> -
>> static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>> {
>> 	unsigned long limit = smmu->strtab_cfg.num_l1_ents;
>> @@ -1701,206 +1527,6 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>> 	return sid < limit;
>> }
>> 
>> -static struct iommu_ops arm_smmu_ops;
>> -
>> -static struct iommu_device *arm_smmu_probe_device(struct device *dev)
>> -{
>> -	int i, ret;
>> -	struct arm_smmu_device *smmu;
>> -	struct arm_smmu_master *master;
>> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>> -
>> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
>> -		return ERR_PTR(-ENODEV);
>> -
>> -	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
>> -		return ERR_PTR(-EBUSY);
>> -
>> -	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
>> -	if (!smmu)
>> -		return ERR_PTR(-ENODEV);
>> -
>> -	master = kzalloc(sizeof(*master), GFP_KERNEL);
>> -	if (!master)
>> -		return ERR_PTR(-ENOMEM);
>> -
>> -	master->dev = dev;
>> -	master->smmu = smmu;
>> -	master->sids = fwspec->ids;
>> -	master->num_sids = fwspec->num_ids;
>> -	dev_iommu_priv_set(dev, master);
>> -
>> -	/* Check the SIDs are in range of the SMMU and our stream table */
>> -	for (i = 0; i < master->num_sids; i++) {
>> -		u32 sid = master->sids[i];
>> -
>> -		if (!arm_smmu_sid_in_range(smmu, sid)) {
>> -			ret = -ERANGE;
>> -			goto err_free_master;
>> -		}
>> -
>> -		/* Ensure l2 strtab is initialised */
>> -		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
>> -			ret = arm_smmu_init_l2_strtab(smmu, sid);
>> -			if (ret)
>> -				goto err_free_master;
>> -		}
>> -	}
>> -
>> -	return &smmu->iommu;
>> -
>> -err_free_master:
>> -	kfree(master);
>> -	dev_iommu_priv_set(dev, NULL);
>> -	return ERR_PTR(ret);
>> -}
>> -
>> -static void arm_smmu_release_device(struct device *dev)
>> -{
>> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>> -	struct arm_smmu_master *master;
>> -
>> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
>> -		return;
>> -
>> -	master = dev_iommu_priv_get(dev);
>> -	arm_smmu_detach_dev(master);
>> -	kfree(master);
>> -	iommu_fwspec_free(dev);
>> -}
>> -
>> -static struct iommu_group *arm_smmu_device_group(struct device *dev)
>> -{
>> -	struct iommu_group *group;
>> -
>> -	/*
>> -	 * We don't support devices sharing stream IDs other than PCI RID
>> -	 * aliases, since the necessary ID-to-device lookup becomes rather
>> -	 * impractical given a potential sparse 32-bit stream ID space.
>> -	 */
>> -	if (dev_is_pci(dev))
>> -		group = pci_device_group(dev);
>> -	else
>> -		group = generic_device_group(dev);
>> -
>> -	return group;
>> -}
>> -
>> -static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
>> -				    enum iommu_attr attr, void *data)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> -
>> -	switch (domain->type) {
>> -	case IOMMU_DOMAIN_UNMANAGED:
>> -		switch (attr) {
>> -		case DOMAIN_ATTR_NESTING:
>> -			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
>> -			return 0;
>> -		default:
>> -			return -ENODEV;
>> -		}
>> -		break;
>> -	case IOMMU_DOMAIN_DMA:
>> -		switch (attr) {
>> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
>> -			*(int *)data = smmu_domain->non_strict;
>> -			return 0;
>> -		default:
>> -			return -ENODEV;
>> -		}
>> -		break;
>> -	default:
>> -		return -EINVAL;
>> -	}
>> -}
>> -
>> -static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
>> -				    enum iommu_attr attr, void *data)
>> -{
>> -	int ret = 0;
>> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> -
>> -	mutex_lock(&smmu_domain->init_mutex);
>> -
>> -	switch (domain->type) {
>> -	case IOMMU_DOMAIN_UNMANAGED:
>> -		switch (attr) {
>> -		case DOMAIN_ATTR_NESTING:
>> -			if (smmu_domain->smmu) {
>> -				ret = -EPERM;
>> -				goto out_unlock;
>> -			}
>> -
>> -			if (*(int *)data)
>> -				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
>> -			else
>> -				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
>> -			break;
>> -		default:
>> -			ret = -ENODEV;
>> -		}
>> -		break;
>> -	case IOMMU_DOMAIN_DMA:
>> -		switch(attr) {
>> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
>> -			smmu_domain->non_strict = *(int *)data;
>> -			break;
>> -		default:
>> -			ret = -ENODEV;
>> -		}
>> -		break;
>> -	default:
>> -		ret = -EINVAL;
>> -	}
>> -
>> -out_unlock:
>> -	mutex_unlock(&smmu_domain->init_mutex);
>> -	return ret;
>> -}
>> -
>> -static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
>> -{
>> -	return iommu_fwspec_add_ids(dev, args->args, 1);
>> -}
>> -
>> -static void arm_smmu_get_resv_regions(struct device *dev,
>> -				      struct list_head *head)
>> -{
>> -	struct iommu_resv_region *region;
>> -	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
>> -
>> -	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
>> -					 prot, IOMMU_RESV_SW_MSI);
>> -	if (!region)
>> -		return;
>> -
>> -	list_add_tail(&region->list, head);
>> -
>> -	iommu_dma_get_resv_regions(dev, head);
>> -}
> 
> Arguably this could have been removed previously as part of the MSI
> patch, but that's OK either way.

Ack. 
> 
> 
>> -static struct iommu_ops arm_smmu_ops = {
>> -	.capable		= arm_smmu_capable,
>> -	.domain_alloc		= arm_smmu_domain_alloc,
>> -	.domain_free		= arm_smmu_domain_free,
>> -	.attach_dev		= arm_smmu_attach_dev,
>> -	.map			= arm_smmu_map,
>> -	.unmap			= arm_smmu_unmap,
>> -	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
>> -	.iotlb_sync		= arm_smmu_iotlb_sync,
>> -	.iova_to_phys		= arm_smmu_iova_to_phys,
>> -	.probe_device		= arm_smmu_probe_device,
>> -	.release_device		= arm_smmu_release_device,
>> -	.device_group		= arm_smmu_device_group,
>> -	.domain_get_attr	= arm_smmu_domain_get_attr,
>> -	.domain_set_attr	= arm_smmu_domain_set_attr,
>> -	.of_xlate		= arm_smmu_of_xlate,
>> -	.get_resv_regions	= arm_smmu_get_resv_regions,
>> -	.put_resv_regions	= generic_iommu_put_resv_regions,
>> -	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
>> -};
>> -
>> /* Probing and initialisation functions */
>> static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
>> 				   struct arm_smmu_queue *q,
>> @@ -2515,21 +2139,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>> 	default:
>> 		dev_info(smmu->dev,
>> 			"unknown output address size. Truncating to 48-bit\n");
>> -		fallthrough;
>> 	case IDR5_OAS_48_BIT:
>> 		smmu->oas = 48;
>> 	}
>> 
>> -	if (arm_smmu_ops.pgsize_bitmap == -1UL)
>> -		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
>> -	else
>> -		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
>> -
>> -	/* Set the DMA mask for our table walker */
>> -	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
>> -		dev_warn(smmu->dev,
>> -			 "failed to set DMA mask for table walker\n");
>> -
>> 	smmu->ias = max(smmu->ias, smmu->oas);
>> 
>> 	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
>> @@ -2595,9 +2208,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
>> 
>> 	parse_driver_options(smmu);
>> 
>> -	if (of_dma_is_coherent(dev->of_node))
>> -		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
>> -
> 
> Why this change? The ARM_SMMU_FEAT_COHERENCY flag is still used in
> arm_smmu_device_hw_probe.

I removed this as it is Linux-specific. I will also remove the ARM_SMMU_FEAT_COHERENCY flag used in arm_smmu_device_hw_probe.

Regards,
Rahul
Julien Grall Dec. 2, 2020, 2:39 p.m. UTC | #3
Hi Rahul,

On 02/12/2020 14:34, Rahul Singh wrote:
>>> 	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
>>> @@ -2595,9 +2208,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
>>>
>>> 	parse_driver_options(smmu);
>>>
>>> -	if (of_dma_is_coherent(dev->of_node))
>>> -		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
>>> -
>>
>> Why this change? The ARM_SMMU_FEAT_COHERENCY flag is still used in
>> arm_smmu_device_hw_probe.
> 
> I removed this as it is Linux-specific. I will also remove the ARM_SMMU_FEAT_COHERENCY flag used in arm_smmu_device_hw_probe.

From my understanding, ARM_SMMU_FEAT_COHERENCY indicates whether the 
SMMU page table walker will snoop the cache. If the flag is not set, 
then Xen will have to clean every p2m entry it updates to the PoC.
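
As a minimal sketch of what this means in practice (sketch_sync_pte() 
is a hypothetical helper for illustration; only clean_dcache_va_range() 
and ARM_SMMU_FEAT_COHERENCY exist in the code):

	/*
	 * Illustrative only: with a non-coherent table walker, every
	 * descriptor written into the shared p2m must be cleaned to the
	 * PoC before the SMMU can safely walk it.
	 */
	static void sketch_sync_pte(const struct arm_smmu_device *smmu,
				    const void *entry, unsigned long size)
	{
		if (!(smmu->features & ARM_SMMU_FEAT_COHERENCY))
			clean_dcache_va_range(entry, size);
	}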

Therefore, I think we need to keep this code.

If it turns out we don't need to keep the code, then I think the 
reason should be explained in the commit message.

Cheers,
Julien Grall Dec. 2, 2020, 2:45 p.m. UTC | #4
On 26/11/2020 17:02, Rahul Singh wrote:
> The code related to struct io_pgtable_ops, struct io_pgtable_cfg,
> struct iommu_flush_ops, and struct iommu_ops is Linux-specific.

So the assumption is that we are only going to support sharing the 
page tables between the P2M and the IOMMU. That's probably fine 
short-term, but long-term we are going to need to unshare the page 
tables (there are issues on some platforms if the ITS doorbell is 
accessed by the processors).


I am OK with removing anything related to the unsharing code, but I 
think it should be clarified here.

> 
> Remove the code related to these structs, as it is dead code in Xen.
> 
> Signed-off-by: Rahul Singh <rahul.singh@arm.com>
> ---
>   xen/drivers/passthrough/arm/smmu-v3.c | 457 --------------------------
>   1 file changed, 457 deletions(-)
> 
> diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
> index 40e3890a58..55d1cba194 100644
> --- a/xen/drivers/passthrough/arm/smmu-v3.c
> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
> @@ -402,13 +402,7 @@
>   #define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US	1000000 /* 1s! */
>   #define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT	10
>   
> -#define MSI_IOVA_BASE			0x8000000
> -#define MSI_IOVA_LENGTH			0x100000
> -
>   static bool disable_bypass = 1;
> -module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
> -MODULE_PARM_DESC(disable_bypass,
> -	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

Per your commit message, this doesn't look related to what you intend to 
remove. Maybe your commit message should be updated?

>   
>   enum pri_resp {
>   	PRI_RESP_DENY = 0,
> @@ -599,7 +593,6 @@ struct arm_smmu_domain {
>   	struct arm_smmu_device		*smmu;
>   	struct mutex			init_mutex; /* Protects smmu pointer */
>   
> -	struct io_pgtable_ops		*pgtbl_ops;
>   	bool				non_strict;
>   
>   	enum arm_smmu_domain_stage	stage;
> @@ -1297,74 +1290,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
>   	arm_smmu_cmdq_issue_sync(smmu);
>   }
>   
> -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
> -					  size_t granule, bool leaf, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -	struct arm_smmu_cmdq_ent cmd = {
> -		.tlbi = {
> -			.leaf	= leaf,
> -			.addr	= iova,
> -		},
> -	};
> -
> -	cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
> -	cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
> -
> -	do {
> -		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
> -		cmd.tlbi.addr += granule;
> -	} while (size -= granule);
> -}
> -
> -static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
> -					 unsigned long iova, size_t granule,
> -					 void *cookie)
> -{
> -	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
> -}
> -
> -static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
> -				  size_t granule, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -
> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
> -	arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
> -				  size_t granule, void *cookie)
> -{
> -	struct arm_smmu_domain *smmu_domain = cookie;
> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
> -
> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
> -	arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static const struct iommu_flush_ops arm_smmu_flush_ops = {
> -	.tlb_flush_all	= arm_smmu_tlb_inv_context,
> -	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
> -	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
> -	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
> -};
> -
> -/* IOMMU API */
> -static bool arm_smmu_capable(enum iommu_cap cap)
> -{
> -	switch (cap) {
> -	case IOMMU_CAP_CACHE_COHERENCY:
> -		return true;
> -	case IOMMU_CAP_NOEXEC:
> -		return true;
> -	default:
> -		return false;
> -	}
> -}
> -
>   static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
>   {
>   	struct arm_smmu_domain *smmu_domain;
> @@ -1421,7 +1346,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>   	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
>   
>   	iommu_put_dma_cookie(domain);
> -	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
>   
>   	if (cfg->vmid)
>   		arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
> @@ -1429,7 +1353,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>   	kfree(smmu_domain);
>   }
>   
> -

Looks like a spurious change.

>   static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>   				       struct arm_smmu_master *master,
>   				       struct io_pgtable_cfg *pgtbl_cfg)

Your commit message leads one to think that all uses of struct 
io_pgtable_cfg will be removed.

> @@ -1437,7 +1360,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>   	int vmid;
>   	struct arm_smmu_device *smmu = smmu_domain->smmu;
>   	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
> -	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;

It feels a bit odd that the definition of 'vtcr' is removed but there 
are still users.

>   
>   	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
>   	if (vmid < 0)
> @@ -1461,20 +1383,12 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>   {
>   	int ret;
>   	unsigned long ias, oas;
> -	enum io_pgtable_fmt fmt;
> -	struct io_pgtable_cfg pgtbl_cfg;
> -	struct io_pgtable_ops *pgtbl_ops;
>   	int (*finalise_stage_fn)(struct arm_smmu_domain *,
>   				 struct arm_smmu_master *,
>   				 struct io_pgtable_cfg *);
>   	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>   	struct arm_smmu_device *smmu = smmu_domain->smmu;
>   
> -	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
> -		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
> -		return 0;
> -	}
> -

Per your commit message, this doesn't look related to what you intend to 
remove. Maybe your commit message should be updated?


>   	/* Restrict the stage to what we can actually support */
>   	smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
>   
> @@ -1483,40 +1397,17 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>   	case ARM_SMMU_DOMAIN_S2:
>   		ias = smmu->ias;
>   		oas = smmu->oas;
> -		fmt = ARM_64_LPAE_S2;
>   		finalise_stage_fn = arm_smmu_domain_finalise_s2;
>   		break;
>   	default:
>   		return -EINVAL;
>   	}
>   
> -	pgtbl_cfg = (struct io_pgtable_cfg) {
> -		.pgsize_bitmap	= smmu->pgsize_bitmap,
> -		.ias		= ias,
> -		.oas		= oas,
> -		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
> -		.tlb		= &arm_smmu_flush_ops,
> -		.iommu_dev	= smmu->dev,
> -	};
> -
> -	if (smmu_domain->non_strict)
> -		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
> -
> -	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
> -	if (!pgtbl_ops)
> -		return -ENOMEM;
> -
> -	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
> -	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
> -	domain->geometry.force_aperture = true;
> -
>   	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
>   	if (ret < 0) {
> -		free_io_pgtable_ops(pgtbl_ops);
>   		return ret;
>   	}
>   
> -	smmu_domain->pgtbl_ops = pgtbl_ops;
>   	return 0;
>   }
>   
> @@ -1626,71 +1517,6 @@ out_unlock:
>   	return ret;
>   }
>   
> -static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
> -			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
> -{
> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
> -
> -	if (!ops)
> -		return -ENODEV;
> -
> -	return ops->map(ops, iova, paddr, size, prot, gfp);
> -}
> -
> -static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
> -			     size_t size, struct iommu_iotlb_gather *gather)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
> -
> -	if (!ops)
> -		return 0;
> -
> -	return ops->unmap(ops, iova, size, gather);
> -}
> -
> -static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	if (smmu_domain->smmu)
> -		arm_smmu_tlb_inv_context(smmu_domain);
> -}
> -
> -static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
> -				struct iommu_iotlb_gather *gather)
> -{
> -	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
> -
> -	if (smmu)
> -		arm_smmu_cmdq_issue_sync(smmu);
> -}
> -
> -static phys_addr_t
> -arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
> -{
> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
> -
> -	if (domain->type == IOMMU_DOMAIN_IDENTITY)
> -		return iova;
> -
> -	if (!ops)
> -		return 0;
> -
> -	return ops->iova_to_phys(ops, iova);
> -}
> -
> -static struct platform_driver arm_smmu_driver;
> -
> -static
> -struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
> -{
> -	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
> -							  fwnode);
> -	put_device(dev);
> -	return dev ? dev_get_drvdata(dev) : NULL;
> -}
> -
>   static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>   {
>   	unsigned long limit = smmu->strtab_cfg.num_l1_ents;
> @@ -1701,206 +1527,6 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>   	return sid < limit;
>   }
>   
> -static struct iommu_ops arm_smmu_ops;
> -
> -static struct iommu_device *arm_smmu_probe_device(struct device *dev)
> -{

Most of the code here looks useful to Xen. I think you want to keep the 
code and re-use it afterwards.

> -	int i, ret;
> -	struct arm_smmu_device *smmu;
> -	struct arm_smmu_master *master;
> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
> -
> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
> -		return ERR_PTR(-ENODEV);
> -
> -	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
> -		return ERR_PTR(-EBUSY);
> -
> -	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
> -	if (!smmu)
> -		return ERR_PTR(-ENODEV);
> -
> -	master = kzalloc(sizeof(*master), GFP_KERNEL);
> -	if (!master)
> -		return ERR_PTR(-ENOMEM);
> -
> -	master->dev = dev;
> -	master->smmu = smmu;
> -	master->sids = fwspec->ids;
> -	master->num_sids = fwspec->num_ids;
> -	dev_iommu_priv_set(dev, master);
> -
> -	/* Check the SIDs are in range of the SMMU and our stream table */
> -	for (i = 0; i < master->num_sids; i++) {
> -		u32 sid = master->sids[i];
> -
> -		if (!arm_smmu_sid_in_range(smmu, sid)) {
> -			ret = -ERANGE;
> -			goto err_free_master;
> -		}
> -
> -		/* Ensure l2 strtab is initialised */
> -		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
> -			ret = arm_smmu_init_l2_strtab(smmu, sid);
> -			if (ret)
> -				goto err_free_master;
> -		}
> -	}
> -
> -	return &smmu->iommu;
> -
> -err_free_master:
> -	kfree(master);
> -	dev_iommu_priv_set(dev, NULL);
> -	return ERR_PTR(ret);
> -}
> -
> -static void arm_smmu_release_device(struct device *dev)
> -{
> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
> -	struct arm_smmu_master *master;
> -
> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
> -		return;
> -
> -	master = dev_iommu_priv_get(dev);
> -	arm_smmu_detach_dev(master);
> -	kfree(master);
> -	iommu_fwspec_free(dev);
> -}
> -
> -static struct iommu_group *arm_smmu_device_group(struct device *dev)
> -{
> -	struct iommu_group *group;
> -
> -	/*
> -	 * We don't support devices sharing stream IDs other than PCI RID
> -	 * aliases, since the necessary ID-to-device lookup becomes rather
> -	 * impractical given a potential sparse 32-bit stream ID space.
> -	 */
> -	if (dev_is_pci(dev))
> -		group = pci_device_group(dev);
> -	else
> -		group = generic_device_group(dev);
> -
> -	return group;
> -}
> -
> -static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
> -				    enum iommu_attr attr, void *data)
> -{
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	switch (domain->type) {
> -	case IOMMU_DOMAIN_UNMANAGED:
> -		switch (attr) {
> -		case DOMAIN_ATTR_NESTING:
> -			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
> -			return 0;
> -		default:
> -			return -ENODEV;
> -		}
> -		break;
> -	case IOMMU_DOMAIN_DMA:
> -		switch (attr) {
> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
> -			*(int *)data = smmu_domain->non_strict;
> -			return 0;
> -		default:
> -			return -ENODEV;
> -		}
> -		break;
> -	default:
> -		return -EINVAL;
> -	}
> -}
> -
> -static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
> -				    enum iommu_attr attr, void *data)
> -{
> -	int ret = 0;
> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
> -
> -	mutex_lock(&smmu_domain->init_mutex);
> -
> -	switch (domain->type) {
> -	case IOMMU_DOMAIN_UNMANAGED:
> -		switch (attr) {
> -		case DOMAIN_ATTR_NESTING:
> -			if (smmu_domain->smmu) {
> -				ret = -EPERM;
> -				goto out_unlock;
> -			}
> -
> -			if (*(int *)data)
> -				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
> -			else
> -				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
> -			break;
> -		default:
> -			ret = -ENODEV;
> -		}
> -		break;
> -	case IOMMU_DOMAIN_DMA:
> -		switch(attr) {
> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
> -			smmu_domain->non_strict = *(int *)data;
> -			break;
> -		default:
> -			ret = -ENODEV;
> -		}
> -		break;
> -	default:
> -		ret = -EINVAL;
> -	}
> -
> -out_unlock:
> -	mutex_unlock(&smmu_domain->init_mutex);
> -	return ret;
> -}
> -
> -static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
> -{
> -	return iommu_fwspec_add_ids(dev, args->args, 1);
> -}
> -
> -static void arm_smmu_get_resv_regions(struct device *dev,
> -				      struct list_head *head)
> -{
> -	struct iommu_resv_region *region;
> -	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
> -
> -	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
> -					 prot, IOMMU_RESV_SW_MSI);
> -	if (!region)
> -		return;
> -
> -	list_add_tail(&region->list, head);
> -
> -	iommu_dma_get_resv_regions(dev, head);
> -}
> -
> -static struct iommu_ops arm_smmu_ops = {
> -	.capable		= arm_smmu_capable,
> -	.domain_alloc		= arm_smmu_domain_alloc,
> -	.domain_free		= arm_smmu_domain_free,
> -	.attach_dev		= arm_smmu_attach_dev,
> -	.map			= arm_smmu_map,
> -	.unmap			= arm_smmu_unmap,
> -	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
> -	.iotlb_sync		= arm_smmu_iotlb_sync,
> -	.iova_to_phys		= arm_smmu_iova_to_phys,
> -	.probe_device		= arm_smmu_probe_device,
> -	.release_device		= arm_smmu_release_device,
> -	.device_group		= arm_smmu_device_group,
> -	.domain_get_attr	= arm_smmu_domain_get_attr,
> -	.domain_set_attr	= arm_smmu_domain_set_attr,
> -	.of_xlate		= arm_smmu_of_xlate,
> -	.get_resv_regions	= arm_smmu_get_resv_regions,
> -	.put_resv_regions	= generic_iommu_put_resv_regions,
> -	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
> -};
> -
>   /* Probing and initialisation functions */
>   static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
>   				   struct arm_smmu_queue *q,
> @@ -2406,7 +2032,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>   	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
>   	case IDR0_STALL_MODEL_FORCE:
>   		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
> -		fallthrough;

We should keep all the fallthroughs documented. So I think we want to 
introduce the fallthrough pseudo-keyword in Xen as well.
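
For reference, the documented form of the hunk above would read as 
follows (illustrative; whether Xen gains a fallthrough pseudo-keyword 
or keeps a /* Fallthrough */ comment is a separate decision):

	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		fallthrough;	/* FORCE also implies STALLS */
	case IDR0_STALL_MODEL_STALL:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}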

>   	case IDR0_STALL_MODEL_STALL:
>   		smmu->features |= ARM_SMMU_FEAT_STALLS;
>   	}
> @@ -2426,7 +2051,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>   	switch (FIELD_GET(IDR0_TTF, reg)) {
>   	case IDR0_TTF_AARCH32_64:
>   		smmu->ias = 40;
> -		fallthrough;
>   	case IDR0_TTF_AARCH64:
>   		break;
>   	default:
> @@ -2515,21 +2139,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>   	default:
>   		dev_info(smmu->dev,
>   			"unknown output address size. Truncating to 48-bit\n");
> -		fallthrough;
>   	case IDR5_OAS_48_BIT:
>   		smmu->oas = 48;
>   	}
>   
> -	if (arm_smmu_ops.pgsize_bitmap == -1UL)
> -		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
> -	else
> -		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
> -
> -	/* Set the DMA mask for our table walker */
> -	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
> -		dev_warn(smmu->dev,
> -			 "failed to set DMA mask for table walker\n");
> -
>   	smmu->ias = max(smmu->ias, smmu->oas);
>   
>   	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
> @@ -2595,9 +2208,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
>   
>   	parse_driver_options(smmu);
>   
> -	if (of_dma_is_coherent(dev->of_node))
> -		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
> -
>   	return ret;
>   }
>   
> @@ -2609,55 +2219,6 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
>   		return SZ_128K;
>   }
>   
> -static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
> -{
> -	int err;
> -
> -#ifdef CONFIG_PCI
> -	if (pci_bus_type.iommu_ops != ops) {
> -		err = bus_set_iommu(&pci_bus_type, ops);
> -		if (err)
> -			return err;
> -	}
> -#endif
> -#ifdef CONFIG_ARM_AMBA
> -	if (amba_bustype.iommu_ops != ops) {
> -		err = bus_set_iommu(&amba_bustype, ops);
> -		if (err)
> -			goto err_reset_pci_ops;
> -	}
> -#endif
> -	if (platform_bus_type.iommu_ops != ops) {
> -		err = bus_set_iommu(&platform_bus_type, ops);
> -		if (err)
> -			goto err_reset_amba_ops;
> -	}
> -
> -	return 0;
> -
> -err_reset_amba_ops:
> -#ifdef CONFIG_ARM_AMBA
> -	bus_set_iommu(&amba_bustype, NULL);
> -#endif
> -err_reset_pci_ops: __maybe_unused;
> -#ifdef CONFIG_PCI
> -	bus_set_iommu(&pci_bus_type, NULL);
> -#endif
> -	return err;
> -}
> -
> -static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
> -				      resource_size_t size)

It seems a bit odd that you are removing the function but not its 
callers. Shouldn't you keep this function until the next patch?

> -{
> -	struct resource res = {
> -		.flags = IORESOURCE_MEM,
> -		.start = start,
> -		.end = start + size - 1,
> -	};
> -
> -	return devm_ioremap_resource(dev, &res);
> -}
> -
>   static int arm_smmu_device_probe(struct platform_device *pdev)
>   {
>   	int irq, ret;
> @@ -2785,21 +2346,3 @@ static const struct of_device_id arm_smmu_of_match[] = {
>   	{ .compatible = "arm,smmu-v3", },
>   	{ },
>   };
> -MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
> -
> -static struct platform_driver arm_smmu_driver = {
> -	.driver	= {
> -		.name			= "arm-smmu-v3",
> -		.of_match_table		= arm_smmu_of_match,
> -		.suppress_bind_attrs	= true,
> -	},
> -	.probe	= arm_smmu_device_probe,
> -	.remove	= arm_smmu_device_remove,
> -	.shutdown = arm_smmu_device_shutdown,
> -};
> -module_platform_driver(arm_smmu_driver);
> -
> -MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
> -MODULE_AUTHOR("Will Deacon <will@kernel.org>");
> -MODULE_ALIAS("platform:arm-smmu-v3");
> -MODULE_LICENSE("GPL v2");
>
Rahul Singh Dec. 3, 2020, 2:33 p.m. UTC | #5
Hello Julien,

> On 2 Dec 2020, at 2:45 pm, Julien Grall <julien@xen.org> wrote:
> 
> 
> 
> On 26/11/2020 17:02, Rahul Singh wrote:
>> The code related to struct io_pgtable_ops, struct io_pgtable_cfg,
>> struct iommu_flush_ops, and struct iommu_ops is Linux-specific.
> 
> So the assumption is that we are only going to support sharing the page tables between the P2M and the IOMMU. That's probably fine short-term, but long-term we are going to need to unshare the page tables (there are issues on some platforms if the ITS doorbell is accessed by the processors).
> 
> I am OK with removing anything related to the unsharing code, but I think it should be clarified here.

OK, I will state in the commit message what is being removed from the code.
> 
>> Remove the code related to these structs, as it is dead code in Xen.
>> Signed-off-by: Rahul Singh <rahul.singh@arm.com>
>> ---
>>  xen/drivers/passthrough/arm/smmu-v3.c | 457 --------------------------
>>  1 file changed, 457 deletions(-)
>> diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
>> index 40e3890a58..55d1cba194 100644
>> --- a/xen/drivers/passthrough/arm/smmu-v3.c
>> +++ b/xen/drivers/passthrough/arm/smmu-v3.c
>> @@ -402,13 +402,7 @@
>>  #define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US	1000000 /* 1s! */
>>  #define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT	10
>>  -#define MSI_IOVA_BASE			0x8000000
>> -#define MSI_IOVA_LENGTH			0x100000
>> -
>>  static bool disable_bypass = 1;
>> -module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
>> -MODULE_PARM_DESC(disable_bypass,
>> -	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
> 
> Per your commit message, this doesn't look related to what you intend to remove. Maybe your commit message should be updated?

Ok.
> 
>>    enum pri_resp {
>>  	PRI_RESP_DENY = 0,
>> @@ -599,7 +593,6 @@ struct arm_smmu_domain {
>>  	struct arm_smmu_device		*smmu;
>>  	struct mutex			init_mutex; /* Protects smmu pointer */
>>  -	struct io_pgtable_ops		*pgtbl_ops;
>>  	bool				non_strict;
>>    	enum arm_smmu_domain_stage	stage;
>> @@ -1297,74 +1290,6 @@ static void arm_smmu_tlb_inv_context(void *cookie)
>>  	arm_smmu_cmdq_issue_sync(smmu);
>>  }
>>  -static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
>> -					  size_t granule, bool leaf, void *cookie)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = cookie;
>> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
>> -	struct arm_smmu_cmdq_ent cmd = {
>> -		.tlbi = {
>> -			.leaf	= leaf,
>> -			.addr	= iova,
>> -		},
>> -	};
>> -
>> -	cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
>> -	cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
>> -
>> -	do {
>> -		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
>> -		cmd.tlbi.addr += granule;
>> -	} while (size -= granule);
>> -}
>> -
>> -static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
>> -					 unsigned long iova, size_t granule,
>> -					 void *cookie)
>> -{
>> -	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
>> -}
>> -
>> -static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
>> -				  size_t granule, void *cookie)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = cookie;
>> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
>> -
>> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
>> -	arm_smmu_cmdq_issue_sync(smmu);
>> -}
>> -
>> -static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
>> -				  size_t granule, void *cookie)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = cookie;
>> -	struct arm_smmu_device *smmu = smmu_domain->smmu;
>> -
>> -	arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
>> -	arm_smmu_cmdq_issue_sync(smmu);
>> -}
>> -
>> -static const struct iommu_flush_ops arm_smmu_flush_ops = {
>> -	.tlb_flush_all	= arm_smmu_tlb_inv_context,
>> -	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
>> -	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
>> -	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
>> -};
>> -
>> -/* IOMMU API */
>> -static bool arm_smmu_capable(enum iommu_cap cap)
>> -{
>> -	switch (cap) {
>> -	case IOMMU_CAP_CACHE_COHERENCY:
>> -		return true;
>> -	case IOMMU_CAP_NOEXEC:
>> -		return true;
>> -	default:
>> -		return false;
>> -	}
>> -}
>> -
>>  static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
>>  {
>>  	struct arm_smmu_domain *smmu_domain;
>> @@ -1421,7 +1346,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>>  	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
>>    	iommu_put_dma_cookie(domain);
>> -	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
>>    	if (cfg->vmid)
>>  		arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
>> @@ -1429,7 +1353,6 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
>>  	kfree(smmu_domain);
>>  }
>>  -
> 
> Looks like a spurious change.

Ack. 
> 
>>  static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>>  				       struct arm_smmu_master *master,
>>  				       struct io_pgtable_cfg *pgtbl_cfg)
> 
> Your commit message leads one to think that all uses of struct io_pgtable_cfg will be removed.

OK, I will remove it.
> 
>> @@ -1437,7 +1360,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
>>  	int vmid;
>>  	struct arm_smmu_device *smmu = smmu_domain->smmu;
>>  	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
>> -	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
> 
> It feels a bit odd that the definition of 'vtcr' is removed but there are still users.

Ack. Will fix that.
> 
>>    	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
>>  	if (vmid < 0)
>> @@ -1461,20 +1383,12 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>>  {
>>  	int ret;
>>  	unsigned long ias, oas;
>> -	enum io_pgtable_fmt fmt;
>> -	struct io_pgtable_cfg pgtbl_cfg;
>> -	struct io_pgtable_ops *pgtbl_ops;
>>  	int (*finalise_stage_fn)(struct arm_smmu_domain *,
>>  				 struct arm_smmu_master *,
>>  				 struct io_pgtable_cfg *);
>>  	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>>  	struct arm_smmu_device *smmu = smmu_domain->smmu;
>>  -	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
>> -		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
>> -		return 0;
>> -	}
>> -
> 
> Per your commit message, this doesn't look related to what you intend to remove. Maybe your commit message should be updated?

Ok, I will update the commit message.
> 
>>  	/* Restrict the stage to what we can actually support */
>>  	smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
>>  @@ -1483,40 +1397,17 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
>>  	case ARM_SMMU_DOMAIN_S2:
>>  		ias = smmu->ias;
>>  		oas = smmu->oas;
>> -		fmt = ARM_64_LPAE_S2;
>>  		finalise_stage_fn = arm_smmu_domain_finalise_s2;
>>  		break;
>>  	default:
>>  		return -EINVAL;
>>  	}
>>  -	pgtbl_cfg = (struct io_pgtable_cfg) {
>> -		.pgsize_bitmap	= smmu->pgsize_bitmap,
>> -		.ias		= ias,
>> -		.oas		= oas,
>> -		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
>> -		.tlb		= &arm_smmu_flush_ops,
>> -		.iommu_dev	= smmu->dev,
>> -	};
>> -
>> -	if (smmu_domain->non_strict)
>> -		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
>> -
>> -	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
>> -	if (!pgtbl_ops)
>> -		return -ENOMEM;
>> -
>> -	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
>> -	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
>> -	domain->geometry.force_aperture = true;
>> -
>>  	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
>>  	if (ret < 0) {
>> -		free_io_pgtable_ops(pgtbl_ops);
>>  		return ret;
>>  	}
>>  -	smmu_domain->pgtbl_ops = pgtbl_ops;
>>  	return 0;
>>  }
>>  @@ -1626,71 +1517,6 @@ out_unlock:
>>  	return ret;
>>  }
>>  -static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
>> -			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
>> -{
>> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
>> -
>> -	if (!ops)
>> -		return -ENODEV;
>> -
>> -	return ops->map(ops, iova, paddr, size, prot, gfp);
>> -}
>> -
>> -static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
>> -			     size_t size, struct iommu_iotlb_gather *gather)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> -	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
>> -
>> -	if (!ops)
>> -		return 0;
>> -
>> -	return ops->unmap(ops, iova, size, gather);
>> -}
>> -
>> -static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> -
>> -	if (smmu_domain->smmu)
>> -		arm_smmu_tlb_inv_context(smmu_domain);
>> -}
>> -
>> -static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
>> -				struct iommu_iotlb_gather *gather)
>> -{
>> -	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
>> -
>> -	if (smmu)
>> -		arm_smmu_cmdq_issue_sync(smmu);
>> -}
>> -
>> -static phys_addr_t
>> -arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
>> -{
>> -	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
>> -
>> -	if (domain->type == IOMMU_DOMAIN_IDENTITY)
>> -		return iova;
>> -
>> -	if (!ops)
>> -		return 0;
>> -
>> -	return ops->iova_to_phys(ops, iova);
>> -}
>> -
>> -static struct platform_driver arm_smmu_driver;
>> -
>> -static
>> -struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
>> -{
>> -	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
>> -							  fwnode);
>> -	put_device(dev);
>> -	return dev ? dev_get_drvdata(dev) : NULL;
>> -}
>> -
>>  static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>>  {
>>  	unsigned long limit = smmu->strtab_cfg.num_l1_ents;
>> @@ -1701,206 +1527,6 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
>>  	return sid < limit;
>>  }
>>  -static struct iommu_ops arm_smmu_ops;
>> -
>> -static struct iommu_device *arm_smmu_probe_device(struct device *dev)
>> -{
> 
> Most of the code here looks useful to Xen. I think you want to keep the code and re-use it afterwards.

Ok. I removed the code here and added the XEN-compatible code to add devices in the next patch.
I will keep it in this patch and will modify the code to make it XEN compatible.
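For illustration, a rough sketch of how the kept probe logic could be wrapped behind a XEN-style add-device hook (the hook name and signature below follow the existing SMMU v1/v2 driver in xen/drivers/passthrough/arm and are my assumption, not the final code of this series):

static int arm_smmu_add_device(u8 devfn, struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec)
		return -ENODEV;

	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!smmu)
		return -ENODEV;

	master = kzalloc(sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->dev = dev;
	master->smmu = smmu;
	master->sids = fwspec->ids;
	master->num_sids = fwspec->num_ids;
	dev_iommu_priv_set(dev, master);

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < master->num_sids; i++) {
		u32 sid = master->sids[i];

		if (!arm_smmu_sid_in_range(smmu, sid)) {
			ret = -ERANGE;
			goto err_free_master;
		}

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				goto err_free_master;
		}
	}

	return 0;

err_free_master:
	kfree(master);
	return ret;
}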

> 
>> -	int i, ret;
>> -	struct arm_smmu_device *smmu;
>> -	struct arm_smmu_master *master;
>> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>> -
>> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
>> -		return ERR_PTR(-ENODEV);
>> -
>> -	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
>> -		return ERR_PTR(-EBUSY);
>> -
>> -	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
>> -	if (!smmu)
>> -		return ERR_PTR(-ENODEV);
>> -
>> -	master = kzalloc(sizeof(*master), GFP_KERNEL);
>> -	if (!master)
>> -		return ERR_PTR(-ENOMEM);
>> -
>> -	master->dev = dev;
>> -	master->smmu = smmu;
>> -	master->sids = fwspec->ids;
>> -	master->num_sids = fwspec->num_ids;
>> -	dev_iommu_priv_set(dev, master);
>> -
>> -	/* Check the SIDs are in range of the SMMU and our stream table */
>> -	for (i = 0; i < master->num_sids; i++) {
>> -		u32 sid = master->sids[i];
>> -
>> -		if (!arm_smmu_sid_in_range(smmu, sid)) {
>> -			ret = -ERANGE;
>> -			goto err_free_master;
>> -		}
>> -
>> -		/* Ensure l2 strtab is initialised */
>> -		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
>> -			ret = arm_smmu_init_l2_strtab(smmu, sid);
>> -			if (ret)
>> -				goto err_free_master;
>> -		}
>> -	}
>> -
>> -	return &smmu->iommu;
>> -
>> -err_free_master:
>> -	kfree(master);
>> -	dev_iommu_priv_set(dev, NULL);
>> -	return ERR_PTR(ret);
>> -}
>> -
>> -static void arm_smmu_release_device(struct device *dev)
>> -{
>> -	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>> -	struct arm_smmu_master *master;
>> -
>> -	if (!fwspec || fwspec->ops != &arm_smmu_ops)
>> -		return;
>> -
>> -	master = dev_iommu_priv_get(dev);
>> -	arm_smmu_detach_dev(master);
>> -	kfree(master);
>> -	iommu_fwspec_free(dev);
>> -}
>> -
>> -static struct iommu_group *arm_smmu_device_group(struct device *dev)
>> -{
>> -	struct iommu_group *group;
>> -
>> -	/*
>> -	 * We don't support devices sharing stream IDs other than PCI RID
>> -	 * aliases, since the necessary ID-to-device lookup becomes rather
>> -	 * impractical given a potential sparse 32-bit stream ID space.
>> -	 */
>> -	if (dev_is_pci(dev))
>> -		group = pci_device_group(dev);
>> -	else
>> -		group = generic_device_group(dev);
>> -
>> -	return group;
>> -}
>> -
>> -static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
>> -				    enum iommu_attr attr, void *data)
>> -{
>> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> -
>> -	switch (domain->type) {
>> -	case IOMMU_DOMAIN_UNMANAGED:
>> -		switch (attr) {
>> -		case DOMAIN_ATTR_NESTING:
>> -			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
>> -			return 0;
>> -		default:
>> -			return -ENODEV;
>> -		}
>> -		break;
>> -	case IOMMU_DOMAIN_DMA:
>> -		switch (attr) {
>> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
>> -			*(int *)data = smmu_domain->non_strict;
>> -			return 0;
>> -		default:
>> -			return -ENODEV;
>> -		}
>> -		break;
>> -	default:
>> -		return -EINVAL;
>> -	}
>> -}
>> -
>> -static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
>> -				    enum iommu_attr attr, void *data)
>> -{
>> -	int ret = 0;
>> -	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
>> -
>> -	mutex_lock(&smmu_domain->init_mutex);
>> -
>> -	switch (domain->type) {
>> -	case IOMMU_DOMAIN_UNMANAGED:
>> -		switch (attr) {
>> -		case DOMAIN_ATTR_NESTING:
>> -			if (smmu_domain->smmu) {
>> -				ret = -EPERM;
>> -				goto out_unlock;
>> -			}
>> -
>> -			if (*(int *)data)
>> -				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
>> -			else
>> -				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
>> -			break;
>> -		default:
>> -			ret = -ENODEV;
>> -		}
>> -		break;
>> -	case IOMMU_DOMAIN_DMA:
>> -		switch(attr) {
>> -		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
>> -			smmu_domain->non_strict = *(int *)data;
>> -			break;
>> -		default:
>> -			ret = -ENODEV;
>> -		}
>> -		break;
>> -	default:
>> -		ret = -EINVAL;
>> -	}
>> -
>> -out_unlock:
>> -	mutex_unlock(&smmu_domain->init_mutex);
>> -	return ret;
>> -}
>> -
>> -static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
>> -{
>> -	return iommu_fwspec_add_ids(dev, args->args, 1);
>> -}
>> -
>> -static void arm_smmu_get_resv_regions(struct device *dev,
>> -				      struct list_head *head)
>> -{
>> -	struct iommu_resv_region *region;
>> -	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
>> -
>> -	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
>> -					 prot, IOMMU_RESV_SW_MSI);
>> -	if (!region)
>> -		return;
>> -
>> -	list_add_tail(&region->list, head);
>> -
>> -	iommu_dma_get_resv_regions(dev, head);
>> -}
>> -
>> -static struct iommu_ops arm_smmu_ops = {
>> -	.capable		= arm_smmu_capable,
>> -	.domain_alloc		= arm_smmu_domain_alloc,
>> -	.domain_free		= arm_smmu_domain_free,
>> -	.attach_dev		= arm_smmu_attach_dev,
>> -	.map			= arm_smmu_map,
>> -	.unmap			= arm_smmu_unmap,
>> -	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
>> -	.iotlb_sync		= arm_smmu_iotlb_sync,
>> -	.iova_to_phys		= arm_smmu_iova_to_phys,
>> -	.probe_device		= arm_smmu_probe_device,
>> -	.release_device		= arm_smmu_release_device,
>> -	.device_group		= arm_smmu_device_group,
>> -	.domain_get_attr	= arm_smmu_domain_get_attr,
>> -	.domain_set_attr	= arm_smmu_domain_set_attr,
>> -	.of_xlate		= arm_smmu_of_xlate,
>> -	.get_resv_regions	= arm_smmu_get_resv_regions,
>> -	.put_resv_regions	= generic_iommu_put_resv_regions,
>> -	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
>> -};
>> -
>>  /* Probing and initialisation functions */
>>  static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
>>  				   struct arm_smmu_queue *q,
>> @@ -2406,7 +2032,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>>  	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
>>  	case IDR0_STALL_MODEL_FORCE:
>>  		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
>> -		fallthrough;
> 
> We should keep all the fallthroughs documented. So I think we want to introduce fallthrough in Xen as well.

Ok, I will keep the fallthroughs documented in this patch.

The fallthrough implementation in XEN should be another patch. I am not sure when we can implement it, but we will try.

> 
>>  	case IDR0_STALL_MODEL_STALL:
>>  		smmu->features |= ARM_SMMU_FEAT_STALLS;
>>  	}
>> @@ -2426,7 +2051,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>>  	switch (FIELD_GET(IDR0_TTF, reg)) {
>>  	case IDR0_TTF_AARCH32_64:
>>  		smmu->ias = 40;
>> -		fallthrough;
>>  	case IDR0_TTF_AARCH64:
>>  		break;
>>  	default:
>> @@ -2515,21 +2139,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>>  	default:
>>  		dev_info(smmu->dev,
>>  			"unknown output address size. Truncating to 48-bit\n");
>> -		fallthrough;
>>  	case IDR5_OAS_48_BIT:
>>  		smmu->oas = 48;
>>  	}
>>  -	if (arm_smmu_ops.pgsize_bitmap == -1UL)
>> -		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
>> -	else
>> -		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
>> -
>> -	/* Set the DMA mask for our table walker */
>> -	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
>> -		dev_warn(smmu->dev,
>> -			 "failed to set DMA mask for table walker\n");
>> -
>>  	smmu->ias = max(smmu->ias, smmu->oas);
>>    	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
>> @@ -2595,9 +2208,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
>>    	parse_driver_options(smmu);
>>  -	if (of_dma_is_coherent(dev->of_node))
>> -		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
>> -
>>  	return ret;
>>  }
>>  @@ -2609,55 +2219,6 @@ static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
>>  		return SZ_128K;
>>  }
>>  -static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
>> -{
>> -	int err;
>> -
>> -#ifdef CONFIG_PCI
>> -	if (pci_bus_type.iommu_ops != ops) {
>> -		err = bus_set_iommu(&pci_bus_type, ops);
>> -		if (err)
>> -			return err;
>> -	}
>> -#endif
>> -#ifdef CONFIG_ARM_AMBA
>> -	if (amba_bustype.iommu_ops != ops) {
>> -		err = bus_set_iommu(&amba_bustype, ops);
>> -		if (err)
>> -			goto err_reset_pci_ops;
>> -	}
>> -#endif
>> -	if (platform_bus_type.iommu_ops != ops) {
>> -		err = bus_set_iommu(&platform_bus_type, ops);
>> -		if (err)
>> -			goto err_reset_amba_ops;
>> -	}
>> -
>> -	return 0;
>> -
>> -err_reset_amba_ops:
>> -#ifdef CONFIG_ARM_AMBA
>> -	bus_set_iommu(&amba_bustype, NULL);
>> -#endif
>> -err_reset_pci_ops: __maybe_unused;
>> -#ifdef CONFIG_PCI
>> -	bus_set_iommu(&pci_bus_type, NULL);
>> -#endif
>> -	return err;
>> -}
>> -
>> -static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
>> -				      resource_size_t size)
> 
> It seems a bit odd that you are removing the function but not the callers. Shouldn't you keep this function until the next patch?
> 
Ok, I will keep this in this patch and will remove it in the next patch.

>> -{
>> -	struct resource res = {
>> -		.flags = IORESOURCE_MEM,
>> -		.start = start,
>> -		.end = start + size - 1,
>> -	};
>> -
>> -	return devm_ioremap_resource(dev, &res);
>> -}
>> -
>>  static int arm_smmu_device_probe(struct platform_device *pdev)
>>  {
>>  	int irq, ret;
>> @@ -2785,21 +2346,3 @@ static const struct of_device_id arm_smmu_of_match[] = {
>>  	{ .compatible = "arm,smmu-v3", },
>>  	{ },
>>  };
>> -MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
>> -
>> -static struct platform_driver arm_smmu_driver = {
>> -	.driver	= {
>> -		.name			= "arm-smmu-v3",
>> -		.of_match_table		= arm_smmu_of_match,
>> -		.suppress_bind_attrs	= true,
>> -	},
>> -	.probe	= arm_smmu_device_probe,
>> -	.remove	= arm_smmu_device_remove,
>> -	.shutdown = arm_smmu_device_shutdown,
>> -};
>> -module_platform_driver(arm_smmu_driver);
>> -
>> -MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
>> -MODULE_AUTHOR("Will Deacon <will@kernel.org>");
>> -MODULE_ALIAS("platform:arm-smmu-v3");
>> -MODULE_LICENSE("GPL v2");
> 
> -- 
> Julien Grall
Julien Grall Dec. 4, 2020, 9:05 a.m. UTC | #6
Hi Rahul,

On 03/12/2020 14:33, Rahul Singh wrote:
>> On 2 Dec 2020, at 2:45 pm, Julien Grall <julien@xen.org> wrote:
>>> -
>>> -static struct iommu_device *arm_smmu_probe_device(struct device *dev)
>>> -{
>>
>> Most of the code here looks useful to Xen. I think you want to keep the code and re-use it afterwards.
> 
> Ok. I removed the code here and added the XEN-compatible code to add devices in the next patch.
> I will keep it in this patch and will modify the code to make it XEN compatible.

In general, it is preferred to keep the code rather than dropping it in 
patch A and then adding it back differently in patch B. This makes it 
easier to check that the outcome of the function is mostly the same.

>>> -static struct iommu_ops arm_smmu_ops = {
>>> -	.capable		= arm_smmu_capable,
>>> -	.domain_alloc		= arm_smmu_domain_alloc,
>>> -	.domain_free		= arm_smmu_domain_free,
>>> -	.attach_dev		= arm_smmu_attach_dev,
>>> -	.map			= arm_smmu_map,
>>> -	.unmap			= arm_smmu_unmap,
>>> -	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
>>> -	.iotlb_sync		= arm_smmu_iotlb_sync,
>>> -	.iova_to_phys		= arm_smmu_iova_to_phys,
>>> -	.probe_device		= arm_smmu_probe_device,
>>> -	.release_device		= arm_smmu_release_device,
>>> -	.device_group		= arm_smmu_device_group,
>>> -	.domain_get_attr	= arm_smmu_domain_get_attr,
>>> -	.domain_set_attr	= arm_smmu_domain_set_attr,
>>> -	.of_xlate		= arm_smmu_of_xlate,
>>> -	.get_resv_regions	= arm_smmu_get_resv_regions,
>>> -	.put_resv_regions	= generic_iommu_put_resv_regions,
>>> -	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
>>> -};
>>> -
>>>   /* Probing and initialisation functions */
>>>   static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
>>>   				   struct arm_smmu_queue *q,
>>> @@ -2406,7 +2032,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>>>   	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
>>>   	case IDR0_STALL_MODEL_FORCE:
>>>   		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
>>> -		fallthrough;
>>
>> We should keep all the fallthroughs documented. So I think we want to introduce fallthrough in Xen as well.
> 
> Ok, I will keep the fallthroughs documented in this patch.
> 
> The fallthrough implementation in XEN should be another patch. I am not sure when we can implement it, but we will try.

Yes, I didn't ask to implement "fallthrough" in this patch, but instead 
as a prerequisite patch.

I would implement it in include/xen/compiler.h.
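For instance, a minimal sketch of such a definition, mirroring the Linux one (the exact compiler guard here is an assumption, not a patch from this thread):

/* sketch for xen/include/xen/compiler.h */
#if (!defined(__clang__) && (__GNUC__ >= 7))
# define fallthrough    __attribute__((__fallthrough__))
#else
# define fallthrough    do {} while (0)  /* fallthrough */
#endif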

Cheers,
Rahul Singh Dec. 7, 2020, 10:36 a.m. UTC | #7
Hello Julien,	

> On 4 Dec 2020, at 9:05 am, Julien Grall <julien@xen.org> wrote:
> 
> Hi Rahul,
> 
> On 03/12/2020 14:33, Rahul Singh wrote:
>>> On 2 Dec 2020, at 2:45 pm, Julien Grall <julien@xen.org> wrote:
>>>> -
>>>> -static struct iommu_device *arm_smmu_probe_device(struct device *dev)
>>>> -{
>>> 
>>> Most of the code here looks useful to Xen. I think you want to keep the code and re-use it afterwards.
>> Ok. I removed the code here and added the XEN-compatible code to add devices in the next patch.
>> I will keep it in this patch and will modify the code to make it XEN compatible.
> 
> In general, it is preferred to keep the code rather than dropping it in patch A and then adding it back differently in patch B. This makes it easier to check that the outcome of the function is mostly the same.
> 

Ok.

>>>> -static struct iommu_ops arm_smmu_ops = {
>>>> -	.capable		= arm_smmu_capable,
>>>> -	.domain_alloc		= arm_smmu_domain_alloc,
>>>> -	.domain_free		= arm_smmu_domain_free,
>>>> -	.attach_dev		= arm_smmu_attach_dev,
>>>> -	.map			= arm_smmu_map,
>>>> -	.unmap			= arm_smmu_unmap,
>>>> -	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
>>>> -	.iotlb_sync		= arm_smmu_iotlb_sync,
>>>> -	.iova_to_phys		= arm_smmu_iova_to_phys,
>>>> -	.probe_device		= arm_smmu_probe_device,
>>>> -	.release_device		= arm_smmu_release_device,
>>>> -	.device_group		= arm_smmu_device_group,
>>>> -	.domain_get_attr	= arm_smmu_domain_get_attr,
>>>> -	.domain_set_attr	= arm_smmu_domain_set_attr,
>>>> -	.of_xlate		= arm_smmu_of_xlate,
>>>> -	.get_resv_regions	= arm_smmu_get_resv_regions,
>>>> -	.put_resv_regions	= generic_iommu_put_resv_regions,
>>>> -	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
>>>> -};
>>>> -
>>>>  /* Probing and initialisation functions */
>>>>  static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
>>>>  				   struct arm_smmu_queue *q,
>>>> @@ -2406,7 +2032,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
>>>>  	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
>>>>  	case IDR0_STALL_MODEL_FORCE:
>>>>  		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
>>>> -		fallthrough;
>>> 
>>> We should keep all the fallthroughs documented. So I think we want to introduce fallthrough in Xen as well.
>> Ok, I will keep the fallthroughs documented in this patch.
>> The fallthrough implementation in XEN should be another patch. I am not sure when we can implement it, but we will try.
> 
> Yes, I didn't ask to implement "fallthrough" in this patch, but instead as a prerequisite patch.
> 
> I would implement it in include/xen/compiler.h.

Ok. I will implement and share a patch for “__attribute__((__fallthrough__))”, but for SMMUv3 is it ok if we proceed with “/* fallthrough */” for now?

As the “fallthrough” implementation is common to all architectures, it requires review from all stakeholders as per my understanding. I don’t want to block SMMUv3 because of this.
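As an example, the first hunk quoted above would then keep its documentation in comment form until the macro lands (sketch):

	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
		/* fallthrough */
	case IDR0_STALL_MODEL_STALL:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}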

Regards,
Rahul
 
> 
> Cheers,
> 
> -- 
> Julien Grall
Julien Grall Dec. 7, 2020, 10:55 a.m. UTC | #8
On 07/12/2020 10:36, Rahul Singh wrote:
>> I would implement it in include/xen/compiler.h.
> 
> Ok. I will implement and share a patch for “__attribute__((__fallthrough__))”, but for SMMUv3 is it ok if we proceed with “/* fallthrough */” for now?
The first approach should always be to work with the community to 
introduce such functionality tree-wide. Note that I am not suggesting 
replacing the existing "/* fallthrough */" with "fallthrough".

If it is taking too long or the task is significant, then we can discuss 
ways to temporarily work around it.

I don't believe we are at that stage yet here.

> As the “fallthrough” implementation is common to all architectures, it requires review from all stakeholders as per my understanding. I don’t want to block SMMUv3 because of this.

It only requires review from "THE REST" maintainers.

Cheers,
diff mbox series

Patch

diff --git a/xen/drivers/passthrough/arm/smmu-v3.c b/xen/drivers/passthrough/arm/smmu-v3.c
index 40e3890a58..55d1cba194 100644
--- a/xen/drivers/passthrough/arm/smmu-v3.c
+++ b/xen/drivers/passthrough/arm/smmu-v3.c
@@ -402,13 +402,7 @@ 
 #define ARM_SMMU_CMDQ_SYNC_TIMEOUT_US	1000000 /* 1s! */
 #define ARM_SMMU_CMDQ_SYNC_SPIN_COUNT	10
 
-#define MSI_IOVA_BASE			0x8000000
-#define MSI_IOVA_LENGTH			0x100000
-
 static bool disable_bypass = 1;
-module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
-MODULE_PARM_DESC(disable_bypass,
-	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
 
 enum pri_resp {
 	PRI_RESP_DENY = 0,
@@ -599,7 +593,6 @@  struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct mutex			init_mutex; /* Protects smmu pointer */
 
-	struct io_pgtable_ops		*pgtbl_ops;
 	bool				non_strict;
 
 	enum arm_smmu_domain_stage	stage;
@@ -1297,74 +1290,6 @@  static void arm_smmu_tlb_inv_context(void *cookie)
 	arm_smmu_cmdq_issue_sync(smmu);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-					  size_t granule, bool leaf, void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	struct arm_smmu_cmdq_ent cmd = {
-		.tlbi = {
-			.leaf	= leaf,
-			.addr	= iova,
-		},
-	};
-
-	cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
-	cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
-
-	do {
-		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
-		cmd.tlbi.addr += granule;
-	} while (size -= granule);
-}
-
-static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
-					 unsigned long iova, size_t granule,
-					 void *cookie)
-{
-	arm_smmu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
-}
-
-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
-				  size_t granule, void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-
-	arm_smmu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
-	arm_smmu_cmdq_issue_sync(smmu);
-}
-
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
-				  size_t granule, void *cookie)
-{
-	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-
-	arm_smmu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
-	arm_smmu_cmdq_issue_sync(smmu);
-}
-
-static const struct iommu_flush_ops arm_smmu_flush_ops = {
-	.tlb_flush_all	= arm_smmu_tlb_inv_context,
-	.tlb_flush_walk = arm_smmu_tlb_inv_walk,
-	.tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
-	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
-};
-
-/* IOMMU API */
-static bool arm_smmu_capable(enum iommu_cap cap)
-{
-	switch (cap) {
-	case IOMMU_CAP_CACHE_COHERENCY:
-		return true;
-	case IOMMU_CAP_NOEXEC:
-		return true;
-	default:
-		return false;
-	}
-}
-
 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
 	struct arm_smmu_domain *smmu_domain;
@@ -1421,7 +1346,6 @@  static void arm_smmu_domain_free(struct iommu_domain *domain)
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
 
 	iommu_put_dma_cookie(domain);
-	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
 
 	if (cfg->vmid)
 		arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
@@ -1429,7 +1353,6 @@  static void arm_smmu_domain_free(struct iommu_domain *domain)
 	kfree(smmu_domain);
 }
 
-
 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
 				       struct arm_smmu_master *master,
 				       struct io_pgtable_cfg *pgtbl_cfg)
@@ -1437,7 +1360,6 @@  static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
 	int vmid;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
-	typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
 
 	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
 	if (vmid < 0)
@@ -1461,20 +1383,12 @@  static int arm_smmu_domain_finalise(struct iommu_domain *domain,
 {
 	int ret;
 	unsigned long ias, oas;
-	enum io_pgtable_fmt fmt;
-	struct io_pgtable_cfg pgtbl_cfg;
-	struct io_pgtable_ops *pgtbl_ops;
 	int (*finalise_stage_fn)(struct arm_smmu_domain *,
 				 struct arm_smmu_master *,
 				 struct io_pgtable_cfg *);
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
-		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
-		return 0;
-	}
-
 	/* Restrict the stage to what we can actually support */
 	smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
 
@@ -1483,40 +1397,17 @@  static int arm_smmu_domain_finalise(struct iommu_domain *domain,
 	case ARM_SMMU_DOMAIN_S2:
 		ias = smmu->ias;
 		oas = smmu->oas;
-		fmt = ARM_64_LPAE_S2;
 		finalise_stage_fn = arm_smmu_domain_finalise_s2;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	pgtbl_cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap	= smmu->pgsize_bitmap,
-		.ias		= ias,
-		.oas		= oas,
-		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
-		.tlb		= &arm_smmu_flush_ops,
-		.iommu_dev	= smmu->dev,
-	};
-
-	if (smmu_domain->non_strict)
-		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
-
-	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
-	if (!pgtbl_ops)
-		return -ENOMEM;
-
-	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
-	domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
-	domain->geometry.force_aperture = true;
-
 	ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
 	if (ret < 0) {
-		free_io_pgtable_ops(pgtbl_ops);
 		return ret;
 	}
 
-	smmu_domain->pgtbl_ops = pgtbl_ops;
 	return 0;
 }
 
@@ -1626,71 +1517,6 @@  out_unlock:
 	return ret;
 }
 
-static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
-			phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
-{
-	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
-
-	if (!ops)
-		return -ENODEV;
-
-	return ops->map(ops, iova, paddr, size, prot, gfp);
-}
-
-static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
-			     size_t size, struct iommu_iotlb_gather *gather)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
-
-	if (!ops)
-		return 0;
-
-	return ops->unmap(ops, iova, size, gather);
-}
-
-static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	if (smmu_domain->smmu)
-		arm_smmu_tlb_inv_context(smmu_domain);
-}
-
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
-				struct iommu_iotlb_gather *gather)
-{
-	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
-
-	if (smmu)
-		arm_smmu_cmdq_issue_sync(smmu);
-}
-
-static phys_addr_t
-arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
-{
-	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
-
-	if (domain->type == IOMMU_DOMAIN_IDENTITY)
-		return iova;
-
-	if (!ops)
-		return 0;
-
-	return ops->iova_to_phys(ops, iova);
-}
-
-static struct platform_driver arm_smmu_driver;
-
-static
-struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
-{
-	struct device *dev = driver_find_device_by_fwnode(&arm_smmu_driver.driver,
-							  fwnode);
-	put_device(dev);
-	return dev ? dev_get_drvdata(dev) : NULL;
-}
-
 static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
 {
 	unsigned long limit = smmu->strtab_cfg.num_l1_ents;
@@ -1701,206 +1527,6 @@  static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
 	return sid < limit;
 }
 
-static struct iommu_ops arm_smmu_ops;
-
-static struct iommu_device *arm_smmu_probe_device(struct device *dev)
-{
-	int i, ret;
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master *master;
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
-	if (!fwspec || fwspec->ops != &arm_smmu_ops)
-		return ERR_PTR(-ENODEV);
-
-	if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
-		return ERR_PTR(-EBUSY);
-
-	smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
-	if (!smmu)
-		return ERR_PTR(-ENODEV);
-
-	master = kzalloc(sizeof(*master), GFP_KERNEL);
-	if (!master)
-		return ERR_PTR(-ENOMEM);
-
-	master->dev = dev;
-	master->smmu = smmu;
-	master->sids = fwspec->ids;
-	master->num_sids = fwspec->num_ids;
-	dev_iommu_priv_set(dev, master);
-
-	/* Check the SIDs are in range of the SMMU and our stream table */
-	for (i = 0; i < master->num_sids; i++) {
-		u32 sid = master->sids[i];
-
-		if (!arm_smmu_sid_in_range(smmu, sid)) {
-			ret = -ERANGE;
-			goto err_free_master;
-		}
-
-		/* Ensure l2 strtab is initialised */
-		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
-			ret = arm_smmu_init_l2_strtab(smmu, sid);
-			if (ret)
-				goto err_free_master;
-		}
-	}
-
-	return &smmu->iommu;
-
-err_free_master:
-	kfree(master);
-	dev_iommu_priv_set(dev, NULL);
-	return ERR_PTR(ret);
-}
-
-static void arm_smmu_release_device(struct device *dev)
-{
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-	struct arm_smmu_master *master;
-
-	if (!fwspec || fwspec->ops != &arm_smmu_ops)
-		return;
-
-	master = dev_iommu_priv_get(dev);
-	arm_smmu_detach_dev(master);
-	kfree(master);
-	iommu_fwspec_free(dev);
-}
-
-static struct iommu_group *arm_smmu_device_group(struct device *dev)
-{
-	struct iommu_group *group;
-
-	/*
-	 * We don't support devices sharing stream IDs other than PCI RID
-	 * aliases, since the necessary ID-to-device lookup becomes rather
-	 * impractical given a potential sparse 32-bit stream ID space.
-	 */
-	if (dev_is_pci(dev))
-		group = pci_device_group(dev);
-	else
-		group = generic_device_group(dev);
-
-	return group;
-}
-
-static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
-				    enum iommu_attr attr, void *data)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	switch (domain->type) {
-	case IOMMU_DOMAIN_UNMANAGED:
-		switch (attr) {
-		case DOMAIN_ATTR_NESTING:
-			*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
-			return 0;
-		default:
-			return -ENODEV;
-		}
-		break;
-	case IOMMU_DOMAIN_DMA:
-		switch (attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			*(int *)data = smmu_domain->non_strict;
-			return 0;
-		default:
-			return -ENODEV;
-		}
-		break;
-	default:
-		return -EINVAL;
-	}
-}
-
-static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
-				    enum iommu_attr attr, void *data)
-{
-	int ret = 0;
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
-	mutex_lock(&smmu_domain->init_mutex);
-
-	switch (domain->type) {
-	case IOMMU_DOMAIN_UNMANAGED:
-		switch (attr) {
-		case DOMAIN_ATTR_NESTING:
-			if (smmu_domain->smmu) {
-				ret = -EPERM;
-				goto out_unlock;
-			}
-
-			if (*(int *)data)
-				smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
-			else
-				smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-			break;
-		default:
-			ret = -ENODEV;
-		}
-		break;
-	case IOMMU_DOMAIN_DMA:
-		switch(attr) {
-		case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-			smmu_domain->non_strict = *(int *)data;
-			break;
-		default:
-			ret = -ENODEV;
-		}
-		break;
-	default:
-		ret = -EINVAL;
-	}
-
-out_unlock:
-	mutex_unlock(&smmu_domain->init_mutex);
-	return ret;
-}
-
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
-{
-	return iommu_fwspec_add_ids(dev, args->args, 1);
-}
-
-static void arm_smmu_get_resv_regions(struct device *dev,
-				      struct list_head *head)
-{
-	struct iommu_resv_region *region;
-	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
-
-	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
-					 prot, IOMMU_RESV_SW_MSI);
-	if (!region)
-		return;
-
-	list_add_tail(&region->list, head);
-
-	iommu_dma_get_resv_regions(dev, head);
-}
-
-static struct iommu_ops arm_smmu_ops = {
-	.capable		= arm_smmu_capable,
-	.domain_alloc		= arm_smmu_domain_alloc,
-	.domain_free		= arm_smmu_domain_free,
-	.attach_dev		= arm_smmu_attach_dev,
-	.map			= arm_smmu_map,
-	.unmap			= arm_smmu_unmap,
-	.flush_iotlb_all	= arm_smmu_flush_iotlb_all,
-	.iotlb_sync		= arm_smmu_iotlb_sync,
-	.iova_to_phys		= arm_smmu_iova_to_phys,
-	.probe_device		= arm_smmu_probe_device,
-	.release_device		= arm_smmu_release_device,
-	.device_group		= arm_smmu_device_group,
-	.domain_get_attr	= arm_smmu_domain_get_attr,
-	.domain_set_attr	= arm_smmu_domain_set_attr,
-	.of_xlate		= arm_smmu_of_xlate,
-	.get_resv_regions	= arm_smmu_get_resv_regions,
-	.put_resv_regions	= generic_iommu_put_resv_regions,
-	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
-};
-
 /* Probing and initialisation functions */
 static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
 				   struct arm_smmu_queue *q,
@@ -2406,7 +2032,6 @@  static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	switch (FIELD_GET(IDR0_STALL_MODEL, reg)) {
 	case IDR0_STALL_MODEL_FORCE:
 		smmu->features |= ARM_SMMU_FEAT_STALL_FORCE;
-		fallthrough;
 	case IDR0_STALL_MODEL_STALL:
 		smmu->features |= ARM_SMMU_FEAT_STALLS;
 	}
@@ -2426,7 +2051,6 @@  static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	switch (FIELD_GET(IDR0_TTF, reg)) {
 	case IDR0_TTF_AARCH32_64:
 		smmu->ias = 40;
-		fallthrough;
 	case IDR0_TTF_AARCH64:
 		break;
 	default:
@@ -2515,21 +2139,10 @@  static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
 	default:
 		dev_info(smmu->dev,
 			"unknown output address size. Truncating to 48-bit\n");
-		fallthrough;
 	case IDR5_OAS_48_BIT:
 		smmu->oas = 48;
 	}
 
-	if (arm_smmu_ops.pgsize_bitmap == -1UL)
-		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
-	else
-		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
-
-	/* Set the DMA mask for our table walker */
-	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
-		dev_warn(smmu->dev,
-			 "failed to set DMA mask for table walker\n");
-
 	smmu->ias = max(smmu->ias, smmu->oas);
 
 	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
@@ -2595,9 +2208,6 @@  static int arm_smmu_device_dt_probe(struct platform_device *pdev,
 
 	parse_driver_options(smmu);
 
-	if (of_dma_is_coherent(dev->of_node))
-		smmu->features |= ARM_SMMU_FEAT_COHERENCY;
-
 	return ret;
 }
 
@@ -2609,55 +2219,6 @@  static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu)
 		return SZ_128K;
 }
 
-static int arm_smmu_set_bus_ops(struct iommu_ops *ops)
-{
-	int err;
-
-#ifdef CONFIG_PCI
-	if (pci_bus_type.iommu_ops != ops) {
-		err = bus_set_iommu(&pci_bus_type, ops);
-		if (err)
-			return err;
-	}
-#endif
-#ifdef CONFIG_ARM_AMBA
-	if (amba_bustype.iommu_ops != ops) {
-		err = bus_set_iommu(&amba_bustype, ops);
-		if (err)
-			goto err_reset_pci_ops;
-	}
-#endif
-	if (platform_bus_type.iommu_ops != ops) {
-		err = bus_set_iommu(&platform_bus_type, ops);
-		if (err)
-			goto err_reset_amba_ops;
-	}
-
-	return 0;
-
-err_reset_amba_ops:
-#ifdef CONFIG_ARM_AMBA
-	bus_set_iommu(&amba_bustype, NULL);
-#endif
-err_reset_pci_ops: __maybe_unused;
-#ifdef CONFIG_PCI
-	bus_set_iommu(&pci_bus_type, NULL);
-#endif
-	return err;
-}
-
-static void __iomem *arm_smmu_ioremap(struct device *dev, resource_size_t start,
-				      resource_size_t size)
-{
-	struct resource res = {
-		.flags = IORESOURCE_MEM,
-		.start = start,
-		.end = start + size - 1,
-	};
-
-	return devm_ioremap_resource(dev, &res);
-}
-
 static int arm_smmu_device_probe(struct platform_device *pdev)
 {
 	int irq, ret;
@@ -2785,21 +2346,3 @@  static const struct of_device_id arm_smmu_of_match[] = {
 	{ .compatible = "arm,smmu-v3", },
 	{ },
 };
-MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
-
-static struct platform_driver arm_smmu_driver = {
-	.driver	= {
-		.name			= "arm-smmu-v3",
-		.of_match_table		= arm_smmu_of_match,
-		.suppress_bind_attrs	= true,
-	},
-	.probe	= arm_smmu_device_probe,
-	.remove	= arm_smmu_device_remove,
-	.shutdown = arm_smmu_device_shutdown,
-};
-module_platform_driver(arm_smmu_driver);
-
-MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
-MODULE_AUTHOR("Will Deacon <will@kernel.org>");
-MODULE_ALIAS("platform:arm-smmu-v3");
-MODULE_LICENSE("GPL v2");