diff mbox series

[v4,10/18] iommu/amd: Add domain_alloc_user based domain allocation

Message ID 20231018202715.69734-11-joao.m.martins@oracle.com (mailing list archive)
State New, archived
Headers show
Series IOMMUFD Dirty Tracking | expand

Commit Message

Joao Martins Oct. 18, 2023, 8:27 p.m. UTC
Add the domain_alloc_user op implementation. To that end, refactor
amd_iommu_domain_alloc() to receive a dev pointer and flags, and rename
it so that it becomes a common function shared with the
domain_alloc_user() implementation. The sole difference in the
domain_alloc_user() path is that it also initializes the other fields
that iommu_domain_alloc() would, so the iommu domain is returned
correctly initialized from a single function.

This is in preparation to add dirty enforcement on AMD implementation of
domain_alloc_user.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 drivers/iommu/amd/iommu.c | 47 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 44 insertions(+), 3 deletions(-)

Comments

Jason Gunthorpe Oct. 18, 2023, 10:58 p.m. UTC | #1
On Wed, Oct 18, 2023 at 09:27:07PM +0100, Joao Martins wrote:
> -static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
> +static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
> +						  struct device *dev,
> +						  u32 flags)
>  {
>  	struct protection_domain *domain;
> +	struct amd_iommu *iommu = NULL;
> +
> +	if (dev) {
> +		iommu = rlookup_amd_iommu(dev);
> +		if (!iommu)
> +			return ERR_PTR(-ENODEV);
> +	}
>  
>  	/*
>  	 * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
>  	 * default to use IOMMU_DOMAIN_DMA[_FQ].
>  	 */
>  	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
> -		return NULL;
> +		return ERR_PTR(-EINVAL);
>  
>  	domain = protection_domain_alloc(type);
>  	if (!domain)
> -		return NULL;
> +		return ERR_PTR(-ENOMEM);
>  
>  	domain->domain.geometry.aperture_start = 0;
>  	domain->domain.geometry.aperture_end   = dma_max_address();
>  	domain->domain.geometry.force_aperture = true;
>  
> +	if (iommu) {
> +		domain->domain.type = type;
> +		domain->domain.pgsize_bitmap =
> +			iommu->iommu.ops->pgsize_bitmap;
> +		domain->domain.ops =
> +			iommu->iommu.ops->default_domain_ops;
> +	}
> +
>  	return &domain->domain;
>  }

In the end this is probably not enough refactoring, but this driver
needs so much work we should just wait till the already written series
get merged.

eg domain_alloc_paging should just invoke domain_alloc_user with some
null arguments if the driver is constructed this way

> +static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev,
> +							u32 flags)
> +{
> +	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
> +
> +	if (flags & IOMMU_HWPT_ALLOC_NEST_PARENT)
> +		return ERR_PTR(-EOPNOTSUPP);

This should be written as a list of flags the driver *supports* not
that it rejects

if (flags)
	return ERR_PTR(-EOPNOTSUPP);

Jason
Joao Martins Oct. 18, 2023, 11:54 p.m. UTC | #2
On 18/10/2023 23:58, Jason Gunthorpe wrote:
> On Wed, Oct 18, 2023 at 09:27:07PM +0100, Joao Martins wrote:
>> -static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
>> +static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
>> +						  struct device *dev,
>> +						  u32 flags)
>>  {
>>  	struct protection_domain *domain;
>> +	struct amd_iommu *iommu = NULL;
>> +
>> +	if (dev) {
>> +		iommu = rlookup_amd_iommu(dev);
>> +		if (!iommu)
>> +			return ERR_PTR(-ENODEV);
>> +	}
>>  
>>  	/*
>>  	 * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
>>  	 * default to use IOMMU_DOMAIN_DMA[_FQ].
>>  	 */
>>  	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
>> -		return NULL;
>> +		return ERR_PTR(-EINVAL);
>>  
>>  	domain = protection_domain_alloc(type);
>>  	if (!domain)
>> -		return NULL;
>> +		return ERR_PTR(-ENOMEM);
>>  
>>  	domain->domain.geometry.aperture_start = 0;
>>  	domain->domain.geometry.aperture_end   = dma_max_address();
>>  	domain->domain.geometry.force_aperture = true;
>>  
>> +	if (iommu) {
>> +		domain->domain.type = type;
>> +		domain->domain.pgsize_bitmap =
>> +			iommu->iommu.ops->pgsize_bitmap;
>> +		domain->domain.ops =
>> +			iommu->iommu.ops->default_domain_ops;
>> +	}
>> +
>>  	return &domain->domain;
>>  }
> 
> In the end this is probably not enough refactoring, but this driver
> needs so much work we should just wait till the already written series
> get merged.
> 

That is quite a road ahead :(( -- and AMD IOMMU hardware is the only hardware I
am equipped to test this with.

But I understand if we end up going this way

> eg domain_alloc_paging should just invoke domain_alloc_user with some
> null arguments if the driver is constructed this way
> 
>> +static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev,
>> +							u32 flags)
>> +{
>> +	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
>> +
>> +	if (flags & IOMMU_HWPT_ALLOC_NEST_PARENT)
>> +		return ERR_PTR(-EOPNOTSUPP);
> 
> This should be written as a list of flags the driver *supports* not
> that it rejects
> 
> if (flags)
> 	return ERR_PTR(-EOPNOTSUPP);

Will fix (this was a silly mistake, as I'm doing the right thing on Intel).
diff mbox series

Patch

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 95bd7c25ba6f..292a09b2fbbf 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -37,6 +37,7 @@ 
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/dma.h>
+#include <uapi/linux/iommufd.h>
 
 #include "amd_iommu.h"
 #include "../dma-iommu.h"
@@ -2155,28 +2156,67 @@  static inline u64 dma_max_address(void)
 	return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
 }
 
-static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
+						  struct device *dev,
+						  u32 flags)
 {
 	struct protection_domain *domain;
+	struct amd_iommu *iommu = NULL;
+
+	if (dev) {
+		iommu = rlookup_amd_iommu(dev);
+		if (!iommu)
+			return ERR_PTR(-ENODEV);
+	}
 
 	/*
 	 * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
 	 * default to use IOMMU_DOMAIN_DMA[_FQ].
 	 */
 	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
-		return NULL;
+		return ERR_PTR(-EINVAL);
 
 	domain = protection_domain_alloc(type);
 	if (!domain)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	domain->domain.geometry.aperture_start = 0;
 	domain->domain.geometry.aperture_end   = dma_max_address();
 	domain->domain.geometry.force_aperture = true;
 
+	if (iommu) {
+		domain->domain.type = type;
+		domain->domain.pgsize_bitmap =
+			iommu->iommu.ops->pgsize_bitmap;
+		domain->domain.ops =
+			iommu->iommu.ops->default_domain_ops;
+	}
+
 	return &domain->domain;
 }
 
+static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
+{
+	struct iommu_domain *domain;
+
+	domain = do_iommu_domain_alloc(type, NULL, 0);
+	if (IS_ERR(domain))
+		return NULL;
+
+	return domain;
+}
+
+static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev,
+							u32 flags)
+{
+	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+
+	if (flags & IOMMU_HWPT_ALLOC_NEST_PARENT)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	return do_iommu_domain_alloc(type, dev, flags);
+}
+
 static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
 	struct protection_domain *domain;
@@ -2464,6 +2504,7 @@  static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
 const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
+	.domain_alloc_user = amd_iommu_domain_alloc_user,
 	.probe_device = amd_iommu_probe_device,
 	.release_device = amd_iommu_release_device,
 	.probe_finalize = amd_iommu_probe_finalize,