@@ -9,27 +9,19 @@
#include "iommu-sva.h"
static DEFINE_MUTEX(iommu_sva_lock);
-static DEFINE_IDA(iommu_global_pasid_ida);
/* Allocate a PASID for the mm within range (inclusive) */
-static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
+static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
{
int ret = 0;
- if (!pasid_valid(min) || !pasid_valid(max) ||
- min == 0 || max < min)
- return -EINVAL;
-
mutex_lock(&iommu_sva_lock);
/* Is a PASID already associated with this mm? */
- if (pasid_valid(mm->pasid)) {
- if (mm->pasid < min || mm->pasid > max)
- ret = -EOVERFLOW;
+ if (pasid_valid(mm->pasid))
goto out;
- }
- ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
- if (ret < min)
+ ret = iommu_alloc_global_pasid_dev(dev);
+ if (!pasid_valid(ret))
goto out;
mm->pasid = ret;
ret = 0;
@@ -58,15 +50,10 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
{
struct iommu_domain *domain;
struct iommu_sva *handle;
- ioasid_t max_pasids;
int ret;
- max_pasids = dev->iommu->max_pasids;
- if (!max_pasids)
- return ERR_PTR(-EOPNOTSUPP);
-
/* Allocate mm->pasid if necessary. */
- ret = iommu_sva_alloc_pasid(mm, IOMMU_DEF_RID_PASID + 1, max_pasids - 1);
+ ret = iommu_sva_alloc_pasid(mm, dev);
if (ret)
return ERR_PTR(ret);
@@ -211,5 +198,5 @@ void mm_pasid_drop(struct mm_struct *mm)
if (likely(!pasid_valid(mm->pasid)))
return;
- ida_free(&iommu_global_pasid_ida, mm->pasid);
+ iommu_free_global_pasid(mm->pasid);
}
@@ -38,6 +38,7 @@
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
+static DEFINE_IDA(iommu_global_pasid_ida);
static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
@@ -3450,3 +3451,26 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
return domain;
}
+
+ioasid_t iommu_alloc_global_pasid_dev(struct device *dev)
+{
+ int ret;
+ ioasid_t max;
+
+ /* max is the number of PASIDs supported; the IDA upper bound is inclusive */
+ max = dev_iommu_get_max_pasids(dev);
+ if (!max)
+  return IOMMU_PASID_INVALID;
+
+ ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_DEF_RID_PASID + 1, max - 1, GFP_KERNEL);
+ return ret < 0 ? IOMMU_PASID_INVALID : ret;
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid_dev);
+
+void iommu_free_global_pasid(ioasid_t pasid)
+{
+ if (WARN_ON(!pasid_valid(pasid)))
+ return;
+
+ ida_free(&iommu_global_pasid_ida, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_free_global_pasid);
@@ -724,6 +724,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
unsigned int type);
+ioasid_t iommu_alloc_global_pasid_dev(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -1090,6 +1092,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
{
return NULL;
}
+
+static inline ioasid_t iommu_alloc_global_pasid_dev(struct device *dev)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */
/**
Devices that use Intel ENQCMD to submit work must use global PASIDs, since
the PASID is stored in a per-CPU MSR. When such a device needs to submit
work for in-kernel DMA with PASID, it must allocate PASIDs from the same
global number space to avoid conflicts.

Move the global PASID allocation APIs from the SVA code into the IOMMU
core. Device drivers are expected to use the allocated PASIDs to attach to
the appropriate IOMMU domains.

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
---
v5: move PASID range check inside the API so that device drivers only pass
    in struct device* (Kevin)
v4: move dummy functions outside ifdef CONFIG_IOMMU_SVA (Baolu)
---
 drivers/iommu/iommu-sva.c | 25 ++++++-------------------
 drivers/iommu/iommu.c     | 24 ++++++++++++++++++++++++
 include/linux/iommu.h     |  9 +++++++++
 3 files changed, 39 insertions(+), 19 deletions(-)
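
As an illustration of the intended driver-side flow (not part of this patch),
a hypothetical driver doing in-kernel DMA with PASID might pair the new
helpers with the existing iommu_attach_device_pasid()/iommu_detach_device_pasid()
calls roughly as sketched below; the example_* function names, the chosen
error code, and the surrounding setup are made up for the sketch:

#include <linux/errno.h>
#include <linux/iommu.h>

/* Hypothetical sketch: allocate a global PASID and attach it to a domain. */
static int example_enable_kernel_pasid(struct device *dev,
				       struct iommu_domain *domain,
				       ioasid_t *out_pasid)
{
	ioasid_t pasid;
	int ret;

	pasid = iommu_alloc_global_pasid_dev(dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret) {
		iommu_free_global_pasid(pasid);
		return ret;
	}

	*out_pasid = pasid;
	return 0;
}

/* Hypothetical sketch: the teardown path mirrors the attach order. */
static void example_disable_kernel_pasid(struct device *dev,
					 struct iommu_domain *domain,
					 ioasid_t pasid)
{
	iommu_detach_device_pasid(domain, dev, pasid);
	iommu_free_global_pasid(pasid);
}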