@@ -130,6 +130,33 @@ struct vfio_regions {
#define IS_DOMAIN_IN_CONTAINER(iommu) ((iommu->external_domain) || \
(!list_empty(&iommu->domain_list)))
+struct domain_capsule { /* bundles the (group, domain) pair plus op payload for iommu_group_for_each_dev() callbacks */
+ struct vfio_group *group; /* vfio group whose devices are iterated */
+ struct iommu_domain *domain; /* iommu domain the (un)bind applies to */
+ void *data; /* op-specific payload: iommu_gpasid_{bind,unbind}_data */
+};
+
+/* iommu->lock must be held. Returns the single group of a nesting container, or NULL. */
+static struct vfio_group *vfio_find_nesting_group(struct vfio_iommu *iommu)
+{
+ struct vfio_domain *d;
+ struct vfio_group *g, *group = NULL;
+
+ if (!iommu->nesting_info) /* not a nesting-capable container */
+ return NULL;
+
+ /* only support singleton container with nesting type */
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ list_for_each_entry(g, &d->group_list, next) {
+ if (!group) { /* remember the first group seen */
+ group = g;
+ break; /* NOTE(review): exits inner loop only; outer loop keeps walking — a direct return would be clearer */
+ }
+ }
+ }
+ return group; /* NULL when no group is attached yet */
+}
+
static int put_pfn(unsigned long pfn, int prot);
/*
@@ -2014,6 +2041,39 @@ static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
return ret;
}
+static int vfio_dev_bind_gpasid_fn(struct device *dev, void *data) /* per-device callback: bind guest page table for a PASID */
+{
+ struct domain_capsule *dc = (struct domain_capsule *)data;
+ struct iommu_gpasid_bind_data *bind_data =
+ (struct iommu_gpasid_bind_data *) dc->data; /* caller stashed user-provided bind data here */
+
+ return iommu_sva_bind_gpasid(dc->domain, dev, bind_data); /* non-zero aborts iommu_group_for_each_dev() */
+}
+
+static int vfio_dev_unbind_gpasid_fn(struct device *dev, void *data) /* per-device callback: undo a gpasid bind */
+{
+ struct domain_capsule *dc = (struct domain_capsule *)data;
+ struct iommu_gpasid_unbind_data *unbind_data =
+ (struct iommu_gpasid_unbind_data *) dc->data;
+
+ iommu_sva_unbind_gpasid(dc->domain, dev, unbind_data); /* result deliberately ignored: unbind is best-effort */
+ return 0; /* always 0 so iteration visits every device in the group */
+}
+
+static void vfio_group_unbind_gpasid_fn(ioasid_t pasid, void *data) /* per-PASID callback: unbind this PASID on all devices of dc->group */
+{
+ struct domain_capsule *dc = (struct domain_capsule *) data;
+ struct iommu_gpasid_unbind_data unbind_data; /* built locally; only valid for the duration of this call */
+
+ unbind_data.argsz = sizeof(unbind_data);
+ unbind_data.flags = 0;
+ unbind_data.pasid = pasid;
+
+ dc->data = &unbind_data; /* repoint payload at the on-stack data consumed by the device callback */
+ iommu_group_for_each_dev(dc->group->iommu_group,
+ dc, vfio_dev_unbind_gpasid_fn);
+}
+
static void vfio_iommu_type1_detach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
@@ -2055,6 +2115,21 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (!group)
continue;
+ if (iommu->nesting_info && iommu->vmm &&
+ (iommu->nesting_info->features &
+ IOMMU_NESTING_FEAT_BIND_PGTBL)) {
+ struct domain_capsule dc = { .group = group,
+ .domain = domain->domain,
+ .data = NULL };
+
+ /*
+ * Unbind page tables bound with system wide PASIDs
+ * which are allocated to user space.
+ */
+ vfio_mm_for_each_pasid(iommu->vmm, &dc,
+ vfio_group_unbind_gpasid_fn);
+ }
+
vfio_iommu_detach_group(domain, group);
list_del(&group->next);
kfree(group);
@@ -2453,6 +2528,120 @@ static int vfio_iommu_type1_pasid_request(struct vfio_iommu *iommu,
}
}
+static long vfio_iommu_handle_pgtbl_op(struct vfio_iommu *iommu,
+ bool is_bind, void *data) /* bind (true) or unbind (false) a guest page table; data is the user-copied UAPI struct */
+{
+ struct iommu_nesting_info *info;
+ struct domain_capsule dc = { .data = data };
+ struct vfio_group *group;
+ struct vfio_domain *domain;
+ int ret;
+
+ mutex_lock(&iommu->lock);
+
+ info = iommu->nesting_info;
+ if (!info || !(info->features & IOMMU_NESTING_FEAT_BIND_PGTBL)) { /* hardware must advertise nested page-table binding */
+ ret = -ENOTSUPP;
+ goto out_unlock_iommu;
+ }
+
+ if (!iommu->vmm) { /* no vfio_mm: user never allocated PASIDs on this container */
+ ret = -EINVAL;
+ goto out_unlock_iommu;
+ }
+
+ group = vfio_find_nesting_group(iommu);
+ if (!group) { /* no group attached yet */
+ ret = -EINVAL;
+ goto out_unlock_iommu;
+ }
+
+ domain = list_first_entry(&iommu->domain_list,
+ struct vfio_domain, next); /* nesting containers are singletons, so first entry is the only one */
+ dc.group = group;
+ dc.domain = domain->domain;
+
+ /* Avoid race with other containers within the same process */
+ vfio_mm_pasid_lock(iommu->vmm);
+
+ if (is_bind) {
+ ret = iommu_group_for_each_dev(group->iommu_group, &dc,
+ vfio_dev_bind_gpasid_fn);
+ if (ret) /* partial bind: roll back devices already bound */
+ iommu_group_for_each_dev(group->iommu_group, &dc,
+ vfio_dev_unbind_gpasid_fn); /* NOTE(review): dc.data still points at bind data but the callback casts it to iommu_gpasid_unbind_data — confirm both layouts place argsz/flags/pasid identically */
+ } else {
+ iommu_group_for_each_dev(group->iommu_group,
+ &dc, vfio_dev_unbind_gpasid_fn);
+ ret = 0; /* unbind never fails from the caller's view */
+ }
+
+ vfio_mm_pasid_unlock(iommu->vmm);
+out_unlock_iommu:
+ mutex_unlock(&iommu->lock);
+ return ret;
+}
+
+static long vfio_iommu_type1_nesting_op(struct vfio_iommu *iommu,
+ unsigned long arg) /* VFIO_IOMMU_NESTING_OP ioctl handler: validates header, copies op payload, dispatches */
+{
+ struct vfio_iommu_type1_nesting_op hdr;
+ unsigned int minsz;
+ u8 *data = NULL;
+ size_t data_size;
+ int ret;
+
+ minsz = offsetofend(struct vfio_iommu_type1_nesting_op, flags); /* fixed header: argsz + flags */
+
+ if (copy_from_user(&hdr, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (hdr.argsz < minsz || hdr.flags & ~VFIO_NESTING_OP_MASK) /* reject short headers and unknown flag bits */
+ return -EINVAL;
+
+ /* Get the current IOMMU UAPI data size */
+ switch (hdr.flags & VFIO_NESTING_OP_MASK) {
+ case VFIO_IOMMU_NESTING_OP_BIND_PGTBL:
+ data_size = sizeof(struct iommu_gpasid_bind_data);
+ break;
+ case VFIO_IOMMU_NESTING_OP_UNBIND_PGTBL:
+ data_size = sizeof(struct iommu_gpasid_unbind_data);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((hdr.argsz - minsz) > data_size) {
+ /* User data > current kernel */
+ return -E2BIG;
+ }
+
+ data = kzalloc(data_size, GFP_KERNEL); /* zero-fill so a short user copy leaves trailing fields zeroed */
+ if (!data)
+ return -ENOMEM;
+
+ if (copy_from_user(data, (void __user *)(arg + minsz),
+ hdr.argsz - minsz)) { /* copy only what user supplied; may be less than data_size */
+ ret = -EFAULT;
+ goto out_free;
+ }
+
+ switch (hdr.flags & VFIO_NESTING_OP_MASK) {
+ case VFIO_IOMMU_NESTING_OP_BIND_PGTBL:
+ ret = vfio_iommu_handle_pgtbl_op(iommu, true, data);
+ break;
+ case VFIO_IOMMU_NESTING_OP_UNBIND_PGTBL:
+ ret = vfio_iommu_handle_pgtbl_op(iommu, false, data);
+ break;
+ default:
+ ret = -EINVAL; /* unreachable: op already validated above, kept for safety */
+ }
+
+out_free:
+ kfree(data);
+ return ret;
+}
+
static long vfio_iommu_type1_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
@@ -2469,6 +2658,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
return vfio_iommu_type1_unmap_dma(iommu, arg);
case VFIO_IOMMU_PASID_REQUEST:
return vfio_iommu_type1_pasid_request(iommu, arg);
+ case VFIO_IOMMU_NESTING_OP:
+ return vfio_iommu_type1_nesting_op(iommu, arg);
}
return -ENOTTY;
@@ -30,6 +30,7 @@ struct vfio_mm {
struct kref kref;
struct vfio_mm_token token;
int ioasid_sid;
+ struct mutex pasid_lock;
int pasid_quota;
struct list_head next;
};
@@ -97,6 +98,7 @@ struct vfio_mm *vfio_mm_get_from_task(struct task_struct *task)
kref_init(&vmm->kref);
vmm->token.val = (unsigned long long) mm;
vmm->pasid_quota = pasid_quota;
+ mutex_init(&vmm->pasid_lock);
list_add(&vmm->next, &vfio_pasid.vfio_mm_list);
out:
@@ -134,12 +136,40 @@ void vfio_pasid_free_range(struct vfio_mm *vmm,
* IOASID core will notify PASID users (e.g. IOMMU driver) to
* teardown necessary structures depending on the to-be-freed
* PASID.
+ * Hold pasid_lock to avoid race with PASID usages like bind/
+ * unbind page tables to requested PASID.
*/
+ mutex_lock(&vmm->pasid_lock);
for (; pasid <= max; pasid++)
ioasid_free(pasid);
+ mutex_unlock(&vmm->pasid_lock);
}
EXPORT_SYMBOL_GPL(vfio_pasid_free_range);
+int vfio_mm_for_each_pasid(struct vfio_mm *vmm, void *data,
+ void (*fn)(ioasid_t id, void *data)) /* invoke fn for every PASID in vmm's ioasid set, serialized against free */
+{
+ int ret;
+
+ mutex_lock(&vmm->pasid_lock); /* exclude concurrent vfio_pasid_free_range() */
+ ret = ioasid_set_for_each_ioasid(vmm->ioasid_sid, fn, data);
+ mutex_unlock(&vmm->pasid_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vfio_mm_for_each_pasid);
+
+void vfio_mm_pasid_lock(struct vfio_mm *vmm) /* take pasid_lock: callers hold it across PASID users like bind/unbind */
+{
+ mutex_lock(&vmm->pasid_lock);
+}
+EXPORT_SYMBOL_GPL(vfio_mm_pasid_lock);
+
+void vfio_mm_pasid_unlock(struct vfio_mm *vmm) /* release pasid_lock taken by vfio_mm_pasid_lock() */
+{
+ mutex_unlock(&vmm->pasid_lock);
+}
+EXPORT_SYMBOL_GPL(vfio_mm_pasid_unlock);
+
static int __init vfio_pasid_init(void)
{
mutex_init(&vfio_pasid.vfio_mm_lock);
@@ -103,6 +103,11 @@ int vfio_mm_ioasid_sid(struct vfio_mm *vmm);
extern int vfio_pasid_alloc(struct vfio_mm *vmm, int min, int max);
extern void vfio_pasid_free_range(struct vfio_mm *vmm,
ioasid_t min, ioasid_t max);
+extern int vfio_mm_for_each_pasid(struct vfio_mm *vmm, void *data,
+ void (*fn)(ioasid_t id, void *data)); /* walk all PASIDs of vmm under pasid_lock */
+extern void vfio_mm_pasid_lock(struct vfio_mm *vmm); /* serialize PASID use against free */
+extern void vfio_mm_pasid_unlock(struct vfio_mm *vmm);
+
#else
static inline struct vfio_mm *vfio_mm_get_from_task(struct task_struct *task)
{
@@ -127,6 +132,21 @@ static inline void vfio_pasid_free_range(struct vfio_mm *vmm,
ioasid_t min, ioasid_t max)
{
}
+
+static inline int vfio_mm_for_each_pasid(struct vfio_mm *vmm, void *data,
+ void (*fn)(ioasid_t id, void *data)) /* !CONFIG_VFIO_PASID stub */
+{
+ return -ENOTTY; /* PASID support compiled out */
+}
+
+static inline void vfio_mm_pasid_lock(struct vfio_mm *vmm) /* !CONFIG_VFIO_PASID stub: nothing to lock */
+{
+}
+
+static inline void vfio_mm_pasid_unlock(struct vfio_mm *vmm) /* !CONFIG_VFIO_PASID stub */
+{
+}
+
#endif /* CONFIG_VFIO_PASID */
/*
@@ -876,6 +876,36 @@ struct vfio_iommu_type1_pasid_request {
#define VFIO_IOMMU_PASID_REQUEST _IO(VFIO_TYPE, VFIO_BASE + 22)
+/**
+ * VFIO_IOMMU_NESTING_OP - _IOW(VFIO_TYPE, VFIO_BASE + 23,
+ * struct vfio_iommu_type1_nesting_op)
+ *
+ * This interface allows user space to utilize the nesting IOMMU
+ * capabilities as reported through VFIO_IOMMU_GET_INFO.
+ *
+ * @data[] types defined for each op:
+ * +=================+===============================================+
+ * | NESTING OP | @data[] |
+ * +=================+===============================================+
+ * | BIND_PGTBL | struct iommu_gpasid_bind_data |
+ * +-----------------+-----------------------------------------------+
+ * | UNBIND_PGTBL | struct iommu_gpasid_unbind_data |
+ * +-----------------+-----------------------------------------------+
+ *
+ * returns: 0 on success, -errno on failure.
+ */
+struct vfio_iommu_type1_nesting_op {
+ __u32 argsz; /* total size of this struct incl. data[], set by user */
+ __u32 flags; /* op in the low 16 bits; upper bits must be zero */
+#define VFIO_NESTING_OP_MASK (0xffff) /* lower 16-bits for op */
+ __u8 data[]; /* op-specific payload, see table in the comment above */
+};
+
+#define VFIO_IOMMU_NESTING_OP_BIND_PGTBL (0)
+#define VFIO_IOMMU_NESTING_OP_UNBIND_PGTBL (1)
+
+#define VFIO_IOMMU_NESTING_OP _IO(VFIO_TYPE, VFIO_BASE + 23)
+
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*