@@ -1710,6 +1710,25 @@ static int vfio_cache_inv_fn(struct device *dev, void *data)
 	return iommu_cache_invalidate(d, dev, &ustruct->info);
 }
+static int vfio_bind_msi_fn(struct device *dev, void *data)
+{
+	struct vfio_iommu_type1_bind_msi *ustruct =
+		(struct vfio_iommu_type1_bind_msi *)data;
+	struct iommu_domain *d = iommu_get_domain_for_dev(dev);
+
+	return iommu_bind_guest_msi(d, dev, ustruct->iova,
+				    ustruct->gpa, ustruct->size);
+}
+
+static int vfio_unbind_msi_fn(struct device *dev, void *data)
+{
+	dma_addr_t *iova = (dma_addr_t *)data;
+	struct iommu_domain *d = iommu_get_domain_for_dev(dev);
+
+	iommu_unbind_guest_msi(d, dev, *iova);
+	return 0;
+}
+
 static long vfio_iommu_type1_ioctl(void *iommu_data,
 				   unsigned int cmd, unsigned long arg)
 {
@@ -1814,6 +1833,45 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 					      vfio_cache_inv_fn);
 		mutex_unlock(&iommu->lock);
 		return ret;
+	} else if (cmd == VFIO_IOMMU_BIND_MSI) {
+		struct vfio_iommu_type1_bind_msi ustruct;
+		int ret;
+
+		minsz = offsetofend(struct vfio_iommu_type1_bind_msi,
+				    size);
+
+		if (copy_from_user(&ustruct, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (ustruct.argsz < minsz || ustruct.flags)
+			return -EINVAL;
+
+		mutex_lock(&iommu->lock);
+		ret = vfio_iommu_for_each_dev(iommu, &ustruct,
+					      vfio_bind_msi_fn);
+		if (ret)
+			vfio_iommu_for_each_dev(iommu, &ustruct.iova,
+						vfio_unbind_msi_fn);
+		mutex_unlock(&iommu->lock);
+		return ret;
+	} else if (cmd == VFIO_IOMMU_UNBIND_MSI) {
+		struct vfio_iommu_type1_unbind_msi ustruct;
+		int ret;
+
+		minsz = offsetofend(struct vfio_iommu_type1_unbind_msi,
+				    iova);
+
+		if (copy_from_user(&ustruct, (void __user *)arg, minsz))
+			return -EFAULT;
+
+		if (ustruct.argsz < minsz || ustruct.flags)
+			return -EINVAL;
+
+		mutex_lock(&iommu->lock);
+		ret = vfio_iommu_for_each_dev(iommu, &ustruct.iova,
+					      vfio_unbind_msi_fn);
+		mutex_unlock(&iommu->lock);
+		return ret;
 	}
 	return -ENOTTY;
@@ -789,6 +789,35 @@ struct vfio_iommu_type1_cache_invalidate {
 };
 #define VFIO_IOMMU_CACHE_INVALIDATE _IO(VFIO_TYPE, VFIO_BASE + 24)
+/**
+ * VFIO_IOMMU_BIND_MSI - _IOWR(VFIO_TYPE, VFIO_BASE + 25,
+ *			struct vfio_iommu_type1_bind_msi)
+ *
+ * Pass a stage 1 MSI doorbell mapping to the host so that the host
+ * can build a nested stage 2 mapping for it.
+ */
+struct vfio_iommu_type1_bind_msi {
+	__u32 argsz;
+	__u32 flags;
+	__u64 iova;	/* MSI doorbell IOVA in the guest stage 1 */
+	__u64 gpa;	/* guest PA the stage 1 maps the IOVA to */
+	__u64 size;	/* size of the doorbell mapping */
+};
+#define VFIO_IOMMU_BIND_MSI _IO(VFIO_TYPE, VFIO_BASE + 25)
+
+/**
+ * VFIO_IOMMU_UNBIND_MSI - _IOWR(VFIO_TYPE, VFIO_BASE + 26,
+ *			struct vfio_iommu_type1_unbind_msi)
+ *
+ * Unregister an MSI doorbell mapping bound with VFIO_IOMMU_BIND_MSI
+ */
+struct vfio_iommu_type1_unbind_msi {
+	__u32 argsz;
+	__u32 flags;
+	__u64 iova;	/* stage 1 doorbell IOVA to unregister */
+};
+#define VFIO_IOMMU_UNBIND_MSI _IO(VFIO_TYPE, VFIO_BASE + 26)
+
 /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
 /*
This patch adds the VFIO_IOMMU_BIND/UNBIND_MSI ioctls, which aim to
pass/withdraw the guest MSI doorbell binding to/from the host.

Signed-off-by: Eric Auger <eric.auger@redhat.com>

---

v3 -> v4:
- add UNBIND
- unwind on BIND error

v2 -> v3:
- adapt to new proto of bind_guest_msi
- directly use vfio_iommu_for_each_dev

v1 -> v2:
- s/vfio_iommu_type1_guest_msi_binding/vfio_iommu_type1_bind_guest_msi
---
 drivers/vfio/vfio_iommu_type1.c | 58 +++++++++++++++++++++++++++++++++
 include/uapi/linux/vfio.h       | 29 +++++++++++++++++
 2 files changed, 87 insertions(+)
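
For reference, a minimal userspace sketch (not part of the patch) of how a
VMM might drive the two new ioctls through a VFIO container fd.
container_fd, giova, gdb_gpa and dbl_size are placeholder names, and the
sketch assumes the container was already set up for the nested (dual stage)
configuration introduced earlier in this series:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Register the guest stage 1 doorbell mapping giova -> gdb_gpa */
static int bind_guest_msi_doorbell(int container_fd, uint64_t giova,
				   uint64_t gdb_gpa, uint64_t dbl_size)
{
	struct vfio_iommu_type1_bind_msi bind = {
		.argsz = sizeof(bind),
		.flags = 0,
		.iova  = giova,
		.gpa   = gdb_gpa,
		.size  = dbl_size,
	};

	return ioctl(container_fd, VFIO_IOMMU_BIND_MSI, &bind);
}

/* Withdraw the binding previously registered for giova */
static int unbind_guest_msi_doorbell(int container_fd, uint64_t giova)
{
	struct vfio_iommu_type1_unbind_msi unbind = {
		.argsz = sizeof(unbind),
		.flags = 0,
		.iova  = giova,
	};

	return ioctl(container_fd, VFIO_IOMMU_UNBIND_MSI, &unbind);
}

Note that on the kernel side a BIND that fails for one device in the
container is unwound by unbinding the iova from the devices already
processed (see the v4 changelog), so userspace only needs to check the
ioctl return value.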