[v3,03/16] iommufd/viommu: Add IOMMU_VDEVICE_ALLOC ioctl

Message ID df942c216c0e5d1740ba5add73375becf0713dfd.1728491532.git.nicolinc@nvidia.com (mailing list archive)
State New, archived
Series [v3,01/16] iommufd/viommu: Introduce IOMMUFD_OBJ_VDEVICE and its related struct

Commit Message

Nicolin Chen Oct. 9, 2024, 4:38 p.m. UTC
Introduce a new ioctl to allocate a vDEVICE object. Since a vDEVICE object
is a connection between an iommufd_device object and an iommufd_viommu
object, require both as ioctl inputs and take a refcount on each in the
ioctl handler.

Add to the vIOMMU object a "vdevs" xarray, indexed by a per-vIOMMU virtual
device ID, which can be:
 - Virtual StreamID on a nested ARM SMMUv3, an index to a Stream Table
 - Virtual DeviceID on a nested AMD IOMMU, an index to a Device Table
 - Virtual ID on a nested Intel VT-d IOMMU, an index to a Context Table

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/iommufd/iommufd_private.h | 11 +++
 include/linux/iommufd.h                 |  2 +
 include/uapi/linux/iommufd.h            | 26 +++++++
 drivers/iommu/iommufd/main.c            |  6 ++
 drivers/iommu/iommufd/viommu.c          | 91 +++++++++++++++++++++++++
 5 files changed, 136 insertions(+)
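
For context (not part of the patch itself): a minimal userspace sketch of
driving the new ioctl, assuming the iommufd file descriptor and the vIOMMU
and device object IDs were obtained beforehand, e.g. via IOMMU_VIOMMU_ALLOC
and device binding:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/*
 * Allocate a vDEVICE connecting (viommu_id, dev_id) at the guest-visible
 * virt_id. Returns 0 and fills *out_vdevice_id on success, -1 on failure.
 */
static int vdevice_alloc(int iommufd, uint32_t viommu_id, uint32_t dev_id,
			 uint64_t virt_id, uint32_t *out_vdevice_id)
{
	struct iommu_vdevice_alloc cmd = {
		.size = sizeof(cmd),
		.viommu_id = viommu_id,
		.dev_id = dev_id,
		.virt_id = virt_id,	/* e.g. vSID on ARM SMMUv3 */
	};

	if (ioctl(iommufd, IOMMU_VDEVICE_ALLOC, &cmd))
		return -1;
	*out_vdevice_id = cmd.out_vdevice_id;
	return 0;
}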

Comments

Jason Gunthorpe Oct. 17, 2024, 6:52 p.m. UTC | #1
On Wed, Oct 09, 2024 at 09:38:15AM -0700, Nicolin Chen wrote:
> +void iommufd_vdevice_destroy(struct iommufd_object *obj)
> +{
> +	struct iommufd_vdevice *old, *vdev =
> +		container_of(obj, struct iommufd_vdevice, obj);
> +	struct iommufd_viommu *viommu = vdev->viommu;
> +	struct iommufd_device *idev = vdev->idev;
> +
> +	if (viommu->ops && viommu->ops->vdevice_free)
> +		viommu->ops->vdevice_free(vdev);
> +
> +	old = xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
> +	if (old)
> +		WARN_ON(old != vdev);
> +
> +	refcount_dec(&viommu->obj.users);
> +	refcount_dec(&idev->obj.users);
> +	idev->vdev = NULL;

This should hold the igroup lock when touching vdev?

> +int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
> +{
> +	struct iommu_vdevice_alloc *cmd = ucmd->cmd;
> +	struct iommufd_vdevice *vdev, *curr;
> +	struct iommufd_viommu *viommu;
> +	struct iommufd_device *idev;
> +	u64 virt_id = cmd->virt_id;
> +	int rc = 0;
> +
> +	if (virt_id > ULONG_MAX)
> +		return -EINVAL;
> +
> +	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
> +	if (IS_ERR(viommu))
> +		return PTR_ERR(viommu);
> +
> +	idev = iommufd_get_device(ucmd, cmd->dev_id);
> +	if (IS_ERR(idev)) {
> +		rc = PTR_ERR(idev);
> +		goto out_put_viommu;
> +	}
> +
> +	mutex_lock(&idev->igroup->lock);
> +	if (idev->vdev) {
> +		rc = -EEXIST;
> +		goto out_unlock_igroup;
> +	}

Otherwise this won't work right

> +	if (viommu->ops && viommu->ops->vdevice_alloc)
> +		vdev = viommu->ops->vdevice_alloc(viommu, idev->dev, virt_id);
> +	else
> +		vdev = __iommufd_vdevice_alloc(ucmd->ictx, sizeof(*vdev));
> +	if (IS_ERR(vdev)) {
> +		rc = PTR_ERR(vdev);
> +		goto out_unlock_igroup;
> +	}
> +
> +	vdev->idev = idev;
> +	vdev->id = virt_id;
> +	vdev->viommu = viommu;
> +
> +	idev->vdev = vdev;
> +	refcount_inc(&idev->obj.users);
> +	refcount_inc(&viommu->obj.users);
> +
> +	curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL);
> +	if (curr) {
> +		rc = xa_err(curr) ? : -EBUSY;
> +		goto out_abort;
> +	}
> +
> +	cmd->out_vdevice_id = vdev->obj.id;
> +	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
> +	if (rc)
> +		goto out_abort;
> +	iommufd_object_finalize(ucmd->ictx, &vdev->obj);
> +	goto out_unlock_igroup;
> +
> +out_abort:
> +	iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj);

But be mindful of this abort, it doesn't want to be inside the lock if
it also gets the lock.. fail_nth should be updated to cover these new
ioctls to look for tricky things like that

But the design looks OK

Jason
Nicolin Chen Oct. 20, 2024, 1:42 a.m. UTC | #2
On Thu, Oct 17, 2024 at 03:52:30PM -0300, Jason Gunthorpe wrote:
> > +	if (viommu->ops && viommu->ops->vdevice_alloc)
> > +		vdev = viommu->ops->vdevice_alloc(viommu, idev->dev, virt_id);
> > +	else
> > +		vdev = __iommufd_vdevice_alloc(ucmd->ictx, sizeof(*vdev));
> > +	if (IS_ERR(vdev)) {
> > +		rc = PTR_ERR(vdev);
> > +		goto out_unlock_igroup;
> > +	}
> > +
> > +	vdev->idev = idev;
> > +	vdev->id = virt_id;
> > +	vdev->viommu = viommu;
> > +
> > +	idev->vdev = vdev;
> > +	refcount_inc(&idev->obj.users);
> > +	refcount_inc(&viommu->obj.users);
> > +
> > +	curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL);
> > +	if (curr) {
> > +		rc = xa_err(curr) ? : -EBUSY;
> > +		goto out_abort;
> > +	}
> > +
> > +	cmd->out_vdevice_id = vdev->obj.id;
> > +	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
> > +	if (rc)
> > +		goto out_abort;
> > +	iommufd_object_finalize(ucmd->ictx, &vdev->obj);
> > +	goto out_unlock_igroup;
> > +
> > +out_abort:
> > +	iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj);
> 
> But be mindful of this abort, it doesn't want to be inside the lock if
> it also gets the lock.. fail_nth should be updated to cover these new
> ioctls to look for tricky things like that

I added an abort() beside destroy():

+void iommufd_vdevice_abort(struct iommufd_object *obj)
+{
+       struct iommufd_vdevice *old, *vdev =
+               container_of(obj, struct iommufd_vdevice, obj);
+       struct iommufd_viommu *viommu = vdev->viommu;
+       struct iommufd_device *idev = vdev->idev;
+
+       lockdep_assert_not_held(&idev->igroup->lock);
+
+       if (viommu->ops && viommu->ops->vdevice_free)
+               viommu->ops->vdevice_free(vdev);
+
+       old = xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
+       if (old)
+               WARN_ON(old != vdev);
+
+       refcount_dec(&viommu->obj.users);
+       refcount_dec(&idev->obj.users);
+       idev->vdev = NULL;
+}
+
+void iommufd_vdevice_destroy(struct iommufd_object *obj)
+{
+       struct iommufd_vdevice *vdev =
+               container_of(obj, struct iommufd_vdevice, obj);
+
+       mutex_lock(&vdev->idev->igroup->lock);
+       iommufd_vdevice_abort(obj);
+       mutex_unlock(&vdev->idev->igroup->lock);
+}
----------------------------------------------------------

And I added fail_nth coverage for IOMMU_VIOMMU/VDEVICE_ALLOC cmds.
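
Roughly, the new coverage does the following (a simplified sketch using raw
ioctls rather than the iommufd_utils.h helper macros; the TEST_FAIL_NTH
harness and the mock-domain setup producing dev_id/hwpt_id are assumed, and
the iommu_viommu_alloc field names follow patch 02 of this series):

/* Both allocations must fail cleanly at any injected fault step. */
static int viommu_vdevice_under_fail_nth(int fd, __u32 dev_id, __u32 hwpt_id)
{
	struct iommu_viommu_alloc viommu_cmd = {
		.size = sizeof(viommu_cmd),
		.type = IOMMU_VIOMMU_TYPE_DEFAULT,
		.dev_id = dev_id,
		.hwpt_id = hwpt_id,
	};
	struct iommu_vdevice_alloc vdev_cmd = {
		.size = sizeof(vdev_cmd),
		.dev_id = dev_id,
		.virt_id = 0x99,	/* arbitrary per-vIOMMU virtual ID */
	};

	if (ioctl(fd, IOMMU_VIOMMU_ALLOC, &viommu_cmd))
		return -1;	/* no refcounts may leak on failure */
	vdev_cmd.viommu_id = viommu_cmd.out_viommu_id;
	if (ioctl(fd, IOMMU_VDEVICE_ALLOC, &vdev_cmd))
		return -1;	/* abort path must not recurse on igroup lock */
	return 0;
}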

Thanks
Nicolin
Jason Gunthorpe Oct. 21, 2024, 12:22 p.m. UTC | #3
On Sat, Oct 19, 2024 at 06:42:30PM -0700, Nicolin Chen wrote:
> > But be mindful of this abort, it doesn't want to be inside the lock if
> > it also gets the lock.. fail_nth should be updated to cover these new
> > ioctls to look for tricky things like that
> 
> I added an abort() beside destroy():
> 
> +void iommufd_vdevice_abort(struct iommufd_object *obj)
> +{
> +       struct iommufd_vdevice *old, *vdev =
> +               container_of(obj, struct iommufd_vdevice, obj);
> +       struct iommufd_viommu *viommu = vdev->viommu;
> +       struct iommufd_device *idev = vdev->idev;
> +
> +       lockdep_assert_not_held(&idev->igroup->lock);

???

> +
> +       if (viommu->ops && viommu->ops->vdevice_free)
> +               viommu->ops->vdevice_free(vdev);
> +
> +       old = xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
> +       if (old)
> +               WARN_ON(old != vdev);
> +
> +       refcount_dec(&viommu->obj.users);
> +       refcount_dec(&idev->obj.users);
> +       idev->vdev = NULL;
> +}
> +
> +void iommufd_vdevice_destroy(struct iommufd_object *obj)
> +{
> +       struct iommufd_vdevice *vdev =
> +               container_of(obj, struct iommufd_vdevice, obj);
> +
> +       mutex_lock(&vdev->idev->igroup->lock);
> +       iommufd_vdevice_abort(obj);

When we get it here??

Jason
Nicolin Chen Oct. 21, 2024, 4:42 p.m. UTC | #4
On Mon, Oct 21, 2024 at 09:22:48AM -0300, Jason Gunthorpe wrote:
> On Sat, Oct 19, 2024 at 06:42:30PM -0700, Nicolin Chen wrote:
> > > But be mindful of this abort, it doesn't want to be inside the lock if
> > > it also gets the lock.. fail_nth should be updated to cover these new
> > > ioctls to look for tricky things like that
> > 
> > I added an abort() beside destroy():
> > 
> > +void iommufd_vdevice_abort(struct iommufd_object *obj)
> > +{
> > +       struct iommufd_vdevice *old, *vdev =
> > +               container_of(obj, struct iommufd_vdevice, obj);
> > +       struct iommufd_viommu *viommu = vdev->viommu;
> > +       struct iommufd_device *idev = vdev->idev;
> > +
> > +       lockdep_assert_not_held(&idev->igroup->lock);
> 
> ???

Oops. That's a typo from auto-completion.

> > +void iommufd_vdevice_destroy(struct iommufd_object *obj)
> > +{
> > +       struct iommufd_vdevice *vdev =
> > +               container_of(obj, struct iommufd_vdevice, obj);
> > +
> > +       mutex_lock(&vdev->idev->igroup->lock);
> > +       iommufd_vdevice_abort(obj);
> 
> When we get it here??

LOCKDEP wasn't enabled when I tested it.. Just fixed and retested.

Thanks
Nicolin

Patch

diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 0c56a467e440..e160edb22b67 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -409,6 +409,7 @@  struct iommufd_device {
 	struct iommufd_object obj;
 	struct iommufd_ctx *ictx;
 	struct iommufd_group *igroup;
+	struct iommufd_vdevice *vdev;
 	struct list_head group_item;
 	/* always the physical device */
 	struct device *dev;
@@ -523,8 +524,18 @@  static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
 	return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
 }
 
+static inline struct iommufd_viommu *
+iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
+{
+	return container_of(iommufd_get_object(ucmd->ictx, id,
+					       IOMMUFD_OBJ_VIOMMU),
+			    struct iommufd_viommu, obj);
+}
+
 int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
 void iommufd_viommu_destroy(struct iommufd_object *obj);
+int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
+void iommufd_vdevice_destroy(struct iommufd_object *obj);
 
 #ifdef CONFIG_IOMMUFD_TEST
 int iommufd_test(struct iommufd_ucmd *ucmd);
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 510fc961a9ad..5a630952150d 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -72,6 +72,8 @@  struct iommufd_viommu {
 
 	const struct iommufd_viommu_ops *ops;
 
+	struct xarray vdevs;
+
 	unsigned int type;
 };
 
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 6ee841a8c79b..3039519d71b7 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -52,6 +52,7 @@  enum {
 	IOMMUFD_CMD_HWPT_INVALIDATE = 0x8d,
 	IOMMUFD_CMD_FAULT_QUEUE_ALLOC = 0x8e,
 	IOMMUFD_CMD_VIOMMU_ALLOC = 0x8f,
+	IOMMUFD_CMD_VDEVICE_ALLOC = 0x90,
 };
 
 /**
@@ -894,4 +895,29 @@  struct iommu_viommu_alloc {
 	__u32 out_viommu_id;
 };
 #define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC)
+
+/**
+ * struct iommu_vdevice_alloc - ioctl(IOMMU_VDEVICE_ALLOC)
+ * @size: sizeof(struct iommu_vdevice_alloc)
+ * @viommu_id: vIOMMU ID to associate with the virtual device
+ * @dev_id: The physical device to allocate a virtual instance on the vIOMMU
+ * @__reserved: Must be 0
+ * @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID
+ *           of AMD IOMMU, and vID of a nested Intel VT-d, an index to a Context Table.
+ * @out_vdevice_id: Output virtual instance ID for the allocated object
+ * @__reserved2: Must be 0
+ *
+ * Allocate a virtual device instance (for a physical device) against a vIOMMU.
+ * This instance holds the device's information (related to its vIOMMU) in a VM.
+ */
+struct iommu_vdevice_alloc {
+	__u32 size;
+	__u32 viommu_id;
+	__u32 dev_id;
+	__u32 __reserved;
+	__aligned_u64 virt_id;
+	__u32 out_vdevice_id;
+	__u32 __reserved2;
+};
+#define IOMMU_VDEVICE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VDEVICE_ALLOC)
 #endif
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index cbd0a80b2d67..9a8c3cbecc11 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -302,6 +302,7 @@  union ucmd_buffer {
 	struct iommu_option option;
 	struct iommu_vfio_ioas vfio_ioas;
 	struct iommu_viommu_alloc viommu;
+	struct iommu_vdevice_alloc vdev;
 #ifdef CONFIG_IOMMUFD_TEST
 	struct iommu_test_cmd test;
 #endif
@@ -355,6 +356,8 @@  static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
 		 __reserved),
 	IOCTL_OP(IOMMU_VIOMMU_ALLOC, iommufd_viommu_alloc_ioctl,
 		 struct iommu_viommu_alloc, out_viommu_id),
+	IOCTL_OP(IOMMU_VDEVICE_ALLOC, iommufd_vdevice_alloc_ioctl,
+		 struct iommu_vdevice_alloc, __reserved2),
 #ifdef CONFIG_IOMMUFD_TEST
 	IOCTL_OP(IOMMU_TEST_CMD, iommufd_test, struct iommu_test_cmd, last),
 #endif
@@ -493,6 +496,9 @@  static const struct iommufd_object_ops iommufd_object_ops[] = {
 	[IOMMUFD_OBJ_VIOMMU] = {
 		.destroy = iommufd_viommu_destroy,
 	},
+	[IOMMUFD_OBJ_VDEVICE] = {
+		.destroy = iommufd_vdevice_destroy,
+	},
 #ifdef CONFIG_IOMMUFD_TEST
 	[IOMMUFD_OBJ_SELFTEST] = {
 		.destroy = iommufd_selftest_destroy,
diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
index f512dfb535fd..eb78c928c60f 100644
--- a/drivers/iommu/iommufd/viommu.c
+++ b/drivers/iommu/iommufd/viommu.c
@@ -9,6 +9,8 @@  void iommufd_viommu_destroy(struct iommufd_object *obj)
 	struct iommufd_viommu *viommu =
 		container_of(obj, struct iommufd_viommu, obj);
 
+	xa_destroy(&viommu->vdevs);
+
 	if (viommu->ops && viommu->ops->free)
 		viommu->ops->free(viommu);
 	refcount_dec(&viommu->hwpt->common.obj.users);
@@ -72,6 +74,8 @@  int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
 	 */
 	viommu->iommu_dev = idev->dev->iommu->iommu_dev;
 
+	xa_init(&viommu->vdevs);
+
 	refcount_inc(&viommu->hwpt->common.obj.users);
 
 	cmd->out_viommu_id = viommu->obj.id;
@@ -89,3 +93,90 @@  int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
 	iommufd_put_object(ucmd->ictx, &idev->obj);
 	return rc;
 }
+
+void iommufd_vdevice_destroy(struct iommufd_object *obj)
+{
+	struct iommufd_vdevice *old, *vdev =
+		container_of(obj, struct iommufd_vdevice, obj);
+	struct iommufd_viommu *viommu = vdev->viommu;
+	struct iommufd_device *idev = vdev->idev;
+
+	if (viommu->ops && viommu->ops->vdevice_free)
+		viommu->ops->vdevice_free(vdev);
+
+	old = xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
+	if (old)
+		WARN_ON(old != vdev);
+
+	refcount_dec(&viommu->obj.users);
+	refcount_dec(&idev->obj.users);
+	idev->vdev = NULL;
+}
+
+int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
+{
+	struct iommu_vdevice_alloc *cmd = ucmd->cmd;
+	struct iommufd_vdevice *vdev, *curr;
+	struct iommufd_viommu *viommu;
+	struct iommufd_device *idev;
+	u64 virt_id = cmd->virt_id;
+	int rc = 0;
+
+	if (virt_id > ULONG_MAX)
+		return -EINVAL;
+
+	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
+	if (IS_ERR(viommu))
+		return PTR_ERR(viommu);
+
+	idev = iommufd_get_device(ucmd, cmd->dev_id);
+	if (IS_ERR(idev)) {
+		rc = PTR_ERR(idev);
+		goto out_put_viommu;
+	}
+
+	mutex_lock(&idev->igroup->lock);
+	if (idev->vdev) {
+		rc = -EEXIST;
+		goto out_unlock_igroup;
+	}
+
+	if (viommu->ops && viommu->ops->vdevice_alloc)
+		vdev = viommu->ops->vdevice_alloc(viommu, idev->dev, virt_id);
+	else
+		vdev = __iommufd_vdevice_alloc(ucmd->ictx, sizeof(*vdev));
+	if (IS_ERR(vdev)) {
+		rc = PTR_ERR(vdev);
+		goto out_unlock_igroup;
+	}
+
+	vdev->idev = idev;
+	vdev->id = virt_id;
+	vdev->viommu = viommu;
+
+	idev->vdev = vdev;
+	refcount_inc(&idev->obj.users);
+	refcount_inc(&viommu->obj.users);
+
+	curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL);
+	if (curr) {
+		rc = xa_err(curr) ? : -EBUSY;
+		goto out_abort;
+	}
+
+	cmd->out_vdevice_id = vdev->obj.id;
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+	if (rc)
+		goto out_abort;
+	iommufd_object_finalize(ucmd->ictx, &vdev->obj);
+	goto out_unlock_igroup;
+
+out_abort:
+	iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj);
+out_unlock_igroup:
+	mutex_unlock(&idev->igroup->lock);
+	iommufd_put_object(ucmd->ictx, &idev->obj);
+out_put_viommu:
+	iommufd_put_object(ucmd->ictx, &viommu->obj);
+	return rc;
+}
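
As a closing illustration, a hypothetical driver-side vdevice_alloc op that
embeds the core structure in a larger driver one. The viommu->ictx member
and the offset-0 embedding convention are assumptions based on earlier
patches in this series, not something this patch defines:

struct my_vdevice {
	struct iommufd_vdevice core;	/* assumed at offset 0 for the core */
	u32 vsid;			/* driver-private, e.g. cached vSID */
};

static struct iommufd_vdevice *
my_vdevice_alloc(struct iommufd_viommu *viommu, struct device *dev,
		 u64 virt_id)
{
	struct iommufd_vdevice *vdev;

	/* Core allocator, sized for the embedding driver structure */
	vdev = __iommufd_vdevice_alloc(viommu->ictx, sizeof(struct my_vdevice));
	if (IS_ERR(vdev))
		return vdev;
	container_of(vdev, struct my_vdevice, core)->vsid = (u32)virt_id;
	return vdev;
}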