[v2,04/14] vfio: Provide better generic support for open/release vfio_device_ops

Message ID 4-v2-b6a5582525c9+ff96-vfio_reflck_jgg@nvidia.com (mailing list archive)
State New, archived
Series Provide core infrastructure for managing open/release

Commit Message

Jason Gunthorpe July 20, 2021, 5:42 p.m. UTC
Currently the driver ops have an open/release pair that is called once
each time a device FD is opened or closed. Add an additional set of
open/close_device() ops which are called when the device FD is opened for
the first time and closed for the last time.
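
In rough form, the core implements this as a refcount kept under a per-set
lock (a simplified sketch of the vfio.c changes in this patch):

	/* first FD opened */
	mutex_lock(&device->dev_set->lock);
	device->open_count++;
	if (device->open_count == 1 && device->ops->open_device)
		ret = device->ops->open_device(device);
	mutex_unlock(&device->dev_set->lock);

	/* last FD closed */
	mutex_lock(&device->dev_set->lock);
	if (!--device->open_count && device->ops->close_device)
		device->ops->close_device(device);
	mutex_unlock(&device->dev_set->lock);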

An analysis shows that all of the drivers require this semantic. Some are
open coding it as part of their reflck implementation, and some are just
buggy and miss it completely.

To retain the current semantics PCI and FSL depend on, introduce the idea
of a "device set" which is a grouping of vfio_device's that share the same
lock around opening.

The device set is established by providing a 'set_id' pointer. All
vfio_device's that provide the same pointer will be joined to the same
singleton memory and lock across the whole set. This effectively replaces
the oddly named reflck.

After conversion the set_id will be sourced from:
 - A struct device from a fsl_mc_device (fsl)
 - A struct pci_slot (pci)
 - A struct pci_bus (pci)
 - The struct vfio_device (everything)

The design ensures that the above pointers are live as long as the
vfio_device is registered, so they form reliable unique keys to group
vfio_devices into sets.
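
As an illustration, a hypothetical PCI driver could opt into a shared set
from its probe path (sketch only; my_vfio_pci_probe and my_vdev_alloc are
made-up names, the real conversions follow in later patches):

	static int my_vfio_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *id)
	{
		struct vfio_device *vdev = my_vdev_alloc(pdev); /* hypothetical */
		int ret;

		/* Every device passing the same pointer joins one set/lock */
		ret = vfio_assign_device_set(vdev,
					     pdev->slot ? (void *)pdev->slot :
							  (void *)pdev->bus);
		if (ret)
			return ret;
		return vfio_register_group_dev(vdev);
	}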

This implementation uses xarray instead of searching through the driver
core structures, which simplifies the somewhat tricky locking in this
area.

Following patches convert all the drivers.

Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/vfio/mdev/vfio_mdev.c |  22 +++++
 drivers/vfio/vfio.c           | 146 +++++++++++++++++++++++++++++-----
 include/linux/mdev.h          |   2 +
 include/linux/vfio.h          |  19 +++++
 4 files changed, 167 insertions(+), 22 deletions(-)

Comments

Cornelia Huck July 22, 2021, 2:42 p.m. UTC | #1
On Tue, Jul 20 2021, Jason Gunthorpe <jgg@nvidia.com> wrote:

> Currently the driver ops have an open/release pair that is called once
> each time a device FD is opened or closed. Add an additional set of
> open/close_device() ops which are called when the device FD is opened for
> the first time and closed for the last time.
> [...]

Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Christoph Hellwig July 23, 2021, 7:39 a.m. UTC | #2
> +int vfio_assign_device_set(struct vfio_device *device, void *set_id)
> +{
> +	struct vfio_device_set *alloc_dev_set = NULL;
> +	struct vfio_device_set *dev_set;
> +
> +	if (WARN_ON(!set_id))
> +		return -EINVAL;
> +
> +	/*
> +	 * Atomically acquire a singleton object in the xarray for this set_id
> +	 */
> +again:
> +	xa_lock(&vfio_device_set_xa);
> +	if (alloc_dev_set) {
> +		dev_set = __xa_cmpxchg(&vfio_device_set_xa,
> +				       (unsigned long)set_id, NULL,
> +				       alloc_dev_set, GFP_KERNEL);
> +		if (xa_is_err(dev_set)) {
> +			xa_unlock(&vfio_device_set_xa);
> +			kfree(alloc_dev_set);
> +			return xa_err(dev_set);
> +		}
> +		if (!dev_set)
> +			dev_set = alloc_dev_set;
> +	} else {
> +		dev_set = xa_load(&vfio_device_set_xa, (unsigned long)set_id);
> +	}
> +
> +	if (dev_set) {
> +		dev_set->device_count++;
> +		xa_unlock(&vfio_device_set_xa);
> +		device->dev_set = dev_set;
> +		if (dev_set != alloc_dev_set)
> +			kfree(alloc_dev_set);
> +		return 0;
> +	}
> +	xa_unlock(&vfio_device_set_xa);
> +
> +	if (WARN_ON(alloc_dev_set))
> +		return -EINVAL;
> +
> +	alloc_dev_set = kzalloc(sizeof(*alloc_dev_set), GFP_KERNEL);
> +	if (!alloc_dev_set)
> +		return -ENOMEM;
> +	mutex_init(&alloc_dev_set->lock);
> +	alloc_dev_set->set_id = set_id;
> +	goto again;
> +}
> +EXPORT_SYMBOL_GPL(vfio_assign_device_set);

This looks unnecessarily complicated.  We can just try to load first
and then store it under the same lock, e.g.:

int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{
	unsigned long idx = (unsigned long)set_id;
	struct vfio_device_set *set, *new;
	int err;

	if (WARN_ON(!set_id))
		return -EINVAL;

	xa_lock(&vfio_device_set_xa);
	set = xa_load(&vfio_device_set_xa, idx);
	if (set)
		goto found;
	xa_unlock(&vfio_device_set_xa);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	mutex_init(&new->lock);
	new->set_id = set_id;

	xa_lock(&vfio_device_set_xa);
	set = xa_load(&vfio_device_set_xa, idx);
	if (set) {
		kfree(new);
		goto found;
	}
	err = xa_err(__xa_store(&vfio_device_set_xa, idx, new, GFP_KERNEL));
	if (err) {
		xa_unlock(&vfio_device_set_xa);
		kfree(new);
		return err;
	}
	set = new;

found:
	set->device_count++;
	xa_unlock(&vfio_device_set_xa);

	device->dev_set = set;
	return 0;
}

> +static void vfio_release_device_set(struct vfio_device *device)
> +{
> +	struct vfio_device_set *dev_set = device->dev_set;
> +
> +	if (!dev_set)
> +		return;
> +
> +	xa_lock(&vfio_device_set_xa);
> +	dev_set->device_count--;
> +	if (!dev_set->device_count) {

Nit, but I'd find

	if (!--dev_set->device_count) {

easier to follow as it clearly documents the dec_and_test pattern.
Jason Gunthorpe July 23, 2021, 2:38 p.m. UTC | #3
On Fri, Jul 23, 2021 at 09:39:14AM +0200, Christoph Hellwig wrote:

> This looks unnecessarily complicated.  We can just try to load first
> and then store it under the same lock, e.g.:

Yes indeed, I went with this:

int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{
	unsigned long idx = (unsigned long)set_id;
	struct vfio_device_set *new_dev_set;
	struct vfio_device_set *dev_set;

	if (WARN_ON(!set_id))
		return -EINVAL;

	/*
	 * Atomically acquire a singleton object in the xarray for this set_id
	 */
	xa_lock(&vfio_device_set_xa);
	dev_set = xa_load(&vfio_device_set_xa, idx);
	if (dev_set)
		goto found_get_ref;
	xa_unlock(&vfio_device_set_xa);

	new_dev_set = kzalloc(sizeof(*new_dev_set), GFP_KERNEL);
	if (!new_dev_set)
		return -ENOMEM;
	mutex_init(&new_dev_set->lock);
	new_dev_set->set_id = set_id;

	xa_lock(&vfio_device_set_xa);
	dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new_dev_set,
			       GFP_KERNEL);
	if (!dev_set) {
		dev_set = new_dev_set;
		goto found_get_ref;
	}

	kfree(new_dev_set);
	if (xa_is_err(dev_set)) {
		xa_unlock(&vfio_device_set_xa);
		return xa_err(dev_set);
	}

found_get_ref:
	dev_set->device_count++;
	xa_unlock(&vfio_device_set_xa);
	device->dev_set = dev_set;
	return 0;
}

I'm also half inclined to delete the xa_load() since I think the
common case here is to need the allocate...

> 	xa_lock(&vfio_device_set_xa);
> 	set = xa_load(&vfio_device_set_xa, idx);
> 	if (set) {
> 		kfree(new);
> 		goto found;
> 	}
> 	err = xa_err(__xa_store(&vfio_device_set_xa, idx, new, GFP_KERNEL));

AIUI this is subtly racy:


  CPU1                               CPU2
xa_lock()
 xa_load() == NULL
 xa_store()
   __xas_nomem()
     xa_unlock()
                                 xa_lock()
                                  xa_load() == NULL
                                  xa_store()
                                   __xas_nomem()
                                    xa_unlock()
                                     kmem_cache_alloc()
      kmem_cache_alloc()
     xa_lock()
   [idx] = new1
xa_unlock()
                                    xa_lock()
                                   [idx] = new2    // Woops, lost new1!
                                 xa_unlock()

The hidden xa unlock is really tricky.

The __xa_cmpxchg is safe against this.
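
To spell out why: on CPU2's retry after the hidden unlock, the compare
against NULL fails, so CPU1's entry is not overwritten. Sketch:

	/* CPU2's retry after __xas_nomem dropped and retook the lock */
	dev_set = __xa_cmpxchg(&vfio_device_set_xa, idx, NULL, new2,
			       GFP_KERNEL);
	/*
	 * Returns new1 (non-NULL): the compare fails and new2 is not
	 * stored, so CPU2 frees new2 and takes the found_get_ref path,
	 * sharing CPU1's set.
	 */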

Jason

Patch

diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index a5c77ccb24f70a..725cd2fe675190 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -17,6 +17,26 @@ 
 
 #include "mdev_private.h"
 
+static int vfio_mdev_open_device(struct vfio_device *core_vdev)
+{
+	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
+	struct mdev_parent *parent = mdev->type->parent;
+
+	if (unlikely(!parent->ops->open_device))
+		return 0;
+
+	return parent->ops->open_device(mdev);
+}
+
+static void vfio_mdev_close_device(struct vfio_device *core_vdev)
+{
+	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
+	struct mdev_parent *parent = mdev->type->parent;
+
+	if (likely(parent->ops->close_device))
+		parent->ops->close_device(mdev);
+}
+
 static int vfio_mdev_open(struct vfio_device *core_vdev)
 {
 	struct mdev_device *mdev = to_mdev_device(core_vdev->dev);
@@ -100,6 +120,8 @@  static void vfio_mdev_request(struct vfio_device *core_vdev, unsigned int count)
 
 static const struct vfio_device_ops vfio_mdev_dev_ops = {
 	.name		= "vfio-mdev",
+	.open_device	= vfio_mdev_open_device,
+	.close_device	= vfio_mdev_close_device,
 	.open		= vfio_mdev_open,
 	.release	= vfio_mdev_release,
 	.ioctl		= vfio_mdev_unlocked_ioctl,
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index cc375df0fd5dda..8572d943320214 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -96,6 +96,76 @@  module_param_named(enable_unsafe_noiommu_mode,
 MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
 #endif
 
+static DEFINE_XARRAY(vfio_device_set_xa);
+
+int vfio_assign_device_set(struct vfio_device *device, void *set_id)
+{
+	struct vfio_device_set *alloc_dev_set = NULL;
+	struct vfio_device_set *dev_set;
+
+	if (WARN_ON(!set_id))
+		return -EINVAL;
+
+	/*
+	 * Atomically acquire a singleton object in the xarray for this set_id
+	 */
+again:
+	xa_lock(&vfio_device_set_xa);
+	if (alloc_dev_set) {
+		dev_set = __xa_cmpxchg(&vfio_device_set_xa,
+				       (unsigned long)set_id, NULL,
+				       alloc_dev_set, GFP_KERNEL);
+		if (xa_is_err(dev_set)) {
+			xa_unlock(&vfio_device_set_xa);
+			kfree(alloc_dev_set);
+			return xa_err(dev_set);
+		}
+		if (!dev_set)
+			dev_set = alloc_dev_set;
+	} else {
+		dev_set = xa_load(&vfio_device_set_xa, (unsigned long)set_id);
+	}
+
+	if (dev_set) {
+		dev_set->device_count++;
+		xa_unlock(&vfio_device_set_xa);
+		device->dev_set = dev_set;
+		if (dev_set != alloc_dev_set)
+			kfree(alloc_dev_set);
+		return 0;
+	}
+	xa_unlock(&vfio_device_set_xa);
+
+	if (WARN_ON(alloc_dev_set))
+		return -EINVAL;
+
+	alloc_dev_set = kzalloc(sizeof(*alloc_dev_set), GFP_KERNEL);
+	if (!alloc_dev_set)
+		return -ENOMEM;
+	mutex_init(&alloc_dev_set->lock);
+	alloc_dev_set->set_id = set_id;
+	goto again;
+}
+EXPORT_SYMBOL_GPL(vfio_assign_device_set);
+
+static void vfio_release_device_set(struct vfio_device *device)
+{
+	struct vfio_device_set *dev_set = device->dev_set;
+
+	if (!dev_set)
+		return;
+
+	xa_lock(&vfio_device_set_xa);
+	dev_set->device_count--;
+	if (!dev_set->device_count) {
+		__xa_erase(&vfio_device_set_xa,
+			   (unsigned long)dev_set->set_id);
+		mutex_destroy(&dev_set->lock);
+		kfree(dev_set);
+	}
+	xa_unlock(&vfio_device_set_xa);
+}
+
 /*
  * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
  * and remove functions, any use cases other than acquiring the first
@@ -751,6 +821,7 @@  EXPORT_SYMBOL_GPL(vfio_init_group_dev);
 
 void vfio_uninit_group_dev(struct vfio_device *device)
 {
+	vfio_release_device_set(device);
 }
 EXPORT_SYMBOL_GPL(vfio_uninit_group_dev);
 
@@ -760,6 +831,13 @@  int vfio_register_group_dev(struct vfio_device *device)
 	struct iommu_group *iommu_group;
 	struct vfio_group *group;
 
+	/*
+	 * If the driver doesn't specify a set then the device is added to a
+	 * singleton set just for itself.
+	 */
+	if (!device->dev_set)
+		vfio_assign_device_set(device, device);
+
 	iommu_group = iommu_group_get(device->dev);
 	if (!iommu_group)
 		return -EINVAL;
@@ -1361,7 +1439,8 @@  static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 {
 	struct vfio_device *device;
 	struct file *filep;
-	int ret;
+	int fdno;
+	int ret = 0;
 
 	if (0 == atomic_read(&group->container_users) ||
 	    !group->container->iommu_driver || !vfio_group_viable(group))
@@ -1375,38 +1454,38 @@  static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 		return PTR_ERR(device);
 
 	if (!try_module_get(device->dev->driver->owner)) {
-		vfio_device_put(device);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto err_device_put;
 	}
 
-	ret = device->ops->open(device);
-	if (ret) {
-		module_put(device->dev->driver->owner);
-		vfio_device_put(device);
-		return ret;
+	mutex_lock(&device->dev_set->lock);
+	device->open_count++;
+	if (device->open_count == 1 && device->ops->open_device) {
+		ret = device->ops->open_device(device);
+		if (ret)
+			goto err_undo_count;
+	}
+	mutex_unlock(&device->dev_set->lock);
+
+	if (device->ops->open) {
+		ret = device->ops->open(device);
+		if (ret)
+			goto err_close_device;
 	}
 
 	/*
 	 * We can't use anon_inode_getfd() because we need to modify
 	 * the f_mode flags directly to allow more than just ioctls
 	 */
-	ret = get_unused_fd_flags(O_CLOEXEC);
-	if (ret < 0) {
-		device->ops->release(device);
-		module_put(device->dev->driver->owner);
-		vfio_device_put(device);
-		return ret;
-	}
+	fdno = ret = get_unused_fd_flags(O_CLOEXEC);
+	if (ret < 0)
+		goto err_release;
 
 	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
 				   device, O_RDWR);
 	if (IS_ERR(filep)) {
-		put_unused_fd(ret);
 		ret = PTR_ERR(filep);
-		device->ops->release(device);
-		module_put(device->dev->driver->owner);
-		vfio_device_put(device);
-		return ret;
+		goto err_fd;
 	}
 
 	/*
@@ -1418,12 +1497,28 @@  static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
 
 	atomic_inc(&group->container_users);
 
-	fd_install(ret, filep);
+	fd_install(fdno, filep);
 
 	if (group->noiommu)
 		dev_warn(device->dev, "vfio-noiommu device opened by user "
 			 "(%s:%d)\n", current->comm, task_pid_nr(current));
+	return fdno;
 
+err_fd:
+	put_unused_fd(fdno);
+err_release:
+	if (device->ops->release)
+		device->ops->release(device);
+err_close_device:
+	mutex_lock(&device->dev_set->lock);
+	if (device->open_count == 1 && device->ops->close_device)
+		device->ops->close_device(device);
+err_undo_count:
+	device->open_count--;
+	mutex_unlock(&device->dev_set->lock);
+	module_put(device->dev->driver->owner);
+err_device_put:
+	vfio_device_put(device);
 	return ret;
 }
 
@@ -1561,7 +1656,13 @@  static int vfio_device_fops_release(struct inode *inode, struct file *filep)
 {
 	struct vfio_device *device = filep->private_data;
 
-	device->ops->release(device);
+	if (device->ops->release)
+		device->ops->release(device);
+
+	mutex_lock(&device->dev_set->lock);
+	if (!--device->open_count && device->ops->close_device)
+		device->ops->close_device(device);
+	mutex_unlock(&device->dev_set->lock);
 
 	module_put(device->dev->driver->owner);
 
@@ -2364,6 +2465,7 @@  static void __exit vfio_cleanup(void)
 	class_destroy(vfio.class);
 	vfio.class = NULL;
 	misc_deregister(&vfio_dev);
+	xa_destroy(&vfio_device_set_xa);
 }
 
 module_init(vfio_init);
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index 3a38598c260559..cb5b7ed1d7c30d 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -111,6 +111,8 @@  struct mdev_parent_ops {
 
 	int     (*create)(struct mdev_device *mdev);
 	int     (*remove)(struct mdev_device *mdev);
+	int     (*open_device)(struct mdev_device *mdev);
+	void    (*close_device)(struct mdev_device *mdev);
 	int     (*open)(struct mdev_device *mdev);
 	void    (*release)(struct mdev_device *mdev);
 	ssize_t (*read)(struct mdev_device *mdev, char __user *buf,
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index b0875cf8e496bb..128b4db00adc57 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -15,13 +15,26 @@ 
 #include <linux/poll.h>
 #include <uapi/linux/vfio.h>
 
+/*
+ * VFIO devices can be placed in a set, this allows all devices to share this
+ * structure and the VFIO core will provide a lock that is held around
+ * open_device()/close_device() for all devices in the set.
+ */
+struct vfio_device_set {
+	void *set_id;
+	struct mutex lock;
+	unsigned int device_count;
+};
+
 struct vfio_device {
 	struct device *dev;
 	const struct vfio_device_ops *ops;
 	struct vfio_group *group;
+	struct vfio_device_set *dev_set;
 
 	/* Members below here are private, not for driver use */
 	refcount_t refcount;
+	unsigned int open_count;
 	struct completion comp;
 	struct list_head group_next;
 };
@@ -29,6 +42,8 @@  struct vfio_device {
 /**
  * struct vfio_device_ops - VFIO bus driver device callbacks
  *
+ * @open_device: Called when the first file descriptor is opened for this device
+ * @close_device: Opposite of open_device
  * @open: Called when userspace creates new file descriptor for device
  * @release: Called when userspace releases file descriptor for device
  * @read: Perform read(2) on device file descriptor
@@ -43,6 +58,8 @@  struct vfio_device {
  */
 struct vfio_device_ops {
 	char	*name;
+	int	(*open_device)(struct vfio_device *vdev);
+	void	(*close_device)(struct vfio_device *vdev);
 	int	(*open)(struct vfio_device *vdev);
 	void	(*release)(struct vfio_device *vdev);
 	ssize_t	(*read)(struct vfio_device *vdev, char __user *buf,
@@ -67,6 +84,8 @@  void vfio_unregister_group_dev(struct vfio_device *device);
 extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
 extern void vfio_device_put(struct vfio_device *device);
 
+int vfio_assign_device_set(struct vfio_device *device, void *set_id);
+
 /* events for the backend driver notify callback */
 enum vfio_iommu_notify_type {
 	VFIO_IOMMU_CONTAINER_CLOSE = 0,