diff mbox series

[v2,4/7] vfio/mdev: Pass in a struct vfio_device * to vfio_dma_rw()

Message ID 4-v2-6011bde8e0a1+5f-vfio_mdev_no_group_jgg@nvidia.com (mailing list archive)
State New, archived
Headers show
Series Make the rest of the VFIO driver interface use vfio_device | expand

Commit Message

Jason Gunthorpe April 21, 2022, 4:28 p.m. UTC
Every caller has a readily available vfio_device pointer, use that instead
of passing in a generic struct device. The struct vfio_device already
contains the group we need so this avoids complexity, extra refcountings,
and a confusing lifecycle model.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/gpu/drm/i915/gvt/gvt.h |  4 ++--
 drivers/vfio/vfio.c            | 24 +++++++++++-------------
 include/linux/vfio.h           |  2 +-
 3 files changed, 14 insertions(+), 16 deletions(-)

Comments

Tian, Kevin April 22, 2022, 1:50 a.m. UTC | #1
> From: Jason Gunthorpe <jgg@nvidia.com>
> Sent: Friday, April 22, 2022 12:29 AM
> 
> Every caller has a readily available vfio_device pointer, use that instead
> of passing in a generic struct device. The struct vfio_device already
> contains the group we need so this avoids complexity, extra refcountings,
> and a confusing lifecycle model.

Using the same description as the last patch leaves the impression that
the two patches do exactly the same type of change. But this
patch actually includes one more change to grab a reference on the
container. This is worth an explanation.

> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

otherwise:

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

> ---
>  drivers/gpu/drm/i915/gvt/gvt.h |  4 ++--
>  drivers/vfio/vfio.c            | 24 +++++++++++-------------
>  include/linux/vfio.h           |  2 +-
>  3 files changed, 14 insertions(+), 16 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index 03ecffc2ba56a9..5a28ee965b7f3e 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -732,7 +732,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu
> *vgpu, unsigned long gpa,
>  {
>  	if (!vgpu->attached)
>  		return -ESRCH;
> -	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
> +	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
>  }
> 
>  /**
> @@ -750,7 +750,7 @@ static inline int intel_gvt_write_gpa(struct intel_vgpu
> *vgpu,
>  {
>  	if (!vgpu->attached)
>  		return -ESRCH;
> -	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
> +	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
>  }
> 
>  void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
> diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
> index 24b92a45cfc8f1..d10d20d393b706 100644
> --- a/drivers/vfio/vfio.c
> +++ b/drivers/vfio/vfio.c
> @@ -2323,32 +2323,28 @@ EXPORT_SYMBOL(vfio_group_unpin_pages);
>   * As the read/write of user space memory is conducted via the CPUs and is
>   * not a real device DMA, it is not necessary to pin the user space memory.
>   *
> - * The caller needs to call vfio_group_get_external_user() or
> - * vfio_group_get_external_user_from_dev() prior to calling this interface,
> - * so as to prevent the VFIO group from disposal in the middle of the call.
> - * But it can keep the reference to the VFIO group for several calls into
> - * this interface.
> - * After finishing using of the VFIO group, the caller needs to release the
> - * VFIO group by calling vfio_group_put_external_user().
> - *
> - * @group [in]		: VFIO group
> + * @vdev [in]		: VFIO device
>   * @user_iova [in]	: base IOVA of a user space buffer
>   * @data [in]		: pointer to kernel buffer
>   * @len [in]		: kernel buffer length
>   * @write		: indicate read or write
>   * Return error code on failure or 0 on success.
>   */
> -int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
> -		void *data, size_t len, bool write)
> +int vfio_dma_rw(struct vfio_device *vdev, dma_addr_t user_iova, void
> *data,
> +		size_t len, bool write)
>  {
>  	struct vfio_container *container;
>  	struct vfio_iommu_driver *driver;
>  	int ret = 0;
> 
> -	if (!group || !data || len <= 0)
> +	if (!data || len <= 0)
>  		return -EINVAL;
> 
> -	container = group->container;
> +	ret = vfio_group_add_container_user(vdev->group);
> +	if (ret)
> +		return ret;
> +
> +	container = vdev->group->container;
>  	driver = container->iommu_driver;
> 
>  	if (likely(driver && driver->ops->dma_rw))
> @@ -2357,6 +2353,8 @@ int vfio_dma_rw(struct vfio_group *group,
> dma_addr_t user_iova,
>  	else
>  		ret = -ENOTTY;
> 
> +	vfio_group_try_dissolve_container(vdev->group);
> +
>  	return ret;
>  }
>  EXPORT_SYMBOL(vfio_dma_rw);
> diff --git a/include/linux/vfio.h b/include/linux/vfio.h
> index 8f2a09801a660b..91d46e532ca104 100644
> --- a/include/linux/vfio.h
> +++ b/include/linux/vfio.h
> @@ -161,7 +161,7 @@ extern int vfio_group_pin_pages(struct vfio_group
> *group,
>  extern int vfio_group_unpin_pages(struct vfio_group *group,
>  				  unsigned long *user_iova_pfn, int npage);
> 
> -extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
> +extern int vfio_dma_rw(struct vfio_device *vdev, dma_addr_t user_iova,
>  		       void *data, size_t len, bool write);
> 
>  extern struct iommu_domain *vfio_group_iommu_domain(struct
> vfio_group *group);
> --
> 2.36.0
Jason Gunthorpe April 22, 2022, 5:59 p.m. UTC | #2
On Fri, Apr 22, 2022 at 01:50:00AM +0000, Tian, Kevin wrote:
> > From: Jason Gunthorpe <jgg@nvidia.com>
> > Sent: Friday, April 22, 2022 12:29 AM
> > 
> > Every caller has a readily available vfio_device pointer, use that instead
> > of passing in a generic struct device. The struct vfio_device already
> > contains the group we need so this avoids complexity, extra refcountings,
> > and a confusing lifecycle model.
> 
> Using the same description as last patch leaves the impression that
> the two patches do the exactly same type of change. But this
> patch actually includes one more change to grab a reference on the
> container. This is worth an explanation.

How about this:

Every caller has a readily available vfio_device pointer, use that instead
of passing in a generic struct device. Change vfio_dma_rw() to take in the
struct vfio_device and move the container users that would have been held
by vfio_group_get_external_user_from_dev() to vfio_dma_rw() directly, like
vfio_pin/unpin_pages().

Thanks,
Jason
Tian, Kevin April 24, 2022, 5:52 a.m. UTC | #3
> From: Jason Gunthorpe
> Sent: Saturday, April 23, 2022 2:00 AM
> 
> On Fri, Apr 22, 2022 at 01:50:00AM +0000, Tian, Kevin wrote:
> > > From: Jason Gunthorpe <jgg@nvidia.com>
> > > Sent: Friday, April 22, 2022 12:29 AM
> > >
> > > Every caller has a readily available vfio_device pointer, use that instead
> > > of passing in a generic struct device. The struct vfio_device already
> > > contains the group we need so this avoids complexity, extra refcountings,
> > > and a confusing lifecycle model.
> >
> > Using the same description as last patch leaves the impression that
> > the two patches do the exactly same type of change. But this
> > patch actually includes one more change to grab a reference on the
> > container. This is worth an explanation.
> 
> How about this:
> 
> Every caller has a readily available vfio_device pointer, use that instead
> of passing in a generic struct device. Change vfio_dma_rw() to take in the
> struct vfio_device and move the container users that would have been held
> by vfio_group_get_external_user_from_dev() to vfio_dma_rw() directly, like
> vfio_pin/unpin_pages().
> 

Yes, this is clearer.
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 03ecffc2ba56a9..5a28ee965b7f3e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -732,7 +732,7 @@  static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
 {
 	if (!vgpu->attached)
 		return -ESRCH;
-	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, false);
+	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
 }
 
 /**
@@ -750,7 +750,7 @@  static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
 {
 	if (!vgpu->attached)
 		return -ESRCH;
-	return vfio_dma_rw(vgpu->vfio_group, gpa, buf, len, true);
+	return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
 }
 
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 24b92a45cfc8f1..d10d20d393b706 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -2323,32 +2323,28 @@  EXPORT_SYMBOL(vfio_group_unpin_pages);
  * As the read/write of user space memory is conducted via the CPUs and is
  * not a real device DMA, it is not necessary to pin the user space memory.
  *
- * The caller needs to call vfio_group_get_external_user() or
- * vfio_group_get_external_user_from_dev() prior to calling this interface,
- * so as to prevent the VFIO group from disposal in the middle of the call.
- * But it can keep the reference to the VFIO group for several calls into
- * this interface.
- * After finishing using of the VFIO group, the caller needs to release the
- * VFIO group by calling vfio_group_put_external_user().
- *
- * @group [in]		: VFIO group
+ * @vdev [in]		: VFIO device
  * @user_iova [in]	: base IOVA of a user space buffer
  * @data [in]		: pointer to kernel buffer
  * @len [in]		: kernel buffer length
  * @write		: indicate read or write
  * Return error code on failure or 0 on success.
  */
-int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
-		void *data, size_t len, bool write)
+int vfio_dma_rw(struct vfio_device *vdev, dma_addr_t user_iova, void *data,
+		size_t len, bool write)
 {
 	struct vfio_container *container;
 	struct vfio_iommu_driver *driver;
 	int ret = 0;
 
-	if (!group || !data || len <= 0)
+	if (!data || len <= 0)
 		return -EINVAL;
 
-	container = group->container;
+	ret = vfio_group_add_container_user(vdev->group);
+	if (ret)
+		return ret;
+
+	container = vdev->group->container;
 	driver = container->iommu_driver;
 
 	if (likely(driver && driver->ops->dma_rw))
@@ -2357,6 +2353,8 @@  int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
 	else
 		ret = -ENOTTY;
 
+	vfio_group_try_dissolve_container(vdev->group);
+
 	return ret;
 }
 EXPORT_SYMBOL(vfio_dma_rw);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 8f2a09801a660b..91d46e532ca104 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -161,7 +161,7 @@  extern int vfio_group_pin_pages(struct vfio_group *group,
 extern int vfio_group_unpin_pages(struct vfio_group *group,
 				  unsigned long *user_iova_pfn, int npage);
 
-extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
+extern int vfio_dma_rw(struct vfio_device *vdev, dma_addr_t user_iova,
 		       void *data, size_t len, bool write);
 
 extern struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group);