
[05/13] vfio/fsl: Move to the device set infrastructure

Message ID 5-v1-eaf3ccbba33c+1add0-vfio_reflck_jgg@nvidia.com (mailing list archive)
State New, archived
Series Provide core infrastructure for managing open/release

Commit Message

Jason Gunthorpe July 15, 2021, 12:20 a.m. UTC
FSL uses the internal reflck to implement the open_device() functionality;
conversion to the core code is straightforward.

The decision on which set to be part of is trivially based on the
is_fsl_mc_bus_dprc() and we use a 'struct device *' pointer as the set_id.

It isn't entirely clear what the device set lock is actually protecting,
but I think it is related to the interrupt setup.

Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/vfio/fsl-mc/vfio_fsl_mc.c         | 152 ++++------------------
 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c    |   6 +-
 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h |   7 -
 3 files changed, 26 insertions(+), 139 deletions(-)
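
At a glance, the conversion (shown in full in the diff below) replaces the
driver-private reflck with the core-managed device set: the probe path picks
the set membership per device, and every place that used to take
vdev->reflck->lock now takes the shared dev_set lock. The snippet below is
condensed from the diff, not a standalone build:

	/* probe: a DPRC anchors its own set, its child devices join the parent's set */
	ret = vfio_assign_device_set(&vdev->vdev, is_fsl_mc_bus_dprc(mc_dev) ?
						  &mc_dev->dev :
						  mc_dev->dev.parent);

	/* interrupt setup: the per-set mutex replaces vdev->reflck->lock */
	mutex_lock(&vdev->vdev.dev_set->lock);
	...
	mutex_unlock(&vdev->vdev.dev_set->lock);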

Comments

Diana Madalina Craciun July 20, 2021, 4:12 p.m. UTC | #1
On 7/15/2021 3:20 AM, Jason Gunthorpe wrote:
> FSL uses the internal reflck to implement the open_device() functionality,
> conversion to the core code is straightforward.
> 
> The decision on which set to be part of is trivially based on the
> is_fsl_mc_bus_dprc() and we use a 'struct device *' pointer as the set_id.
> 
> It isn't entirely clear what the device set lock is actually protecting,
> but I think it is related to the interrupt setup.

Yes, it is protecting the interrupt setup. The FSL MC devices use MSIs, 
and only the DPRC device allocates the MSIs from the MSI domain; the 
other devices just take interrupts from a pool. The lock protects 
access to this pool.
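
In the converted driver, that serialization point is the shared dev_set
lock: the DPRC and all of its child devices land in the same set (the
set_id is the DPRC's struct device), so the pool populate/allocate path is
serialized across every device in the container. Condensed from the
vfio_fsl_mc_intr.c hunk further down:

	/* vfio_fsl_mc_set_irq_trigger(), after this patch */
	mutex_lock(&vdev->vdev.dev_set->lock);
	ret = fsl_mc_populate_irq_pool(mc_cont, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
	if (ret)
		goto unlock;
	ret = vfio_fsl_mc_irqs_allocate(vdev);
	if (ret)
		goto unlock;
	mutex_unlock(&vdev->vdev.dev_set->lock);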

> 
> Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
>   drivers/vfio/fsl-mc/vfio_fsl_mc.c         | 152 ++++------------------
>   drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c    |   6 +-
>   drivers/vfio/fsl-mc/vfio_fsl_mc_private.h |   7 -
>   3 files changed, 26 insertions(+), 139 deletions(-)
> 
> diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
> index 3d2be06e1bc146..49b93de05d5d62 100644
> --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
> +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
> @@ -19,81 +19,10 @@
>   
>   static struct fsl_mc_driver vfio_fsl_mc_driver;
>   
> -static DEFINE_MUTEX(reflck_lock);
> -
> -static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
> -{
> -	kref_get(&reflck->kref);
> -}
> -
> -static void vfio_fsl_mc_reflck_release(struct kref *kref)
> -{
> -	struct vfio_fsl_mc_reflck *reflck = container_of(kref,
> -						      struct vfio_fsl_mc_reflck,
> -						      kref);
> -
> -	mutex_destroy(&reflck->lock);
> -	kfree(reflck);
> -	mutex_unlock(&reflck_lock);
> -}
> -
> -static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
> -{
> -	kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
> -}
> -
> -static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
> -{
> -	struct vfio_fsl_mc_reflck *reflck;
> -
> -	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
> -	if (!reflck)
> -		return ERR_PTR(-ENOMEM);
> -
> -	kref_init(&reflck->kref);
> -	mutex_init(&reflck->lock);
> -
> -	return reflck;
> -}
> -
> -static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
> -{
> -	int ret = 0;
> -
> -	mutex_lock(&reflck_lock);
> -	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
> -		vdev->reflck = vfio_fsl_mc_reflck_alloc();
> -		ret = PTR_ERR_OR_ZERO(vdev->reflck);
> -	} else {
> -		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
> -		struct vfio_device *device;
> -		struct vfio_fsl_mc_device *cont_vdev;
> -
> -		device = vfio_device_get_from_dev(mc_cont_dev);
> -		if (!device) {
> -			ret = -ENODEV;
> -			goto unlock;
> -		}
> -
> -		cont_vdev =
> -			container_of(device, struct vfio_fsl_mc_device, vdev);
> -		if (!cont_vdev || !cont_vdev->reflck) {
> -			vfio_device_put(device);
> -			ret = -ENODEV;
> -			goto unlock;
> -		}
> -		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
> -		vdev->reflck = cont_vdev->reflck;
> -		vfio_device_put(device);
> -	}
> -
> -unlock:
> -	mutex_unlock(&reflck_lock);
> -	return ret;
> -}
> -
> -static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
> +static int vfio_fsl_mc_open_device(struct vfio_device *core_vdev)
>   {
> +	struct vfio_fsl_mc_device *vdev =
> +		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
>   	struct fsl_mc_device *mc_dev = vdev->mc_dev;
>   	int count = mc_dev->obj_desc.region_count;
>   	int i;
> @@ -136,58 +65,30 @@ static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
>   	kfree(vdev->regions);
>   }
>   
> -static int vfio_fsl_mc_open(struct vfio_device *core_vdev)
> -{
> -	struct vfio_fsl_mc_device *vdev =
> -		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
> -	int ret = 0;
> -
> -	mutex_lock(&vdev->reflck->lock);
> -	if (!vdev->refcnt) {
> -		ret = vfio_fsl_mc_regions_init(vdev);
> -		if (ret)
> -			goto out;
> -	}
> -	vdev->refcnt++;
> -out:
> -	mutex_unlock(&vdev->reflck->lock);
>   
> -	return ret;
> -}
> -
> -static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
> +static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
>   {
>   	struct vfio_fsl_mc_device *vdev =
>   		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
> +	struct fsl_mc_device *mc_dev = vdev->mc_dev;
> +	struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
> +	struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
>   	int ret;
>   
> -	mutex_lock(&vdev->reflck->lock);
> +	vfio_fsl_mc_regions_cleanup(vdev);
>   
> -	if (!(--vdev->refcnt)) {
> -		struct fsl_mc_device *mc_dev = vdev->mc_dev;
> -		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
> -		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
> -
> -		vfio_fsl_mc_regions_cleanup(vdev);
> +	/* reset the device before cleaning up the interrupts */
> +	ret = dprc_reset_container(mc_cont->mc_io, 0, mc_cont->mc_handle,
> +				   mc_cont->obj_desc.id,
> +				   DPRC_RESET_OPTION_NON_RECURSIVE);
>   
> -		/* reset the device before cleaning up the interrupts */
> -		ret = dprc_reset_container(mc_cont->mc_io, 0,
> -		      mc_cont->mc_handle,
> -			  mc_cont->obj_desc.id,
> -			  DPRC_RESET_OPTION_NON_RECURSIVE);
> +	if (WARN_ON(ret))
> +		dev_warn(&mc_cont->dev,
> +			 "VFIO_FLS_MC: reset device has failed (%d)\n", ret);
>   
> -		if (ret) {
> -			dev_warn(&mc_cont->dev, "VFIO_FLS_MC: reset device has failed (%d)\n",
> -				 ret);
> -			WARN_ON(1);
> -		}
> +	vfio_fsl_mc_irqs_cleanup(vdev);
>   
> -		vfio_fsl_mc_irqs_cleanup(vdev);
> -
> -		fsl_mc_cleanup_irq_pool(mc_cont);

There is also a need for the lock here. Even though the close function is 
called only once, there might be a race between the devices in the set. 
The lock protects the pool of interrupts, and releasing interrupts back 
to the pool might generate races if not protected:

--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -86,9 +86,12 @@ static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
                 dev_warn(&mc_cont->dev,
                          "VFIO_FLS_MC: reset device has failed (%d)\n", ret);

+       mutex_lock(&vdev->vdev.dev_set->lock);
         vfio_fsl_mc_irqs_cleanup(vdev);

         fsl_mc_cleanup_irq_pool(mc_cont);
+       mutex_unlock(&vdev->vdev.dev_set->lock);
+
  }


> -	}
> -
> -	mutex_unlock(&vdev->reflck->lock);
> +	fsl_mc_cleanup_irq_pool(mc_cont);
>   }
>   
>   static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
> @@ -504,8 +405,8 @@ static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
>   
>   static const struct vfio_device_ops vfio_fsl_mc_ops = {
>   	.name		= "vfio-fsl-mc",
> -	.open		= vfio_fsl_mc_open,
> -	.release	= vfio_fsl_mc_release,
> +	.open_device	= vfio_fsl_mc_open_device,
> +	.close_device	= vfio_fsl_mc_close_device,
>   	.ioctl		= vfio_fsl_mc_ioctl,
>   	.read		= vfio_fsl_mc_read,
>   	.write		= vfio_fsl_mc_write,
> @@ -625,13 +526,15 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
>   	vdev->mc_dev = mc_dev;
>   	mutex_init(&vdev->igate);
>   
> -	ret = vfio_fsl_mc_reflck_attach(vdev);
> +	ret = vfio_assign_device_set(&vdev->vdev, is_fsl_mc_bus_dprc(mc_dev) ?
> +							  &mc_dev->dev :
> +							  mc_dev->dev.parent);
>   	if (ret)
>   		goto out_uninit;
>   
>   	ret = vfio_fsl_mc_init_device(vdev);
>   	if (ret)
> -		goto out_reflck;
> +		goto out_uninit;
>   
>   	ret = vfio_register_group_dev(&vdev->vdev);
>   	if (ret) {
> @@ -639,12 +542,6 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
>   		goto out_device;
>   	}
>   
> -	/*
> -	 * This triggers recursion into vfio_fsl_mc_probe() on another device
> -	 * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
> -	 * vfio_add_group_dev() above. It has no impact on this vdev, so it is
> -	 * safe to be after the vfio device is made live.
> -	 */
>   	ret = vfio_fsl_mc_scan_container(mc_dev);
>   	if (ret)
>   		goto out_group_dev;
> @@ -655,8 +552,6 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
>   	vfio_unregister_group_dev(&vdev->vdev);
>   out_device:
>   	vfio_fsl_uninit_device(vdev);
> -out_reflck:
> -	vfio_fsl_mc_reflck_put(vdev->reflck);
>   out_uninit:
>   	vfio_uninit_group_dev(&vdev->vdev);
>   	kfree(vdev);
> @@ -676,7 +571,6 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
>   	dprc_remove_devices(mc_dev, NULL, 0);
>   	vfio_fsl_uninit_device(vdev);
>   	vfio_uninit_group_dev(&vdev->vdev);
> -	vfio_fsl_mc_reflck_put(vdev->reflck);
>   
>   	kfree(vdev);
>   	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
> diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
> index 0d9f3002df7f51..77e584093a233d 100644
> --- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
> +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
> @@ -120,7 +120,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
>   	if (start != 0 || count != 1)
>   		return -EINVAL;
>   
> -	mutex_lock(&vdev->reflck->lock);
> +	mutex_lock(&vdev->vdev.dev_set->lock);
>   	ret = fsl_mc_populate_irq_pool(mc_cont,
>   			FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
>   	if (ret)
> @@ -129,7 +129,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
>   	ret = vfio_fsl_mc_irqs_allocate(vdev);
>   	if (ret)
>   		goto unlock;
> -	mutex_unlock(&vdev->reflck->lock);
> +	mutex_unlock(&vdev->vdev.dev_set->lock);
>   
>   	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
>   		s32 fd = *(s32 *)data;
> @@ -154,7 +154,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
>   	return 0;
>   
>   unlock:
> -	mutex_unlock(&vdev->reflck->lock);
> +	mutex_unlock(&vdev->vdev.dev_set->lock);
>   	return ret;
>   
>   }
> diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
> index 89700e00e77d10..4ad63ececb914b 100644
> --- a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
> +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
> @@ -22,11 +22,6 @@ struct vfio_fsl_mc_irq {
>   	char            *name;
>   };
>   
> -struct vfio_fsl_mc_reflck {
> -	struct kref		kref;
> -	struct mutex		lock;
> -};
> -
>   struct vfio_fsl_mc_region {
>   	u32			flags;
>   	u32			type;
> @@ -39,9 +34,7 @@ struct vfio_fsl_mc_device {
>   	struct vfio_device		vdev;
>   	struct fsl_mc_device		*mc_dev;
>   	struct notifier_block        nb;
> -	int				refcnt;
>   	struct vfio_fsl_mc_region	*regions;
> -	struct vfio_fsl_mc_reflck   *reflck;
>   	struct mutex         igate;
>   	struct vfio_fsl_mc_irq      *mc_irqs;
>   };
> 

Diana
Jason Gunthorpe July 20, 2021, 4:17 p.m. UTC | #2
On Tue, Jul 20, 2021 at 07:12:26PM +0300, Diana Craciun OSS wrote:
> On 7/15/2021 3:20 AM, Jason Gunthorpe wrote:
> > FSL uses the internal reflck to implement the open_device() functionality,
> > conversion to the core code is straightforward.
> > 
> > The decision on which set to be part of is trivially based on the
> > is_fsl_mc_bus_dprc() and we use a 'struct device *' pointer as the set_id.
> > 
> > It isn't entirely clear what the device set lock is actually protecting,
> > but I think it is related to the interrupt setup.
> 
> Yes, it is protecting the interrupts setup. The FSL MC devices are using
> MSIs and only the DPRC device is allocating the MSIs from the MSI domain.
> The other devices just take interrupts from a pool. The lock is protecting
> the access to this pool.

It would be much clearer if the lock were near the data it is
protecting; the DPRC pool seems to be in an entirely different layer.
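
One possible shape for that (purely a sketch; none of the names below exist
in the fsl-mc bus code today) would be to give the pool its own mutex in the
bus layer and take it inside the populate/take/cleanup helpers, so VFIO
callers would not need an external lock around pool accesses:

	/* hypothetical: the irq pool owns its lock, somewhere in drivers/bus/fsl-mc */
	struct mc_pool_irq {
		struct list_head	node;
		unsigned int		virq;
	};

	struct mc_irq_pool {
		struct mutex		lock;		/* protects free_list */
		struct list_head	free_list;	/* MSIs allocated by the DPRC */
	};

	static struct mc_pool_irq *mc_irq_pool_take(struct mc_irq_pool *pool)
	{
		struct mc_pool_irq *irq = NULL;

		mutex_lock(&pool->lock);
		if (!list_empty(&pool->free_list)) {
			irq = list_first_entry(&pool->free_list,
					       struct mc_pool_irq, node);
			list_del(&irq->node);
		}
		mutex_unlock(&pool->lock);
		return irq;
	}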

> > -static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
> > +static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
> >   {
> >   	struct vfio_fsl_mc_device *vdev =
> >   		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
> > +	struct fsl_mc_device *mc_dev = vdev->mc_dev;
> > +	struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
> > +	struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
> >   	int ret;
> > -	mutex_lock(&vdev->reflck->lock);
> > +	vfio_fsl_mc_regions_cleanup(vdev);
> > -	if (!(--vdev->refcnt)) {
> > -		struct fsl_mc_device *mc_dev = vdev->mc_dev;
> > -		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
> > -		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
> > -
> > -		vfio_fsl_mc_regions_cleanup(vdev);
> > +	/* reset the device before cleaning up the interrupts */
> > +	ret = dprc_reset_container(mc_cont->mc_io, 0, mc_cont->mc_handle,
> > +				   mc_cont->obj_desc.id,
> > +				   DPRC_RESET_OPTION_NON_RECURSIVE);
> > -		/* reset the device before cleaning up the interrupts */
> > -		ret = dprc_reset_container(mc_cont->mc_io, 0,
> > -		      mc_cont->mc_handle,
> > -			  mc_cont->obj_desc.id,
> > -			  DPRC_RESET_OPTION_NON_RECURSIVE);
> > +	if (WARN_ON(ret))
> > +		dev_warn(&mc_cont->dev,
> > +			 "VFIO_FLS_MC: reset device has failed (%d)\n", ret);
> > -		if (ret) {
> > -			dev_warn(&mc_cont->dev, "VFIO_FLS_MC: reset device has failed (%d)\n",
> > -				 ret);
> > -			WARN_ON(1);
> > -		}
> > +	vfio_fsl_mc_irqs_cleanup(vdev);
> > -		vfio_fsl_mc_irqs_cleanup(vdev);
> > -
> > -		fsl_mc_cleanup_irq_pool(mc_cont);
> 
> There is also a need for the lock here. Even though the close function is
> called only once, there might be a race between the devices in the
> set. 

vfio_fsl_mc_close_device() is already called under this lock:

	mutex_lock(&device->dev_set->lock);
	if (!--device->open_count && device->ops->close_device)
		device->ops->close_device(device);
	mutex_unlock(&device->dev_set->lock);

Thanks,
Jason
Diana Madalina Craciun July 20, 2021, 4:23 p.m. UTC | #3
On 7/20/2021 7:17 PM, Jason Gunthorpe wrote:
> On Tue, Jul 20, 2021 at 07:12:26PM +0300, Diana Craciun OSS wrote:
>> On 7/15/2021 3:20 AM, Jason Gunthorpe wrote:
>>> FSL uses the internal reflck to implement the open_device() functionality,
>>> conversion to the core code is straightforward.
>>>
>>> The decision on which set to be part of is trivially based on the
>>> is_fsl_mc_bus_dprc() and we use a 'struct device *' pointer as the set_id.
>>>
>>> It isn't entirely clear what the device set lock is actually protecting,
>>> but I think it is related to the interrupt setup.
>>
>> Yes, it is protecting the interrupts setup. The FSL MC devices are using
>> MSIs and only the DPRC device is allocating the MSIs from the MSI domain.
>> The other devices just take interrupts from a pool. The lock is protecting
>> the access to this pool.
> 
> It would be much clearer if the lock was near the data it was
> protecting, the DPRC pool seems in an entirely different layer..

Yes, I agree. I will think about a clearer design as a future 
improvement.

> 
>>> -static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
>>> +static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
>>>    {
>>>    	struct vfio_fsl_mc_device *vdev =
>>>    		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
>>> +	struct fsl_mc_device *mc_dev = vdev->mc_dev;
>>> +	struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
>>> +	struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
>>>    	int ret;
>>> -	mutex_lock(&vdev->reflck->lock);
>>> +	vfio_fsl_mc_regions_cleanup(vdev);
>>> -	if (!(--vdev->refcnt)) {
>>> -		struct fsl_mc_device *mc_dev = vdev->mc_dev;
>>> -		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
>>> -		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
>>> -
>>> -		vfio_fsl_mc_regions_cleanup(vdev);
>>> +	/* reset the device before cleaning up the interrupts */
>>> +	ret = dprc_reset_container(mc_cont->mc_io, 0, mc_cont->mc_handle,
>>> +				   mc_cont->obj_desc.id,
>>> +				   DPRC_RESET_OPTION_NON_RECURSIVE);
>>> -		/* reset the device before cleaning up the interrupts */
>>> -		ret = dprc_reset_container(mc_cont->mc_io, 0,
>>> -		      mc_cont->mc_handle,
>>> -			  mc_cont->obj_desc.id,
>>> -			  DPRC_RESET_OPTION_NON_RECURSIVE);
>>> +	if (WARN_ON(ret))
>>> +		dev_warn(&mc_cont->dev,
>>> +			 "VFIO_FLS_MC: reset device has failed (%d)\n", ret);
>>> -		if (ret) {
>>> -			dev_warn(&mc_cont->dev, "VFIO_FLS_MC: reset device has failed (%d)\n",
>>> -				 ret);
>>> -			WARN_ON(1);
>>> -		}
>>> +	vfio_fsl_mc_irqs_cleanup(vdev);
>>> -		vfio_fsl_mc_irqs_cleanup(vdev);
>>> -
>>> -		fsl_mc_cleanup_irq_pool(mc_cont);
>>
>> There is also a need for the lock here. Even though the close function is
>> called only once, there might be a race between the devices in the
>> set.
> 
> vfio_fsl_mc_close_device() is already called under this lock:
> 
> 	mutex_lock(&device->dev_set->lock);
> 	if (!--device->open_count && device->ops->close_device)
> 		device->ops->close_device(device);
> 	mutex_unlock(&device->dev_set->lock);
> 

OK, I missed that.

> Thanks,
> Jason
> 

I have tested the changes and everything works as expected.

Thanks,
Diana
Jason Gunthorpe July 20, 2021, 4:25 p.m. UTC | #4
On Tue, Jul 20, 2021 at 07:23:35PM +0300, Diana Craciun OSS wrote:
> I have tested the changes and everything works as expected.

Great, thanks, I added a Tested-by for you

Jason

Patch

diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 3d2be06e1bc146..49b93de05d5d62 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -19,81 +19,10 @@ 
 
 static struct fsl_mc_driver vfio_fsl_mc_driver;
 
-static DEFINE_MUTEX(reflck_lock);
-
-static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
-{
-	kref_get(&reflck->kref);
-}
-
-static void vfio_fsl_mc_reflck_release(struct kref *kref)
-{
-	struct vfio_fsl_mc_reflck *reflck = container_of(kref,
-						      struct vfio_fsl_mc_reflck,
-						      kref);
-
-	mutex_destroy(&reflck->lock);
-	kfree(reflck);
-	mutex_unlock(&reflck_lock);
-}
-
-static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
-{
-	kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
-}
-
-static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
-{
-	struct vfio_fsl_mc_reflck *reflck;
-
-	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
-	if (!reflck)
-		return ERR_PTR(-ENOMEM);
-
-	kref_init(&reflck->kref);
-	mutex_init(&reflck->lock);
-
-	return reflck;
-}
-
-static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
-{
-	int ret = 0;
-
-	mutex_lock(&reflck_lock);
-	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
-		vdev->reflck = vfio_fsl_mc_reflck_alloc();
-		ret = PTR_ERR_OR_ZERO(vdev->reflck);
-	} else {
-		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
-		struct vfio_device *device;
-		struct vfio_fsl_mc_device *cont_vdev;
-
-		device = vfio_device_get_from_dev(mc_cont_dev);
-		if (!device) {
-			ret = -ENODEV;
-			goto unlock;
-		}
-
-		cont_vdev =
-			container_of(device, struct vfio_fsl_mc_device, vdev);
-		if (!cont_vdev || !cont_vdev->reflck) {
-			vfio_device_put(device);
-			ret = -ENODEV;
-			goto unlock;
-		}
-		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
-		vdev->reflck = cont_vdev->reflck;
-		vfio_device_put(device);
-	}
-
-unlock:
-	mutex_unlock(&reflck_lock);
-	return ret;
-}
-
-static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
+static int vfio_fsl_mc_open_device(struct vfio_device *core_vdev)
 {
+	struct vfio_fsl_mc_device *vdev =
+		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
 	struct fsl_mc_device *mc_dev = vdev->mc_dev;
 	int count = mc_dev->obj_desc.region_count;
 	int i;
@@ -136,58 +65,30 @@  static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
 	kfree(vdev->regions);
 }
 
-static int vfio_fsl_mc_open(struct vfio_device *core_vdev)
-{
-	struct vfio_fsl_mc_device *vdev =
-		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
-	int ret = 0;
-
-	mutex_lock(&vdev->reflck->lock);
-	if (!vdev->refcnt) {
-		ret = vfio_fsl_mc_regions_init(vdev);
-		if (ret)
-			goto out;
-	}
-	vdev->refcnt++;
-out:
-	mutex_unlock(&vdev->reflck->lock);
 
-	return ret;
-}
-
-static void vfio_fsl_mc_release(struct vfio_device *core_vdev)
+static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
 {
 	struct vfio_fsl_mc_device *vdev =
 		container_of(core_vdev, struct vfio_fsl_mc_device, vdev);
+	struct fsl_mc_device *mc_dev = vdev->mc_dev;
+	struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
+	struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
 	int ret;
 
-	mutex_lock(&vdev->reflck->lock);
+	vfio_fsl_mc_regions_cleanup(vdev);
 
-	if (!(--vdev->refcnt)) {
-		struct fsl_mc_device *mc_dev = vdev->mc_dev;
-		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
-		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);
-
-		vfio_fsl_mc_regions_cleanup(vdev);
+	/* reset the device before cleaning up the interrupts */
+	ret = dprc_reset_container(mc_cont->mc_io, 0, mc_cont->mc_handle,
+				   mc_cont->obj_desc.id,
+				   DPRC_RESET_OPTION_NON_RECURSIVE);
 
-		/* reset the device before cleaning up the interrupts */
-		ret = dprc_reset_container(mc_cont->mc_io, 0,
-		      mc_cont->mc_handle,
-			  mc_cont->obj_desc.id,
-			  DPRC_RESET_OPTION_NON_RECURSIVE);
+	if (WARN_ON(ret))
+		dev_warn(&mc_cont->dev,
+			 "VFIO_FLS_MC: reset device has failed (%d)\n", ret);
 
-		if (ret) {
-			dev_warn(&mc_cont->dev, "VFIO_FLS_MC: reset device has failed (%d)\n",
-				 ret);
-			WARN_ON(1);
-		}
+	vfio_fsl_mc_irqs_cleanup(vdev);
 
-		vfio_fsl_mc_irqs_cleanup(vdev);
-
-		fsl_mc_cleanup_irq_pool(mc_cont);
-	}
-
-	mutex_unlock(&vdev->reflck->lock);
+	fsl_mc_cleanup_irq_pool(mc_cont);
 }
 
 static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
@@ -504,8 +405,8 @@  static int vfio_fsl_mc_mmap(struct vfio_device *core_vdev,
 
 static const struct vfio_device_ops vfio_fsl_mc_ops = {
 	.name		= "vfio-fsl-mc",
-	.open		= vfio_fsl_mc_open,
-	.release	= vfio_fsl_mc_release,
+	.open_device	= vfio_fsl_mc_open_device,
+	.close_device	= vfio_fsl_mc_close_device,
 	.ioctl		= vfio_fsl_mc_ioctl,
 	.read		= vfio_fsl_mc_read,
 	.write		= vfio_fsl_mc_write,
@@ -625,13 +526,15 @@  static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
 	vdev->mc_dev = mc_dev;
 	mutex_init(&vdev->igate);
 
-	ret = vfio_fsl_mc_reflck_attach(vdev);
+	ret = vfio_assign_device_set(&vdev->vdev, is_fsl_mc_bus_dprc(mc_dev) ?
+							  &mc_dev->dev :
+							  mc_dev->dev.parent);
 	if (ret)
 		goto out_uninit;
 
 	ret = vfio_fsl_mc_init_device(vdev);
 	if (ret)
-		goto out_reflck;
+		goto out_uninit;
 
 	ret = vfio_register_group_dev(&vdev->vdev);
 	if (ret) {
@@ -639,12 +542,6 @@  static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
 		goto out_device;
 	}
 
-	/*
-	 * This triggers recursion into vfio_fsl_mc_probe() on another device
-	 * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
-	 * vfio_add_group_dev() above. It has no impact on this vdev, so it is
-	 * safe to be after the vfio device is made live.
-	 */
 	ret = vfio_fsl_mc_scan_container(mc_dev);
 	if (ret)
 		goto out_group_dev;
@@ -655,8 +552,6 @@  static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
 	vfio_unregister_group_dev(&vdev->vdev);
 out_device:
 	vfio_fsl_uninit_device(vdev);
-out_reflck:
-	vfio_fsl_mc_reflck_put(vdev->reflck);
 out_uninit:
 	vfio_uninit_group_dev(&vdev->vdev);
 	kfree(vdev);
@@ -676,7 +571,6 @@  static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
 	dprc_remove_devices(mc_dev, NULL, 0);
 	vfio_fsl_uninit_device(vdev);
 	vfio_uninit_group_dev(&vdev->vdev);
-	vfio_fsl_mc_reflck_put(vdev->reflck);
 
 	kfree(vdev);
 	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index 0d9f3002df7f51..77e584093a233d 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -120,7 +120,7 @@  static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
 	if (start != 0 || count != 1)
 		return -EINVAL;
 
-	mutex_lock(&vdev->reflck->lock);
+	mutex_lock(&vdev->vdev.dev_set->lock);
 	ret = fsl_mc_populate_irq_pool(mc_cont,
 			FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
 	if (ret)
@@ -129,7 +129,7 @@  static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
 	ret = vfio_fsl_mc_irqs_allocate(vdev);
 	if (ret)
 		goto unlock;
-	mutex_unlock(&vdev->reflck->lock);
+	mutex_unlock(&vdev->vdev.dev_set->lock);
 
 	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
 		s32 fd = *(s32 *)data;
@@ -154,7 +154,7 @@  static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
 	return 0;
 
 unlock:
-	mutex_unlock(&vdev->reflck->lock);
+	mutex_unlock(&vdev->vdev.dev_set->lock);
 	return ret;
 
 }
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
index 89700e00e77d10..4ad63ececb914b 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
@@ -22,11 +22,6 @@  struct vfio_fsl_mc_irq {
 	char            *name;
 };
 
-struct vfio_fsl_mc_reflck {
-	struct kref		kref;
-	struct mutex		lock;
-};
-
 struct vfio_fsl_mc_region {
 	u32			flags;
 	u32			type;
@@ -39,9 +34,7 @@  struct vfio_fsl_mc_device {
 	struct vfio_device		vdev;
 	struct fsl_mc_device		*mc_dev;
 	struct notifier_block        nb;
-	int				refcnt;
 	struct vfio_fsl_mc_region	*regions;
-	struct vfio_fsl_mc_reflck   *reflck;
 	struct mutex         igate;
 	struct vfio_fsl_mc_irq      *mc_irqs;
 };