@@ -386,28 +386,19 @@ void iommufd_device_detach(struct iommufd_device *idev)
{
struct iommufd_hw_pagetable *hwpt = idev->hwpt;
- mutex_lock(&hwpt->ioas->mutex);
mutex_lock(&hwpt->devices_lock);
list_del(&idev->devices_item);
- if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) {
- if (list_empty(&hwpt->devices)) {
- iopt_table_remove_domain(&hwpt->ioas->iopt,
- hwpt->domain);
- list_del(&hwpt->hwpt_item);
- }
+ if (!iommufd_hw_pagetable_has_group(hwpt, idev->group))
iommu_detach_group(hwpt->domain, idev->group);
- }
iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev);
+ idev->hwpt = NULL;
mutex_unlock(&hwpt->devices_lock);
- mutex_unlock(&hwpt->ioas->mutex);
if (hwpt->auto_domain)
iommufd_object_destroy_user(idev->ictx, &hwpt->obj);
else
refcount_dec(&hwpt->obj.users);
- idev->hwpt = NULL;
-
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);
@@ -769,10 +760,6 @@ iommufd_device_selftest_attach(struct iommufd_ctx *ictx,
void iommufd_device_selftest_detach(struct iommufd_ctx *ictx,
struct iommufd_hw_pagetable *hwpt)
{
- mutex_lock(&hwpt->ioas->mutex);
- iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain);
- list_del(&hwpt->hwpt_item);
- mutex_unlock(&hwpt->ioas->mutex);
refcount_dec(&hwpt->obj.users);
}
#endif
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -13,7 +13,17 @@ void iommufd_hw_pagetable_destroy(struct iommufd_object *obj)
WARN_ON(!list_empty(&hwpt->devices));
- iommu_domain_free(hwpt->domain);
+ if (!list_empty(&hwpt->hwpt_item)) {
+ mutex_lock(&hwpt->ioas->mutex);
+ list_del(&hwpt->hwpt_item);
+ mutex_unlock(&hwpt->ioas->mutex);
+
+ iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain);
+ }
+
+ if (hwpt->domain)
+ iommu_domain_free(hwpt->domain);
+
refcount_dec(&hwpt->ioas->obj.users);
mutex_destroy(&hwpt->devices_lock);
}
@@ -37,21 +47,22 @@ iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
if (IS_ERR(hwpt))
return hwpt;
- hwpt->domain = iommu_domain_alloc(dev->bus);
- if (!hwpt->domain) {
- rc = -ENOMEM;
- goto out_abort;
- }
-
INIT_LIST_HEAD(&hwpt->devices);
INIT_LIST_HEAD(&hwpt->hwpt_item);
mutex_init(&hwpt->devices_lock);
/* Pairs with iommufd_hw_pagetable_destroy() */
refcount_inc(&ioas->obj.users);
hwpt->ioas = ioas;
+
+ hwpt->domain = iommu_domain_alloc(dev->bus);
+ if (!hwpt->domain) {
+ rc = -ENOMEM;
+ goto out_abort;
+ }
+
return hwpt;
out_abort:
- iommufd_object_abort(ictx, &hwpt->obj);
+ iommufd_object_abort_and_destroy(ictx, &hwpt->obj);
return ERR_PTR(rc);
}
A HWPT is permanently associated with an IOAS when it is created. Remove the
strange situation where a refcount != 0 HWPT can have been disconnected from
the IOAS by putting all the IOAS-related destruction in the object destroy
function.

Initializing a HWPT is two stages: we have to allocate it, then attach it to
a device and populate the domain. Once the domain is populated it is fully
linked to the IOAS.

Arrange things so that all the error unwinds flow through
iommufd_hw_pagetable_destroy() and allow it to handle all cases.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/iommufd/device.c       | 17 ++---------------
 drivers/iommu/iommufd/hw_pagetable.c | 27 +++++++++++++++++++--------
 2 files changed, 21 insertions(+), 23 deletions(-)
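For readers following along, below is a rough sketch (not part of this patch)
of the attach-side counterpart that the reworked iommufd_hw_pagetable_destroy()
is meant to unwind: the first attach is what calls iopt_table_add_domain() and
links hwpt_item onto the IOAS, which is the "fully linked" state that destroy
now detects via list_empty(&hwpt->hwpt_item). The function name
example_hwpt_attach() is made up, the ioas->hwpt_list field and the locking
shown are assumptions based on the iommufd code of this vintage, and error
handling is simplified; the real attach path in device.c may differ in detail.

/*
 * Illustrative sketch only -- not part of the patch.  Assumes the
 * declarations from drivers/iommu/iommufd/iommufd_private.h.  Serialization
 * of ioas->hwpt_list (normally under ioas->mutex) is elided for brevity.
 */
static int example_hwpt_attach(struct iommufd_device *idev,
			       struct iommufd_hw_pagetable *hwpt)
{
	int rc;

	mutex_lock(&hwpt->devices_lock);

	if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) {
		rc = iommu_attach_group(hwpt->domain, idev->group);
		if (rc)
			goto out_unlock;

		/*
		 * First device on this HWPT: populate the domain into the
		 * IOAS and make the HWPT visible on the IOAS list.  From
		 * here on the HWPT is "fully linked" and
		 * iommufd_hw_pagetable_destroy() must undo both steps.
		 */
		if (list_empty(&hwpt->devices)) {
			rc = iopt_table_add_domain(&hwpt->ioas->iopt,
						   hwpt->domain);
			if (rc)
				goto out_detach;
			list_add_tail(&hwpt->hwpt_item,
				      &hwpt->ioas->hwpt_list);
		}
	}

	idev->hwpt = hwpt;
	refcount_inc(&hwpt->obj.users);
	list_add(&idev->devices_item, &hwpt->devices);
	mutex_unlock(&hwpt->devices_lock);
	return 0;

out_detach:
	iommu_detach_group(hwpt->domain, idev->group);
out_unlock:
	mutex_unlock(&hwpt->devices_lock);
	return rc;
}

The point of the sketch is the ordering: because the IOAS linkage happens last
on attach, a HWPT that failed partway through (or was never attached at all)
reaches destroy with an empty hwpt_item and possibly a NULL domain, and the
new checks added to iommufd_hw_pagetable_destroy() above handle both cases.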