Add a default_viommu_ops with a new op for cache invalidation, similar to
the cache_invalidate_user op in struct iommu_domain_ops, but wider: an
IOMMU driver that allocated a nested domain with a core-managed viommu can
use the same viommu pointer for this cache invalidation API.

ARM SMMUv3, for example, supports IOTLB and ATC device cache invalidations.
The IOTLB invalidation is per-VMID, currently held by a parent S2 domain.
The ATC invalidation is per-device (Stream ID) and should be translated via
a virtual device ID lookup table. Either case fits the viommu context.

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/iommufd/iommufd_private.h |  2 ++
 drivers/iommu/iommufd/viommu.c          |  3 +++
 include/linux/iommu.h                   |  4 ++++
 include/linux/iommufd.h                 | 19 +++++++++++++++++++
 4 files changed, 28 insertions(+)

@@ -536,6 +536,8 @@ struct iommufd_viommu {
struct iommufd_hwpt_paging *hwpt;
struct xarray vdev_ids;
+ const struct iommufd_viommu_ops *ops;
+
unsigned int type;
};
@@ -27,6 +27,7 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
struct iommufd_hwpt_paging *hwpt_paging;
struct iommufd_viommu *viommu;
struct iommufd_device *idev;
+ struct iommu_domain *domain;
int rc;
if (cmd->flags)
@@ -46,6 +47,7 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
rc = -EINVAL;
goto out_put_hwpt;
}
+ domain = hwpt_paging->common.domain;
if (cmd->type != IOMMU_VIOMMU_TYPE_DEFAULT) {
rc = -EOPNOTSUPP;
@@ -61,6 +63,7 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
viommu->type = cmd->type;
viommu->ictx = ucmd->ictx;
viommu->hwpt = hwpt_paging;
+ viommu->ops = domain->ops->default_viommu_ops;
viommu->iommu_dev = idev->dev->iommu->iommu_dev;
refcount_inc(&viommu->hwpt->common.obj.users);
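
For illustration, a minimal sketch (not part of this patch) of how the core
could route a user invalidation request through the new op; the wrapper name
is hypothetical, only viommu->ops->cache_invalidate comes from this series:

static int iommufd_viommu_do_cache_invalidate(struct iommufd_viommu *viommu,
					      struct iommu_user_data_array *array)
{
	/* A viommu whose driver provides no default_viommu_ops has no op set */
	if (!viommu->ops || !viommu->ops->cache_invalidate)
		return -EOPNOTSUPP;
	return viommu->ops->cache_invalidate(viommu, array);
}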
@@ -680,6 +680,8 @@ struct iommu_ops {
* array->entry_num to report the number of handled
* invalidation requests. The driver data structure
* must be defined in include/uapi/linux/iommufd.h
+ * @default_viommu_ops: Driver can support a core-allocated, core-managed
+ *			viommu object by providing a set of default viommu ops.
* @iova_to_phys: translate iova to physical address
* @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
* including no-snoop TLPs on PCIe or other platform
@@ -712,6 +714,8 @@ struct iommu_domain_ops {
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
dma_addr_t iova);
+ const struct iommufd_viommu_ops *default_viommu_ops;
+
bool (*enforce_cache_coherency)(struct iommu_domain *domain);
int (*set_pgtable_quirks)(struct iommu_domain *domain,
unsigned long quirks);
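
For illustration, a sketch of how a driver could opt in (all my_* names are
hypothetical placeholders): publishing default_viommu_ops from the paging
domain ops lets iommufd_viommu_alloc_ioctl() above pick up the driver's
invalidation handler for a core-allocated viommu:

static const struct iommufd_viommu_ops my_viommu_ops = {
	.cache_invalidate	= my_viommu_cache_invalidate,
};

static const struct iommu_domain_ops my_paging_domain_ops = {
	.attach_dev		= my_attach_dev,
	.iova_to_phys		= my_iova_to_phys,
	.default_viommu_ops	= &my_viommu_ops,
};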
@@ -17,6 +17,25 @@ struct iommufd_ctx;
struct iommufd_access;
struct file;
struct iommu_group;
+struct iommufd_viommu;
+struct iommu_user_data_array;
+
+/**
+ * struct iommufd_viommu_ops - viommu specific operations
+ * @cache_invalidate: Flush hardware cache used by a viommu. It can be used for
+ * any IOMMU hardware specific cache as long as a viommu has
+ * enough information to identify it: for example, a VMID or
+ * a vdev_id lookup table.
+ * The @array passes in the cache invalidation requests, in the
+ * form of a driver data structure. The driver must update
+ * array->entry_num to report the number of handled requests.
+ * The data structure of the array entry must be defined in
+ * include/uapi/linux/iommufd.h
+ */
+struct iommufd_viommu_ops {
+ int (*cache_invalidate)(struct iommufd_viommu *viommu,
+ struct iommu_user_data_array *array);
+};
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id);
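
For illustration, a sketch of what a driver-side handler might look like
(the uapi entry struct, its data type enum, and the per-entry helper are
hypothetical placeholders; iommu_copy_struct_from_user_array() is the
existing helper used by cache_invalidate_user implementations):

static int my_viommu_cache_invalidate(struct iommufd_viommu *viommu,
				      struct iommu_user_data_array *array)
{
	struct my_inv_entry ent; /* hypothetical uapi structure */
	u32 i;
	int rc = 0;

	for (i = 0; i != array->entry_num; i++) {
		/* __reserved stands in for the last member of the entry */
		rc = iommu_copy_struct_from_user_array(&ent, array,
						       MY_INV_DATA_TYPE, i,
						       __reserved);
		if (rc)
			break;
		/*
		 * Per-VMID IOTLB flush, or per-device ATC flush after
		 * translating the guest device ID via viommu->vdev_ids
		 */
		rc = my_handle_one_invalidation(viommu, &ent);
		if (rc)
			break;
	}
	/* Report the number of handled requests, per the kernel-doc above */
	array->entry_num = i;
	return rc;
}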
Add a default_viommu_ops with a new op for cache invaldiation, similar to the cache_invalidate_user op in structure iommu_domain_ops, but wider. An IOMMU driver that allocated a nested domain with a core-managed viommu is able to use the same viommu pointer for this cache invalidation API. ARM SMMUv3 for example supports IOTLB and ATC device cache invaldiations. The IOTLB invalidation is per-VMID, held currently by a parent S2 domain. The ATC invalidation is per device (Stream ID) that should be tranlsated by a virtual device ID lookup table. Either case fits the viommu context. Signed-off-by: Nicolin Chen <nicolinc@nvidia.com> --- drivers/iommu/iommufd/iommufd_private.h | 2 ++ drivers/iommu/iommufd/viommu.c | 3 +++ include/linux/iommu.h | 4 ++++ include/linux/iommufd.h | 19 +++++++++++++++++++ 4 files changed, 28 insertions(+)