diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1571,6 +1571,7 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
struct iommu_probe_info pinf = {
.dev = dev,
.is_dma_configure = true,
+ .is_acpi = true,
};
/* Serialise to make dev->iommu stable under our potential fwspec */
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
+obj-$(CONFIG_ACPI_VIOT) += viot_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
diff --git a/drivers/iommu/viot_iommu.c b/drivers/iommu/viot_iommu.c
new file mode 100644
--- /dev/null
+++ b/drivers/iommu/viot_iommu.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
+ */
+#include <linux/acpi_viot.h>
+#include <linux/iommu.h>
+#include <linux/iommu-driver.h>
+
+struct parse_info {
+ struct iommu_probe_info *pinf;
+ const struct iommu_ops *ops;
+ u32 *ids;
+};
+
+static int parse_single_iommu(struct viot_iommu *viommu, u32 epid, void *_info)
+{
+ struct fwnode_handle *fwnode = viommu->fwnode;
+ struct parse_info *info = _info;
+ struct iommu_probe_info *pinf = info->pinf;
+ struct iommu_device *iommu;
+
+ /* We're not translating ourselves */
+ if (device_match_fwnode(pinf->dev, fwnode))
+ return -ENODEV;
+
+ iommu = iommu_device_from_fwnode_pinf(pinf, info->ops, fwnode);
+ if (IS_ERR(iommu)) {
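+ /*
+  * A deferral can never resolve if the virtio-iommu driver is not
+  * compiled in, so treat the device as untranslated rather than
+  * deferring forever.
+  */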
+ if (!IS_ENABLED(CONFIG_VIRTIO_IOMMU) &&
+ iommu == ERR_PTR(-EPROBE_DEFER))
+ return -ENODEV;
+ return PTR_ERR(iommu);
+ }
+ iommu_fw_cache_id(pinf, epid);
+ return 0;
+}
+
+static int parse_read_ids(struct viot_iommu *viommu, u32 epid, void *_info)
+{
+ struct parse_info *info = _info;
+
+ *info->ids = epid;
+ info->ids++;
+ return 0;
+}
+
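+/* Re-walk the VIOT to rebuild the id list when the cached array overflows */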
+static int viot_get_u32_ids(struct iommu_probe_info *pinf, u32 *ids)
+{
+ struct parse_info info = { .pinf = pinf, .ids = ids };
+
+ return viot_iommu_for_each_id(pinf->dev, parse_read_ids, &info);
+}
+
+struct iommu_device *
+__iommu_viot_get_single_iommu(struct iommu_probe_info *pinf,
+ const struct iommu_ops *ops)
+{
+ struct parse_info info = { .pinf = pinf, .ops = ops };
+ int err;
+
+ if (!pinf->is_dma_configure || !pinf->is_acpi)
+ return ERR_PTR(-ENODEV);
+
+ iommu_fw_clear_cache(pinf);
+ err = viot_iommu_for_each_id(pinf->dev, parse_single_iommu, &info);
+ if (err)
+ return ERR_PTR(err);
+ pinf->get_u32_ids = viot_get_u32_ids;
+ return iommu_fw_finish_get_single(pinf);
+}
+EXPORT_SYMBOL(__iommu_viot_get_single_iommu);
diff --git a/include/linux/iommu-driver.h b/include/linux/iommu-driver.h
--- a/include/linux/iommu-driver.h
+++ b/include/linux/iommu-driver.h
@@ -45,6 +45,7 @@ struct iommu_probe_info {
u32 cached_ids[8];
bool defer_setup : 1;
bool is_dma_configure : 1;
+ bool is_acpi : 1;
bool cached_single_iommu : 1;
};
@@ -188,4 +189,28 @@ static inline int iommu_dummy_of_xlate(struct device *dev,
return 0;
}
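+/* Evaluate to @a unless it failed with -ENODEV, in which case use @b */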
+#define __iommu_first(a, b) \
+ ({ \
+ struct iommu_device *a_dev = a; \
+ a_dev != ERR_PTR(-ENODEV) ? a_dev : (b); \
+ })
+
+#if IS_ENABLED(CONFIG_ACPI_VIOT)
+struct iommu_device *
+__iommu_viot_get_single_iommu(struct iommu_probe_info *pinf,
+ const struct iommu_ops *ops);
+#else
+static inline struct iommu_device *
+__iommu_viot_get_single_iommu(struct iommu_probe_info *pinf,
+ const struct iommu_ops *ops)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
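+
+/*
+ * Prefer the ACPI VIOT description when one exists, otherwise fall
+ * back to the usual OF parsing.
+ */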
+#define iommu_viot_get_single_iommu(pinf, ops, drv_struct, member) \
+ container_of_err( \
+ __iommu_first(__iommu_viot_get_single_iommu(pinf, ops), \
+ __iommu_of_get_single_iommu(pinf, ops, -1)), \
+ drv_struct, member)
+
#endif
This is the ACPI VIOT version of iommu_of_get_single_iommu(). It parses
the ACPI table, confirms all entries point to a single iommu_driver, and
then returns a pointer to it.

Also cache the u32 id list in the iommu_probe_info and provide a getter
function which re-parses the table in case we overflow the cache.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/acpi/scan.c          |  1 +
 drivers/iommu/Makefile       |  1 +
 drivers/iommu/viot_iommu.c   | 70 ++++++++++++++++++++++++++++++++++++
 include/linux/iommu-driver.h | 25 +++++++++++++
 4 files changed, 97 insertions(+)
 create mode 100644 drivers/iommu/viot_iommu.c
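As a usage sketch for reviewers, the expected call site mirrors the
existing iommu_of_get_single_iommu() pattern. All driver-side names
below (viommu_probe_device, viommu_ops, struct viommu_dev and its
'iommu' member) are illustrative assumptions modelled on virtio-iommu,
not part of this patch:

static struct iommu_device *
viommu_probe_device(struct iommu_probe_info *pinf)
{
        struct viommu_dev *viommu;

        /* Resolve the one IOMMU instance every VIOT endpoint entry for
         * this device points at; DT systems fall back to OF parsing.
         */
        viommu = iommu_viot_get_single_iommu(pinf, &viommu_ops,
                                             struct viommu_dev, iommu);
        if (IS_ERR(viommu))
                return ERR_CAST(viommu);

        /* ... driver specific endpoint setup ... */
        return &viommu->iommu;
}

Because __iommu_first() keeps the first result that is not -ENODEV, a
VIOT description wins when one is present, while DT-only systems reach
__iommu_of_get_single_iommu() exactly as before.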