[RFC,v2,01/10] iommu: Add APIs for multiple domains per device

Message ID 20180830040922.30426-2-baolu.lu@linux.intel.com (mailing list archive)
State New, archived
Series vfio/mdev: IOMMU aware mediated device

Commit Message

Baolu Lu Aug. 30, 2018, 4:09 a.m. UTC
Sharing a physical PCI device at a finer granularity is
becoming an industry consensus, and IOMMU vendors are
working to support such sharing as well as possible.
Among these efforts, the capability of finer-granularity
DMA isolation is a common requirement for security
reasons. With finer-granularity DMA isolation, all DMA
requests from or to a subset of a physical PCI device
can be protected by the IOMMU. As a result, software
needs a way to attach multiple domains to a physical
PCI device. One example of such a usage model is Intel
Scalable IOV [1] [2]. The Intel VT-d 3.0 spec [3]
introduces scalable mode, which enables PASID-granularity
DMA isolation.

This patch adds APIs to support multiple domains per
device. To ease the discussion, we call a domain 'a
domain in auxiliary mode', or simply an 'auxiliary
domain', when it is one of multiple domains attached
to a physical device.

The APIs include (see the usage sketch after the list):

* iommu_capable(IOMMU_CAP_AUX_DOMAIN)
  - Reports the capability of supporting multiple
    domains per device.

* iommu_en(dis)able_aux_domain(struct device *dev)
  - Enable/disable the multiple-domains capability for
    the device referenced by @dev.

* iommu_auxiliary_id(struct iommu_domain *domain)
  - Return the ID used for finer-granularity DMA
    translation. For the Intel Scalable IOV usage model,
    this will be a PASID. A device which supports
    Scalable IOV needs to write this ID to its device
    register so that DMA requests can be tagged with
    the right PASID prefix.
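
Below is a rough usage sketch, not part of this patch, of
how a caller (e.g. a parent driver of mediated devices)
might consume these interfaces. The helper
my_device_set_pasid() is made up for illustration, and the
auxiliary-domain attach interface is only introduced by
later patches in this series:

static int example_setup_aux_domain(struct device *dev,
				    struct iommu_domain *domain)
{
	int pasid, ret;

	if (!iommu_capable(dev->bus, IOMMU_CAP_AUX_DOMAIN))
		return -ENODEV;

	ret = iommu_enable_aux_domain(dev);
	if (ret)
		return ret;

	/*
	 * Attach @domain to @dev as an auxiliary domain here
	 * (the attach interface is added by later patches),
	 * then ask for the ID (a PASID with Intel Scalable
	 * IOV) that was assigned to the domain.
	 */
	pasid = iommu_auxiliary_id(domain);
	if (pasid < 0) {
		iommu_disable_aux_domain(dev);
		return pasid;
	}

	/* Program the PASID into a device-specific register. */
	return my_device_set_pasid(dev, pasid);
}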

Many people were involved in the discussions of this
design. They are:

Kevin Tian <kevin.tian@intel.com>
Liu Yi L <yi.l.liu@intel.com>
Ashok Raj <ashok.raj@intel.com>
Sanjay Kumar <sanjay.k.kumar@intel.com>
Alex Williamson <alex.williamson@redhat.com>
Jean-Philippe Brucker <jean-philippe.brucker@arm.com>

and some discussions can be found here [4].

[1] https://software.intel.com/en-us/download/intel-scalable-io-virtualization-technical-specification
[2] https://schd.ws/hosted_files/lc32018/00/LC3-SIOV-final.pdf
[3] https://software.intel.com/en-us/download/intel-virtualization-technology-for-directed-io-architecture-specification
[4] https://lkml.org/lkml/2018/7/26/4

Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Liu Yi L <yi.l.liu@intel.com>
Suggested-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/iommu.c | 29 +++++++++++++++++++++++++++++
 include/linux/iommu.h | 13 +++++++++++++
 2 files changed, 42 insertions(+)

Patch

diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8c15c5980299..2c6faf417dd5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2014,3 +2014,32 @@  int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
+
+int iommu_enable_aux_domain(struct device *dev)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->enable_auxd)
+		return ops->enable_auxd(dev);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iommu_enable_aux_domain);
+
+void iommu_disable_aux_domain(struct device *dev)
+{
+	const struct iommu_ops *ops = dev->bus->iommu_ops;
+
+	if (ops && ops->disable_auxd)
+		ops->disable_auxd(dev);
+}
+EXPORT_SYMBOL_GPL(iommu_disable_aux_domain);
+
+int iommu_auxiliary_id(struct iommu_domain *domain)
+{
+	if (domain->ops->auxd_id)
+		return domain->ops->auxd_id(domain);
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iommu_auxiliary_id);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 87994c265bf5..ffd20b315bee 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -101,6 +101,8 @@  enum iommu_cap {
 					   transactions */
 	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
 	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
+	IOMMU_CAP_AUX_DOMAIN,		/* IOMMU supports multiple domains per
+					   device */
 };
 
 /*
@@ -185,6 +187,9 @@  struct iommu_resv_region {
  * @domain_get_windows: Return the number of windows for a domain
  * @of_xlate: add OF master IDs to iommu grouping
  * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @enable_auxd: enable multiple domains per device support
+ * @disable_auxd: disable multiple domains per device support
+ * @auxd_id: return the id of an auxiliary domain
  */
 struct iommu_ops {
 	bool (*capable)(enum iommu_cap);
@@ -231,6 +236,10 @@  struct iommu_ops {
 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
 	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
 
+	int (*enable_auxd)(struct device *dev);
+	void (*disable_auxd)(struct device *dev);
+	int (*auxd_id)(struct iommu_domain *domain);
+
 	unsigned long pgsize_bitmap;
 };
 
@@ -400,6 +409,10 @@  void iommu_fwspec_free(struct device *dev);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
 
+int iommu_enable_aux_domain(struct device *dev);
+void iommu_disable_aux_domain(struct device *dev);
+int iommu_auxiliary_id(struct iommu_domain *domain);
+
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};