
[v2,6/8] iommu/vt-d: Implement first level page table ops

Message ID 20191128022550.9832-7-baolu.lu@linux.intel.com (mailing list archive)
State New, archived
Series Use 1st-level for DMA remapping

Commit Message

Baolu Lu Nov. 28, 2019, 2:25 a.m. UTC
Implement the page table callbacks for the first-level page table.

Cc: Ashok Raj <ashok.raj@intel.com>
Cc: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: Liu Yi L <yi.l.liu@intel.com>
Cc: Yi Sun <yi.y.sun@linux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/intel-iommu.c | 56 +++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
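
For context when reading the hunks below: the pgtable_ops structure itself is
introduced earlier in this series, so its definition is not visible here.
Judging from the callback initializer and the function signatures in this
patch, the interface plausibly looks like the following sketch (field names
are taken from the patch; the exact definition may differ):

struct pgtable_ops {
	int		(*map_range)(struct dmar_domain *domain,
				     unsigned long iova, phys_addr_t paddr,
				     size_t size, int prot);
	struct page	*(*unmap_range)(struct dmar_domain *domain,
					unsigned long iova, size_t size);
	phys_addr_t	(*iova_to_phys)(struct dmar_domain *domain,
					unsigned long iova);
	void		(*flush_tlb_range)(struct dmar_domain *domain,
					   struct intel_iommu *iommu,
					   unsigned long iova, size_t size,
					   bool ih);
};

The intent, per the series title, is presumably that a domain is handed
whichever ops table matches its page-table format, so callers can map, unmap,
translate and flush without knowing whether first- or second-level tables
back the domain.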

Patch
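
One detail worth noting before the diff: the VT-d page-selective IOTLB
invalidation interface addresses a naturally aligned power-of-two range of
pages, so first_lvl_domain_flush_tlb_range rounds the page count up to a
power of two (mask = ilog2(__roundup_pow_of_two(pages))) and aligns the
address down to the matching boundary. A standalone sketch of that
computation, with made-up input values (VTD_PAGE_SHIFT is 12 in the driver):

#include <stdio.h>

#define VTD_PAGE_SHIFT	12	/* 4KiB VT-d pages, as in the driver */

int main(void)
{
	unsigned long iova = 0x12345000UL;	/* hypothetical start address */
	unsigned long pages = 3;		/* hypothetical 12KiB flush */
	unsigned int mask = 0;

	/* ilog2(__roundup_pow_of_two(pages)): 3 pages -> 4 -> mask = 2 */
	while ((1UL << mask) < pages)
		mask++;

	/* align iova down to a 2^mask-page boundary, as the invalidation
	 * descriptor requires a naturally aligned region */
	iova &= ~0UL << (VTD_PAGE_SHIFT + mask);

	/* prints: mask=2 iova=0x12344000 */
	printf("mask=%u iova=%#lx\n", mask, iova);
	return 0;
}

When aligned_nrpages() returns zero, the else branch in the patch falls back
to flushing the domain's entire address space (mask = MAX_AGAW_PFN_WIDTH,
iova = 0, pages = -1).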

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a314892ee72b..695a7a5fbe8e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -414,6 +414,7 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
 }
 
 const struct iommu_ops intel_iommu_ops;
+static const struct pgtable_ops first_lvl_pgtable_ops;
 static const struct pgtable_ops second_lvl_pgtable_ops;
 
 static bool translation_pre_enabled(struct intel_iommu *iommu)
@@ -2330,6 +2331,61 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	return 0;
 }
 
+static int first_lvl_domain_map_range(struct dmar_domain *domain,
+				      unsigned long iova, phys_addr_t paddr,
+				      size_t size, int prot)
+{
+	return first_lvl_map_range(domain, PAGE_ALIGN(iova),
+				   round_up(iova + size, PAGE_SIZE),
+				   PAGE_ALIGN(paddr), prot);
+}
+
+static struct page *
+first_lvl_domain_unmap_range(struct dmar_domain *domain,
+			     unsigned long iova, size_t size)
+{
+	return first_lvl_unmap_range(domain, PAGE_ALIGN(iova),
+				     round_up(iova + size, PAGE_SIZE));
+}
+
+static phys_addr_t
+first_lvl_domain_iova_to_phys(struct dmar_domain *domain,
+			      unsigned long iova)
+{
+	return first_lvl_iova_to_phys(domain, iova);
+}
+
+static void
+first_lvl_domain_flush_tlb_range(struct dmar_domain *domain,
+				 struct intel_iommu *iommu,
+				 unsigned long iova, size_t size, bool ih)
+{
+	unsigned long pages = aligned_nrpages(iova, size);
+	u16 did = domain->iommu_did[iommu->seq_id];
+	unsigned int mask;
+
+	if (pages) {
+		mask = ilog2(__roundup_pow_of_two(pages));
+		iova &= (u64)-1 << (VTD_PAGE_SHIFT + mask);
+	} else {
+		mask = MAX_AGAW_PFN_WIDTH;
+		iova = 0;
+		pages = -1;
+	}
+
+	iommu->flush.p_iotlb_inv(iommu, did, domain->default_pasid,
+				 iova, pages, ih);
+
+	iommu_flush_dev_iotlb(domain, iova, mask);
+}
+
+static const struct pgtable_ops first_lvl_pgtable_ops = {
+	.map_range		= first_lvl_domain_map_range,
+	.unmap_range		= first_lvl_domain_unmap_range,
+	.iova_to_phys		= first_lvl_domain_iova_to_phys,
+	.flush_tlb_range	= first_lvl_domain_flush_tlb_range,
+};
+
 static int second_lvl_domain_map_range(struct dmar_domain *domain,
 				       unsigned long iova, phys_addr_t paddr,
 				       size_t size, int prot)