
[v4] vfio/type1: Adopt fast IOTLB flush interface when unmapping IOVAs

Message ID 1517391946-7830-1-git-send-email-suravee.suthikulpanit@amd.com (mailing list archive)
State New, archived

Commit Message

Suthikulpanit, Suravee Jan. 31, 2018, 9:45 a.m. UTC
Currently, VFIO IOMMU type1 unmaps IOVA pages synchronously, which requires
an IOTLB flush for every IOVA unmap. This results in a large number of IOTLB
flushes during initialization of pass-through devices.

This can be avoided using the asynchronous (fast) IOTLB flush interface.

Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---

Changes from v3 (https://lkml.org/lkml/2018/1/21/244)
 * Refactor the code into unmap_unpin_fast() and unmap_unpin_slow()
   to improve code readability.
 * Fix the logic in vfio_unmap_unpin() to fall back to unmap_unpin_slow()
   only for the failing IOVA unmapping, and continue subsequent unmappings
   with unmap_unpin_fast(). (per Alex)
 * Fix the error handling to warn only once when fast unmapping fails.
 * Remove reference to GPU in the commit message.

 drivers/vfio/vfio_iommu_type1.c | 127 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 116 insertions(+), 11 deletions(-)
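
For readers unfamiliar with the deferred ("fast") flush interface the series adopts,
here is a minimal sketch contrasting it with the synchronous path. The iommu_unmap(),
iommu_unmap_fast(), iommu_tlb_range_add() and iommu_tlb_sync() calls are the ones used
in the patch below; the helper names and the batching loop are illustrative only and
not part of the patch.

#include <linux/iommu.h>

/* Synchronous path: every unmap implies an IOTLB flush of its own. */
static void sketch_unmap_sync(struct iommu_domain *dom,
			      unsigned long iova, size_t len)
{
	iommu_unmap(dom, iova, len);
}

/*
 * Deferred path: unmap without flushing, queue each range, then issue a
 * single flush for the whole batch.  Pinned pages may only be released
 * after the sync, which is why the patch keeps a list of unmapped regions
 * until vfio_sync_unpin() runs.
 */
static void sketch_unmap_deferred(struct iommu_domain *dom,
				  unsigned long *iova, size_t *len, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		size_t unmapped = iommu_unmap_fast(dom, iova[i], len[i]);

		if (!unmapped)
			continue;	/* the real code falls back to iommu_unmap() */

		iommu_tlb_range_add(dom, iova[i], unmapped);
	}

	iommu_tlb_sync(dom);	/* one flush covers every queued range */
}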

Comments

Suthikulpanit, Suravee Feb. 1, 2018, 5:11 a.m. UTC | #1
Alex,

On 1/31/18 4:45 PM, Suravee Suthikulpanit wrote:
> Currently, VFIO IOMMU type1 unmaps IOVA pages synchronously, which requires
> an IOTLB flush for every IOVA unmap. This results in a large number of IOTLB
> flushes during initialization of pass-through devices.
> 
> This can be avoided using the asynchronous (fast) IOTLB flush interface.
> 
> Cc: Alex Williamson <alex.williamson@redhat.com>
> Cc: Joerg Roedel <joro@8bytes.org>
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
> ---
> 
> Changes from v3 (https://lkml.org/lkml/2018/1/21/244)
>   * Refactor the code into unmap_unpin_fast() and unmap_unpin_slow()
>     to improve code readability.
>   * Fix the logic in vfio_unmap_unpin() to fall back to unmap_unpin_slow()
>     only for the failing IOVA unmapping, and continue subsequent unmappings
>     with unmap_unpin_fast(). (per Alex)
>   * Fix the error handling to warn only once when fast unmapping fails.
>   * Remove reference to GPU in the commit message.

Please ignore v4. I found an issue in the error-handling logic. Also, I need to change
the return value back to size_t (as was discussed in a separate thread).

Sorry for the confusion. I'll clean up and send out v5.

Thanks,
Suravee
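
As a side note on the size_t remark above: assuming the 4.15-era prototypes, in which
iommu_unmap() and iommu_unmap_fast() return a size_t byte count (0 meaning nothing was
unmapped), the v4 helpers in the patch below return ssize_t so that unmap_unpin_fast()
can also report -ENOMEM from its tracking-entry allocation. With a size_t return, one
option is to fold that failure into a zero return, roughly as sketched here; the helper
name is made up and this is not the v5 code.

/*
 * Illustrative only: a size_t-returning variant has no way to distinguish
 * an allocation failure from "nothing unmapped" other than returning 0.
 */
static size_t sketch_unmap_fast_size_t(struct iommu_domain *dom,
				       unsigned long iova, size_t len,
				       struct vfio_regions *entry)
{
	size_t unmapped;

	if (!entry)		/* tracking-entry allocation failed */
		return 0;

	unmapped = iommu_unmap_fast(dom, iova, len);
	if (unmapped)
		iommu_tlb_range_add(dom, iova, unmapped);

	return unmapped;	/* 0 on any failure */
}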

Patch

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c580518..bec8512 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -102,6 +102,13 @@  struct vfio_pfn {
 	atomic_t		ref_count;
 };
 
+struct vfio_regions {
+	struct list_head list;
+	dma_addr_t iova;
+	phys_addr_t phys;
+	size_t len;
+};
+
 #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
 					(!list_empty(&iommu->domain_list))
 
@@ -479,6 +486,29 @@  static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
 	return unlocked;
 }
 
+static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
+				struct list_head *regions)
+{
+	long unlocked = 0;
+	struct vfio_regions *entry, *next;
+
+	iommu_tlb_sync(domain->domain);
+
+	list_for_each_entry_safe(entry, next, regions, list) {
+		unlocked += vfio_unpin_pages_remote(dma,
+						    entry->iova,
+						    entry->phys >> PAGE_SHIFT,
+						    entry->len >> PAGE_SHIFT,
+						    false);
+		list_del(&entry->list);
+		kfree(entry);
+	}
+
+	cond_resched();
+
+	return unlocked;
+}
+
 static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
 				  unsigned long *pfn_base, bool do_accounting)
 {
@@ -648,12 +678,78 @@  static int vfio_iommu_type1_unpin_pages(void *iommu_data,
 	return i > npage ? npage : (i > 0 ? i : -EINVAL);
 }
 
+static ssize_t unmap_unpin_slow(struct vfio_domain *domain,
+				struct vfio_dma *dma, dma_addr_t *iova,
+				size_t len, phys_addr_t phys,
+				long *unlocked)
+{
+	ssize_t unmapped = iommu_unmap(domain->domain, *iova, len);
+
+	if (unmapped <= 0)
+		return unmapped;
+
+	*unlocked += vfio_unpin_pages_remote(dma, *iova,
+					     phys >> PAGE_SHIFT,
+					     unmapped >> PAGE_SHIFT,
+					     false);
+	*iova += unmapped;
+	cond_resched();
+	return unmapped;
+}
+
+/*
+ * Generally, VFIO needs to unpin remote pages after each IOTLB flush.
+ * Therefore, when using the IOTLB flush sync interface, VFIO needs to keep track
+ * of these regions (currently using a list).
+ *
+ * This value specifies the maximum number of regions for each IOTLB flush sync.
+ */
+#define VFIO_IOMMU_TLB_SYNC_MAX		512
+
+static ssize_t unmap_unpin_fast(struct vfio_domain *domain,
+				struct vfio_dma *dma, dma_addr_t *iova,
+				size_t len, phys_addr_t phys,
+				struct list_head *unmapped_regions,
+				long *unlocked, int *cnt)
+{
+	struct vfio_regions *entry;
+	ssize_t unmapped;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+
+	unmapped = iommu_unmap_fast(domain->domain, *iova, len);
+	if (unmapped <= 0) {
+		kfree(entry);
+	} else {
+		iommu_tlb_range_add(domain->domain, *iova, unmapped);
+		entry->iova = *iova;
+		entry->phys = phys;
+		entry->len  = unmapped;
+		list_add_tail(&entry->list, unmapped_regions);
+
+		*iova += unmapped;
+		(*cnt)++;
+	}
+
+	if (*cnt >= VFIO_IOMMU_TLB_SYNC_MAX || unmapped <= 0) {
+		*unlocked += vfio_sync_unpin(dma, domain,
+					     unmapped_regions);
+		*cnt = 0;
+	}
+
+	return unmapped;
+}
+
 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 			     bool do_accounting)
 {
 	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
 	struct vfio_domain *domain, *d;
+	struct list_head unmapped_regions;
 	long unlocked = 0;
+	int cnt = 0;
 
 	if (!dma->size)
 		return 0;
@@ -661,6 +757,8 @@  static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 	if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
 		return 0;
 
+	INIT_LIST_HEAD(&unmapped_regions);
+
 	/*
 	 * We use the IOMMU to track the physical addresses, otherwise we'd
 	 * need a much more complicated tracking system.  Unfortunately that
@@ -699,20 +797,27 @@  static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 				break;
 		}
 
-		unmapped = iommu_unmap(domain->domain, iova, len);
-		if (WARN_ON(unmapped <= 0))
-			break;
-
-		unlocked += vfio_unpin_pages_remote(dma, iova,
-						    phys >> PAGE_SHIFT,
-						    unmapped >> PAGE_SHIFT,
-						    false);
-		iova += unmapped;
-
-		cond_resched();
+		/*
+		 * First, try to use fast unmap/unpin. In case of failure,
+		 * sync up to the current point, and switch to the slow unmap/unpin
+		 * path.
+		 */
+		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+					    &unmapped_regions, &unlocked,
+					    &cnt);
+		if (unmapped <= 0) {
+			unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+						    phys, &unlocked);
+			if (WARN_ON(unmapped <= 0))
+				break;
+		}
 	}
 
 	dma->iommu_mapped = false;
+
+	if (cnt)
+		unlocked += vfio_sync_unpin(dma, domain, &unmapped_regions);
+
 	if (do_accounting) {
 		vfio_lock_acct(dma->task, -unlocked, NULL);
 		return 0;