
vfio/type1: Remove Fine Grained Superpages detection

Message ID 0-v1-0eed68063e59+93d-vfio_fgsp_jgg@nvidia.com (mailing list archive)
State New
Series vfio/type1: Remove Fine Grained Superpages detection

Commit Message

Jason Gunthorpe April 8, 2025, 5:39 p.m. UTC
VFIO is looking to enable an optimization where it can rely on the
unmap operation not splitting and returning the size of a larger IOPTE.

However since commits:
  d50651636fb ("iommu/io-pgtable-arm-v7s: Remove split on unmap behavior")
  33729a5fc0ca ("iommu/io-pgtable-arm: Remove split on unmap behavior")

There are no iommu drivers that do split on unmap anymore. Instead all
iommu drivers are expected to unmap the whole contiguous page and return
its size.

Thus, there is no purpose in vfio_test_domain_fgsp() as it is only
checking if the iommu supports 2*PAGE_SIZE as a contiguous page or not.

Currently only AMD v1 supports such a page size so all this logic only
activates on AMD v1.

Remove vfio_test_domain_fgsp() and just rely on a direct 2*PAGE_SIZE check
instead so there is no behavior change.

Maybe it should always activate the iommu_iova_to_phys() scan; it shouldn't
have a performance downside since split is gone.
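
For illustration, the behavior this relies on looks roughly like the sketch
below (not part of the patch; the helper name and the 2M example are only
illustrative):

#include <linux/iommu.h>
#include <linux/mm.h>

/*
 * With split-on-unmap gone, unmapping PAGE_SIZE at an iova that sits inside
 * a larger contiguous IOPTE is expected to tear down the whole IOPTE and
 * return its full size (e.g. 2M), not just PAGE_SIZE.
 */
static size_t example_unmap_one_page(struct iommu_domain *dom,
                                     unsigned long iova)
{
        size_t unmapped = iommu_unmap(dom, iova, PAGE_SIZE);

        /* May be > PAGE_SIZE; the caller can advance by the returned size */
        return unmapped;
}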

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/vfio/vfio_iommu_type1.c | 71 +++++++++------------------------
 1 file changed, 19 insertions(+), 52 deletions(-)


base-commit: 5a7ff05a5717e2ac4f4f83bcdd9033f246e9946b

Comments

Alex Williamson April 8, 2025, 7:23 p.m. UTC | #1
On Tue,  8 Apr 2025 14:39:52 -0300
Jason Gunthorpe <jgg@nvidia.com> wrote:

> VFIO is looking to enable an optimization where it can rely on the
> unmap operation not splitting and returning the size of a larger IOPTE.
> 
> However since commits:
>   d50651636fb ("iommu/io-pgtable-arm-v7s: Remove split on unmap behavior")
>   33729a5fc0ca ("iommu/io-pgtable-arm: Remove split on unmap behavior")
> 
> There are no iommu drivers that do split on unmap anymore. Instead all
> iommu drivers are expected to unmap the whole contiguous page and return
> its size.
> 
> Thus, there is no purpose in vfio_test_domain_fgsp() as it is only
> checking if the iommu supports 2*PAGE_SIZE as a contiguous page or not.
> 
> Currently only AMD v1 supports such a page size so all this logic only
> activates on AMD v1.
> 
> Remove vfio_test_domain_fgsp() and just rely on a direct 2*PAGE_SIZE check
> instead so there is no behavior change.
> 
> Maybe it should always activate the iommu_iova_to_phys() scan; it shouldn't
> have a performance downside since split is gone.

We were never looking for splitting here; in fact, an IOMMU driver that
supports splitting would break the fgsp test.  Nor was the intent ever
to look for 2*PAGE_SIZE support.  This was simply a test: if we map two
contiguous pages and try to unmap only the first, does the IOMMU unmap
only the first page (VT-d), or both pages (AMD v1)?  If both pages are
unmapped, then we expect the same behavior with runtime mappings, i.e.
the IOMMU will unmap larger chunks than we've asked for based on whether
the original mapping was contiguous.  If only one page is unmapped, then
we need to look for contiguous ranges ourselves.
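
A minimal sketch of that probe, with illustrative names (vfio_test_domain_fgsp()
in the patch below is the real thing):

#include <linux/iommu.h>
#include <linux/mm.h>

/* Map two contiguous pages, unmap only the first, and see how much the
 * IOMMU actually tore down. */
static bool probe_fgsp(struct iommu_domain *dom, unsigned long iova,
                       phys_addr_t phys)
{
        size_t unmapped;

        if (iommu_map(dom, iova, phys, 2 * PAGE_SIZE,
                      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL))
                return false;

        unmapped = iommu_unmap(dom, iova, PAGE_SIZE);
        if (unmapped == PAGE_SIZE) {
                /* VT-d style: only the requested page went away */
                iommu_unmap(dom, iova + PAGE_SIZE, PAGE_SIZE);
                return false;
        }
        /* AMD v1 style: the whole 2-page mapping was unmapped -> fgsp */
        return true;
}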

We also previously couldn't rely on pgsize_bitmap to indicate the
physical page sizes supported by the IOMMU, e.g. VT-d essentially
reported PAGE_MASK until a886d5a7e67b ("iommu/vt-d: Report real pgsize
bitmap to iommu core").
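
For reference, a sketch of what the bitmap encodes (SZ_* are the usual kernel
size macros; the helper is purely illustrative):

#include <linux/iommu.h>
#include <linux/sizes.h>

/* pgsize_bitmap is a bitmask of supported IOPTE sizes, e.g. a domain that
 * can only do 4K/2M/1G reports SZ_4K | SZ_2M | SZ_1G, while AMD v1 reports
 * nearly every power of two >= 4K -- which is why a 2 * PAGE_SIZE bit works
 * as a proxy for "fine grained".  size must be a single power of two. */
static bool domain_supports_size(struct iommu_domain *dom, size_t size)
{
        return dom->pgsize_bitmap & size;
}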

I don't recall the optimization being overwhelming in the first place,
so if it's relegated to AMD v1 maybe we should just remove it
altogether rather than introducing this confusing notion that
2*PAGE_SIZE has some particular importance.  Thanks,

Alex

Jason Gunthorpe April 9, 2025, 3:50 p.m. UTC | #2
On Tue, Apr 08, 2025 at 01:23:33PM -0600, Alex Williamson wrote:
> > Remove vfio_test_domain_fgsp() and just rely on a direct 2*PAGE_SIZE check
> > instead so there is no behavior change.
> > 
> > Maybe it should always activate the iommu_iova_to_phys() scan; it shouldn't
> > have a performance downside since split is gone.
> 
> We were never looking for splitting here; in fact, an IOMMU driver that
> supports splitting would break the fgsp test.

Yes, I thought that was the point, as expensive splitting in the driver
would be the main reason not to use the unmap path.

> Nor was the intent ever to look for 2*PAGE_SIZE support.

Maybe, but that is what it ended up doing :)

> I don't recall the optimization being overwhelming in the first place,
> so if it's relegated to AMD v1 maybe we should just remove it
> altogether rather than introducing this confusing notion that
> 2*PAGE_SIZE has some particular importance.  

Okay, let's do that instead. We have more and more cases where we can
get smaller orders than 2M now, and the iova loop is probably going to
be faster than unmapping a small-order page 4k at a time. Only AMDv1
has the ability to store the arbitrary orders.
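
Something along these lines for the common path, i.e. the contiguity scan
always on (just a sketch, helper name and all, not a posted patch):

#include <linux/iommu.h>
#include <linux/mm.h>

/* Scan for a physically contiguous run starting at iova and unmap it in one
 * call -- no fgsp / 2 * PAGE_SIZE special case.  The return value can still
 * exceed the scanned run if the IOMMU stored a larger IOPTE (e.g. AMDv1). */
static size_t unmap_contig_run(struct iommu_domain *dom, dma_addr_t iova,
                               dma_addr_t end)
{
        phys_addr_t phys = iommu_iova_to_phys(dom, iova);
        size_t len = PAGE_SIZE;

        if (!phys)
                return 0;

        while (iova + len < end &&
               iommu_iova_to_phys(dom, iova + len) == phys + len)
                len += PAGE_SIZE;

        return iommu_unmap(dom, iova, len);
}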

Jason

Patch

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0ac56072af9f23..529561bbbef98a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -80,7 +80,6 @@  struct vfio_domain {
 	struct iommu_domain	*domain;
 	struct list_head	next;
 	struct list_head	group_list;
-	bool			fgsp : 1;	/* Fine-grained super pages */
 	bool			enforce_cache_coherency : 1;
 };
 
@@ -1056,6 +1055,7 @@  static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 	LIST_HEAD(unmapped_region_list);
 	struct iommu_iotlb_gather iotlb_gather;
 	int unmapped_region_cnt = 0;
+	bool scan_for_contig;
 	long unlocked = 0;
 
 	if (!dma->size)
@@ -1079,9 +1079,15 @@  static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 		cond_resched();
 	}
 
+	/*
+	 * For historical reasons this has only triggered on AMDv1 page tables,
+	 * though these days it should work everywhere.
+	 */
+	scan_for_contig = !(domain->domain->pgsize_bitmap & (2 * PAGE_SIZE));
 	iommu_iotlb_gather_init(&iotlb_gather);
 	while (iova < end) {
-		size_t unmapped, len;
+		size_t len = PAGE_SIZE;
+		size_t unmapped;
 		phys_addr_t phys, next;
 
 		phys = iommu_iova_to_phys(domain->domain, iova);
@@ -1094,12 +1100,18 @@  static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 		 * To optimize for fewer iommu_unmap() calls, each of which
 		 * may require hardware cache flushing, try to find the
 		 * largest contiguous physical memory chunk to unmap.
+		 *
+		 * If the iova is part of a contiguous page > PAGE_SIZE then
+		 * unmap will unmap the whole contiguous page and return its
+		 * size.
 		 */
-		for (len = PAGE_SIZE;
-		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
-			next = iommu_iova_to_phys(domain->domain, iova + len);
-			if (next != phys + len)
-				break;
+		if (scan_for_contig) {
+			for (; iova + len < end; len += PAGE_SIZE) {
+				next = iommu_iova_to_phys(domain->domain,
+							  iova + len);
+				if (next != phys + len)
+					break;
+			}
 		}
 
 		/*
@@ -1833,49 +1845,6 @@  static int vfio_iommu_replay(struct vfio_iommu *iommu,
 	return ret;
 }
 
-/*
- * We change our unmap behavior slightly depending on whether the IOMMU
- * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
- * for practically any contiguous power-of-two mapping we give it.  This means
- * we don't need to look for contiguous chunks ourselves to make unmapping
- * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
- * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
- * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
- * hugetlbfs is in use.
- */
-static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
-{
-	int ret, order = get_order(PAGE_SIZE * 2);
-	struct vfio_iova *region;
-	struct page *pages;
-	dma_addr_t start;
-
-	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!pages)
-		return;
-
-	list_for_each_entry(region, regions, list) {
-		start = ALIGN(region->start, PAGE_SIZE * 2);
-		if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
-			continue;
-
-		ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
-				IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE,
-				GFP_KERNEL_ACCOUNT);
-		if (!ret) {
-			size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
-
-			if (unmapped == PAGE_SIZE)
-				iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
-			else
-				domain->fgsp = true;
-		}
-		break;
-	}
-
-	__free_pages(pages, order);
-}
-
 static struct vfio_iommu_group *find_iommu_group(struct vfio_domain *domain,
 						 struct iommu_group *iommu_group)
 {
@@ -2314,8 +2283,6 @@  static int vfio_iommu_type1_attach_group(void *iommu_data,
 		}
 	}
 
-	vfio_test_domain_fgsp(domain, &iova_copy);
-
 	/* replay mappings on new domains */
 	ret = vfio_iommu_replay(iommu, domain);
 	if (ret)