[v3,03/23] VT-d: limit page table population in domain_pgd_maddr()

Message ID 10d6b416-1a20-4b1b-d39a-1bf17b2e174a@suse.com (mailing list archive)
State New, archived
Series: IOMMU: superpage support when not sharing pagetables

Commit Message

Jan Beulich Jan. 10, 2022, 4:23 p.m. UTC
I have to admit that I never understood why domain_pgd_maddr() wants to
populate all page table levels for DFN 0. I can only assume that,
despite the comment there, what is needed is population just down to
the smallest nr_pt_levels that the loop later in the function may need
to run to. Hence what is wanted is the minimum across all
iommu->nr_pt_levels, to be passed to addr_to_dma_page_maddr() instead
of the literal 1.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: New.
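
To illustrate the accounting the patch introduces, here is a minimal
standalone sketch (hypothetical helper and variable names; not the Xen
sources): each unit's AGAW maps to a page-table depth, and the running
minimum across all units is the shallowest depth any later walk can
require.

/*
 * Minimal sketch, not the Xen code: track the smallest page-table
 * depth reported by any IOMMU, so DFN-0 population only has to reach
 * that level instead of the leaf (level 1).
 */
#include <limits.h>
#include <stdio.h>

/* VT-d: AGAW value n corresponds to an (n + 2)-level page table. */
static unsigned int agaw_to_level(unsigned int agaw)
{
    return agaw + 2;
}

static unsigned int min_pt_levels = UINT_MAX;

static void account_iommu(unsigned int agaw)
{
    unsigned int levels = agaw_to_level(agaw);

    if ( min_pt_levels > levels )
        min_pt_levels = levels;
}

int main(void)
{
    account_iommu(2); /* hypothetical 4-level unit (48-bit width) */
    account_iommu(1); /* hypothetical 3-level unit (39-bit width) */

    printf("populate DFN 0 down to level %u\n", min_pt_levels);
    return 0;
}

With such a mix of 3- and 4-level units, populating down to level 3
suffices; compared with the previous literal 1, the levels below the
shallowest depth any IOMMU can require are no longer allocated.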

Comments

Tian, Kevin Jan. 30, 2022, 3:22 a.m. UTC | #1
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Tuesday, January 11, 2022 12:23 AM
> 
> I have to admit that I never understood why domain_pgd_maddr() wants to
> populate all page table levels for DFN 0. I can only assume that,
> despite the comment there, what is needed is population just down to
> the smallest nr_pt_levels that the loop later in the function may need
> to run to. Hence what is wanted is the minimum across all
> iommu->nr_pt_levels, to be passed to addr_to_dma_page_maddr() instead
> of the literal 1.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

Patch

--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -55,6 +55,7 @@ bool __read_mostly iommu_snoop = true;
 #endif
 
 static unsigned int __read_mostly nr_iommus;
+static unsigned int __read_mostly min_pt_levels = UINT_MAX;
 
 static struct iommu_ops vtd_ops;
 static struct tasklet vtd_fault_tasklet;
@@ -482,8 +483,11 @@ static uint64_t domain_pgd_maddr(struct
     {
         if ( !hd->arch.vtd.pgd_maddr )
         {
-            /* Ensure we have pagetables allocated down to leaf PTE. */
-            addr_to_dma_page_maddr(d, 0, 1, NULL, true);
+            /*
+             * Ensure we have pagetables allocated down to the smallest
+             * level the loop below may need to run to.
+             */
+            addr_to_dma_page_maddr(d, 0, min_pt_levels, NULL, true);
 
             if ( !hd->arch.vtd.pgd_maddr )
                 return 0;
@@ -1381,6 +1385,8 @@ int __init iommu_alloc(struct acpi_drhd_
         return -ENODEV;
     }
     iommu->nr_pt_levels = agaw_to_level(agaw);
+    if ( min_pt_levels > iommu->nr_pt_levels )
+        min_pt_levels = iommu->nr_pt_levels;
 
     if ( !ecap_coherent(iommu->ecap) )
         vtd_ops.sync_cache = sync_cache;
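
For context on why the minimum is the right bound, here is a
self-contained toy model (hypothetical types and names, not the Xen
code) of the level-skipping walk the commit message refers to: an IOMMU
with fewer levels than the domain's tree starts its hardware walk from
a subtree that many levels down, so tables must exist along DFN 0 at
least down to the smallest nr_pt_levels of any unit.

/*
 * Toy model, not Xen code: a tree populated only along slot 0 (DFN 0).
 * A unit wanting fewer levels than the tree has is handed the subtree
 * the appropriate number of levels down.
 */
#include <stddef.h>
#include <stdio.h>

struct pt_node {
    struct pt_node *slot0; /* entry 0; the only one populated for DFN 0 */
};

static struct pt_node *skip_levels(struct pt_node *root,
                                   unsigned int have, unsigned int want)
{
    while ( root && have-- > want )
        root = root->slot0; /* descend one level */
    return root;
}

int main(void)
{
    struct pt_node l3 = { NULL }; /* level-3 table */
    struct pt_node l4 = { &l3 };  /* level-4 root linking to it */

    /* A 3-level unit starts at l3; population down to level 3 suffices. */
    printf("%s\n", skip_levels(&l4, 4, 3) == &l3 ? "ok" : "missing table");
    return 0;
}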