diff mbox series

[v3,23/23] IOMMU/x86: add perf counters for page table splitting / coalescing

Message ID b5d5d7b1-7b53-5c74-1988-3baec74a9f45@suse.com (mailing list archive)
State New, archived
Headers show
Series IOMMU: superpage support when not sharing pagetables | expand

Commit Message

Jan Beulich Jan. 10, 2022, 4:38 p.m. UTC
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: New.

Comments

Tian, Kevin Feb. 18, 2022, 5:23 a.m. UTC | #1
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Tuesday, January 11, 2022 12:39 AM
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

> ---
> v3: New.
> 
> --- a/xen/drivers/passthrough/amd/iommu_map.c
> +++ b/xen/drivers/passthrough/amd/iommu_map.c
> @@ -283,6 +283,8 @@ static int iommu_pde_from_dfn(struct dom
>                                       level, PTE_kind_table);
> 
>              *flush_flags |= IOMMU_FLUSHF_modified;
> +
> +            perfc_incr(iommu_pt_shatters);
>          }
> 
>          /* Install lower level page table for non-present entries */
> @@ -411,6 +413,7 @@ int amd_iommu_map_page(struct domain *d,
>                                flags & IOMMUF_readable, &contig);
>          *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
>          iommu_queue_free_pgtable(d, pg);
> +        perfc_incr(iommu_pt_coalesces);
>      }
> 
>      spin_unlock(&hd->arch.mapping_lock);
> @@ -471,6 +474,7 @@ int amd_iommu_unmap_page(struct domain *
>              clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level, &free);
>              *flush_flags |= IOMMU_FLUSHF_all;
>              iommu_queue_free_pgtable(d, pg);
> +            perfc_incr(iommu_pt_coalesces);
>          }
>      }
> 
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -451,6 +451,8 @@ static uint64_t addr_to_dma_page_maddr(s
> 
>                  if ( flush_flags )
>                      *flush_flags |= IOMMU_FLUSHF_modified;
> +
> +                perfc_incr(iommu_pt_shatters);
>              }
> 
>              write_atomic(&pte->val, new_pte.val);
> @@ -907,6 +909,7 @@ static int dma_pte_clear_one(struct doma
> 
>          *flush_flags |= IOMMU_FLUSHF_all;
>          iommu_queue_free_pgtable(domain, pg);
> +        perfc_incr(iommu_pt_coalesces);
>      }
> 
>      spin_unlock(&hd->arch.mapping_lock);
> @@ -2099,6 +2102,7 @@ static int __must_check intel_iommu_map_
> 
>          *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
>          iommu_queue_free_pgtable(d, pg);
> +        perfc_incr(iommu_pt_coalesces);
>      }
> 
>      spin_unlock(&hd->arch.mapping_lock);
> --- a/xen/arch/x86/include/asm/perfc_defn.h
> +++ b/xen/arch/x86/include/asm/perfc_defn.h
> @@ -125,4 +125,7 @@ PERFCOUNTER(realmode_exits,      "vmexit
> 
>  PERFCOUNTER(pauseloop_exits, "vmexits from Pause-Loop Detection")
> 
> +PERFCOUNTER(iommu_pt_shatters,    "IOMMU page table shatters")
> +PERFCOUNTER(iommu_pt_coalesces,   "IOMMU page table coalesces")
> +
>  /*#endif*/ /* __XEN_PERFC_DEFN_H__ */
diff mbox series

Patch

--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -283,6 +283,8 @@  static int iommu_pde_from_dfn(struct dom
                                      level, PTE_kind_table);
 
             *flush_flags |= IOMMU_FLUSHF_modified;
+
+            perfc_incr(iommu_pt_shatters);
         }
 
         /* Install lower level page table for non-present entries */
@@ -411,6 +413,7 @@  int amd_iommu_map_page(struct domain *d,
                               flags & IOMMUF_readable, &contig);
         *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
         iommu_queue_free_pgtable(d, pg);
+        perfc_incr(iommu_pt_coalesces);
     }
 
     spin_unlock(&hd->arch.mapping_lock);
@@ -471,6 +474,7 @@  int amd_iommu_unmap_page(struct domain *
             clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level, &free);
             *flush_flags |= IOMMU_FLUSHF_all;
             iommu_queue_free_pgtable(d, pg);
+            perfc_incr(iommu_pt_coalesces);
         }
     }
 
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -451,6 +451,8 @@  static uint64_t addr_to_dma_page_maddr(s
 
                 if ( flush_flags )
                     *flush_flags |= IOMMU_FLUSHF_modified;
+
+                perfc_incr(iommu_pt_shatters);
             }
 
             write_atomic(&pte->val, new_pte.val);
@@ -907,6 +909,7 @@  static int dma_pte_clear_one(struct doma
 
         *flush_flags |= IOMMU_FLUSHF_all;
         iommu_queue_free_pgtable(domain, pg);
+        perfc_incr(iommu_pt_coalesces);
     }
 
     spin_unlock(&hd->arch.mapping_lock);
@@ -2099,6 +2102,7 @@  static int __must_check intel_iommu_map_
 
         *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
         iommu_queue_free_pgtable(d, pg);
+        perfc_incr(iommu_pt_coalesces);
     }
 
     spin_unlock(&hd->arch.mapping_lock);
--- a/xen/arch/x86/include/asm/perfc_defn.h
+++ b/xen/arch/x86/include/asm/perfc_defn.h
@@ -125,4 +125,7 @@  PERFCOUNTER(realmode_exits,      "vmexit
 
 PERFCOUNTER(pauseloop_exits, "vmexits from Pause-Loop Detection")
 
+PERFCOUNTER(iommu_pt_shatters,    "IOMMU page table shatters")
+PERFCOUNTER(iommu_pt_coalesces,   "IOMMU page table coalesces")
+
 /*#endif*/ /* __XEN_PERFC_DEFN_H__ */