[v4,19/21] IOMMU/x86: add perf counters for page table splitting / coalescing

Message ID cd077dac-c53c-3369-03db-f2e4260ee94f@suse.com (mailing list archive)
State Superseded
Series IOMMU: superpage support when not sharing pagetables

Commit Message

Jan Beulich April 25, 2022, 8:44 a.m. UTC
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
---
v3: New.
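
For reference, the counters use Xen's existing perf counter machinery: a
PERFCOUNTER() entry in perfc_defn.h declares a named counter, and
perfc_incr() bumps it at the event site. A minimal sketch of the pattern
(only effective when the hypervisor is built with CONFIG_PERF_COUNTERS;
otherwise perfc_incr() compiles to nothing):

/* xen/arch/x86/include/asm/perfc_defn.h: declare the named counters. */
PERFCOUNTER(iommu_pt_shatters,    "IOMMU page table shatters")
PERFCOUNTER(iommu_pt_coalesces,   "IOMMU page table coalesces")

/* Where a superpage mapping gets split into a lower-level page table: */
perfc_incr(iommu_pt_shatters);

/* Where a fully-populated page table gets replaced by a superpage entry: */
perfc_incr(iommu_pt_coalesces);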

Comments

Roger Pau Monne May 11, 2022, 1:48 p.m. UTC | #1
On Mon, Apr 25, 2022 at 10:44:11AM +0200, Jan Beulich wrote:
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

It would be helpful to also have those per-guest, I think.

Thanks, Roger.
Jan Beulich May 18, 2022, 11:39 a.m. UTC | #2
On 11.05.2022 15:48, Roger Pau Monné wrote:
> On Mon, Apr 25, 2022 at 10:44:11AM +0200, Jan Beulich wrote:
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> 
> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks.

> It would be helpful to also have those per-guest, I think.

Perhaps, but we don't have per-guest counter infrastructure, do we?

Jan
Roger Pau Monne May 20, 2022, 10:41 a.m. UTC | #3
On Wed, May 18, 2022 at 01:39:02PM +0200, Jan Beulich wrote:
> On 11.05.2022 15:48, Roger Pau Monné wrote:
> > On Mon, Apr 25, 2022 at 10:44:11AM +0200, Jan Beulich wrote:
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> > 
> > Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
> 
> Thanks.
> 
> > It would be helpful to also have those per-guest, I think.
> 
> Perhaps, but we don't have per-guest counter infrastructure, do we?

No, I don't think so.  It would be nice, but I don't see us doing it any
time soon.

Thanks, Roger.
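
For context, Xen's perf counters are system-wide, with no per-domain
infrastructure behind them. Purely as an illustration of what the per-guest
accounting discussed above could involve (the structure and field names
below are hypothetical and do not exist in the tree), such counts might be
hung off the per-domain IOMMU state and bumped next to the global
perfc_incr() calls:

/* Hypothetical sketch only; neither this struct nor the field exists. */
struct domain_iommu_stats {
    atomic_t pt_shatters;   /* superpages split for this domain */
    atomic_t pt_coalesces;  /* page tables merged back into superpages */
};

/* E.g. alongside the perfc_incr(iommu_pt_shatters) added by this patch: */
atomic_inc(&dom_iommu(d)->stats.pt_shatters);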

Patch

--- a/xen/arch/x86/include/asm/perfc_defn.h
+++ b/xen/arch/x86/include/asm/perfc_defn.h
@@ -125,4 +125,7 @@  PERFCOUNTER(realmode_exits,      "vmexit
 
 PERFCOUNTER(pauseloop_exits, "vmexits from Pause-Loop Detection")
 
+PERFCOUNTER(iommu_pt_shatters,    "IOMMU page table shatters")
+PERFCOUNTER(iommu_pt_coalesces,   "IOMMU page table coalesces")
+
 /*#endif*/ /* __XEN_PERFC_DEFN_H__ */
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -343,6 +343,8 @@  static int iommu_pde_from_dfn(struct dom
                                      level, PTE_kind_table);
 
             *flush_flags |= IOMMU_FLUSHF_modified;
+
+            perfc_incr(iommu_pt_shatters);
         }
 
         /* Install lower level page table for non-present entries */
@@ -472,6 +474,7 @@  int cf_check amd_iommu_map_page(
                               flags & IOMMUF_readable, &contig);
         *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
         iommu_queue_free_pgtable(hd, pg);
+        perfc_incr(iommu_pt_coalesces);
     }
 
     spin_unlock(&hd->arch.mapping_lock);
@@ -532,6 +535,7 @@  int cf_check amd_iommu_unmap_page(
             clear_iommu_pte_present(pt_mfn, dfn_x(dfn), level, &free);
             *flush_flags |= IOMMU_FLUSHF_all;
             iommu_queue_free_pgtable(hd, pg);
+            perfc_incr(iommu_pt_coalesces);
         }
     }
 
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -404,6 +404,8 @@  static uint64_t addr_to_dma_page_maddr(s
 
                 if ( flush_flags )
                     *flush_flags |= IOMMU_FLUSHF_modified;
+
+                perfc_incr(iommu_pt_shatters);
             }
 
             write_atomic(&pte->val, new_pte.val);
@@ -865,6 +867,7 @@  static int dma_pte_clear_one(struct doma
 
         *flush_flags |= IOMMU_FLUSHF_all;
         iommu_queue_free_pgtable(hd, pg);
+        perfc_incr(iommu_pt_coalesces);
     }
 
     spin_unlock(&hd->arch.mapping_lock);
@@ -2244,6 +2247,7 @@  static int __must_check cf_check intel_i
 
         *flush_flags |= IOMMU_FLUSHF_modified | IOMMU_FLUSHF_all;
         iommu_queue_free_pgtable(hd, pg);
+        perfc_incr(iommu_pt_coalesces);
     }
 
     spin_unlock(&hd->arch.mapping_lock);
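
As a usage note: counters defined via PERFCOUNTER() are only built when the
hypervisor is configured with CONFIG_PERF_COUNTERS, and the new
shatter/coalesce counts would presumably be inspected the same way as the
existing ones, e.g. through the hypervisor's perf counter dump (the 'p'
debug key) or the xenperf tool.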