@@ -416,6 +416,26 @@ TRACE_EVENT(
)
);
+TRACE_EVENT(
+ kvm_mmu_split_large_page,
+ TP_PROTO(u64 gfn, u64 spte, int level),
+ TP_ARGS(gfn, spte, level),
+
+ TP_STRUCT__entry(
+ __field(u64, gfn)
+ __field(u64, spte)
+ __field(int, level)
+ ),
+
+ TP_fast_assign(
+ __entry->gfn = gfn;
+ __entry->spte = spte;
+ __entry->level = level;
+ ),
+
+ TP_printk("gfn %llx spte %llx level %d", __entry->gfn, __entry->spte, __entry->level)
+);
+
#endif /* _TRACE_KVMMMU_H */
#undef TRACE_INCLUDE_PATH
@@ -1284,6 +1284,8 @@ static bool tdp_mmu_split_large_page_atomic(struct kvm *kvm, struct tdp_iter *it
BUG_ON(mmu_split_caches_need_topup(kvm));
+ trace_kvm_mmu_split_large_page(iter->gfn, large_spte, level);
+
child_sp = alloc_child_tdp_mmu_page(&kvm->arch.split_caches, iter);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
Add a tracepoint that records whenever we split a large page.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmutrace.h | 20 ++++++++++++++++++++
 arch/x86/kvm/mmu/tdp_mmu.c  |  2 ++
 2 files changed, 22 insertions(+)