
[v2,17/18] KVM: x86/mmu: Add tracepoint for splitting huge pages

Message ID 20220119230739.2234394-18-dmatlack@google.com
State New, archived
Series KVM: x86/mmu: Eager Page Splitting for the TDP MMU

Commit Message

David Matlack Jan. 19, 2022, 11:07 p.m. UTC
Add a tracepoint that records whenever KVM eagerly splits a huge page.
The tracepoint includes the error status of the split to indicate
whether it succeeded and, if not, why it failed.
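
For example, the tracepoint can be enabled via tracefs and its output
watched while eager splitting is in progress (a sketch; the tracefs
path assumes the usual debugfs mount, and the sample values are
illustrative, not from a real run):

  # echo 1 > /sys/kernel/debug/tracing/events/kvmmmu/kvm_mmu_split_huge_page/enable
  # cat /sys/kernel/debug/tracing/trace_pipe
  ... kvm_mmu_split_huge_page: gfn ff000 spte 100101e00ea7 level 2 errno 0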

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmutrace.h | 23 +++++++++++++++++++++++
 arch/x86/kvm/mmu/tdp_mmu.c  | 10 +++++++---
 2 files changed, 30 insertions(+), 3 deletions(-)

Patch

diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index de5e8e4e1aa7..12247b96af01 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -416,6 +416,29 @@  TRACE_EVENT(
 	)
 );
 
+TRACE_EVENT(
+	kvm_mmu_split_huge_page,
+	TP_PROTO(u64 gfn, u64 spte, int level, int errno),
+	TP_ARGS(gfn, spte, level, errno),
+
+	TP_STRUCT__entry(
+		__field(u64, gfn)
+		__field(u64, spte)
+		__field(int, level)
+		__field(int, errno)
+	),
+
+	TP_fast_assign(
+		__entry->gfn = gfn;
+		__entry->spte = spte;
+		__entry->level = level;
+		__entry->errno = errno;
+	),
+
+	TP_printk("gfn %llx spte %llx level %d errno %d",
+		  __entry->gfn, __entry->spte, __entry->level, __entry->errno)
+);
+
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d5e713b849e9..1ed8e20270f0 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1332,7 +1332,7 @@  static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
 	 */
 	ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
 	if (ret)
-		return ret;
+		goto out;
 
 	/*
 	 * tdp_mmu_link_sp_atomic() will handle subtracting the huge page we
@@ -1341,7 +1341,9 @@  static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
 	 */
 	kvm_update_page_stats(kvm, level - 1, PT64_ENT_PER_PAGE);
 
-	return 0;
+out:
+	trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
+	return ret;
 }
 
 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
@@ -1378,6 +1380,9 @@  static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
 			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
 			if (!sp) {
 				ret = -ENOMEM;
+				trace_kvm_mmu_split_huge_page(iter.gfn,
+							      iter.old_spte,
+							      iter.level, ret);
 				break;
 			}
 
@@ -1401,7 +1406,6 @@  static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
 	if (sp)
 		tdp_mmu_free_sp(sp);
 
-
 	return ret;
 }
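
Since the error status is recorded as a tracepoint field, tracefs event
filtering can be used to capture only failed splits, such as the -ENOMEM
case above (a sketch; assumes tracefs is mounted under
/sys/kernel/debug/tracing):

  # echo 'errno != 0' > /sys/kernel/debug/tracing/events/kvmmmu/kvm_mmu_split_huge_page/filter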