[16/16] KVM: MMU: change tracepoints arguments to kvm_page_fault

Message ID: 20210807134936.3083984-17-pbonzini@redhat.com
State: New, archived
Series: KVM: x86: pass arguments on the page fault path via struct kvm_page_fault

Commit Message

Paolo Bonzini Aug. 7, 2021, 1:49 p.m. UTC
Pass struct kvm_page_fault to the MMU tracepoints, instead of
having each call site extract the individual arguments from the struct.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c         |  4 ++--
 arch/x86/kvm/mmu/mmutrace.h    | 18 +++++++++---------
 arch/x86/kvm/mmu/paging_tmpl.h |  2 +-
 arch/x86/kvm/mmu/tdp_mmu.c     |  2 +-
 4 files changed, 13 insertions(+), 13 deletions(-)
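
For reference, here is a minimal sketch of the struct kvm_page_fault fields
that these tracepoints consume. The field names are taken from the patch
below and the discussion; the full definition (added earlier in this series,
in arch/x86/kvm/mmu/mmu_internal.h) carries additional members:

/*
 * Sketch only: just the fields referenced by the tracepoints in this
 * patch.  See arch/x86/kvm/mmu/mmu_internal.h for the real definition.
 */
struct kvm_page_fault {
	const gpa_t	addr;		/* the cr2 or gpa of the fault */
	const u32	error_code;	/* page fault error code */
	kvm_pfn_t	pfn;		/* host pfn resolved for the fault */
	gfn_t		gfn;		/* gfn derived from addr */
	u8		goal_level;	/* page table level the fault maps at */
};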

Comments

Sean Christopherson Sept. 1, 2021, 11:28 p.m. UTC | #1
On Sat, Aug 07, 2021, Paolo Bonzini wrote:
> @@ -377,9 +377,9 @@ TRACE_EVENT(
>  	),
>  
>  	TP_fast_assign(
> -		__entry->gfn = addr >> PAGE_SHIFT;
> -		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
> -		__entry->level = level;
> +		__entry->gfn = fault->addr >> PAGE_SHIFT;

Eww.  The existing code also bastardizes addr vs. gpa, but this just looks even
more wrong because we have fault->gfn.

Maybe do this as a prep patch at the beginning of the series?  And then use
fault->gfn directly.

diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 7d03e9b7ccfa..b159749300b5 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -725,7 +725,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
        level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn,
                                        huge_page_disallowed, &req_level);

-       trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
+       trace_kvm_mmu_spte_requested(gw->gfn << PAGE_SHIFT, gw->level, pfn);

        for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
                clear_sp_write_flooding_count(it.sptep);

> +		__entry->pfn = fault->pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));

Similar thing here; it could use fault->gfn directly.

> +		__entry->level = fault->goal_level;
>  	),
>  
>  	TP_printk("gfn %llx pfn %llx level %d",
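
Putting the two suggestions together, the TP_fast_assign block could then
shrink to something like this (a sketch only, assuming the prep patch above
lands and fault->gfn is valid at this point):

	TP_fast_assign(
		__entry->gfn = fault->gfn;
		__entry->pfn = fault->pfn |
			       (fault->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
		__entry->level = fault->goal_level;
	),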

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a41325f452f4..0c0061893ebe 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2950,7 +2950,7 @@  static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 
 	kvm_mmu_hugepage_adjust(vcpu, fault);
 
-	trace_kvm_mmu_spte_requested(fault->addr, fault->goal_level, fault->pfn);
+	trace_kvm_mmu_spte_requested(fault);
 	for_each_shadow_entry(vcpu, fault->addr, it) {
 		/*
 		 * We cannot overwrite existing page tables with an NX
@@ -3242,7 +3242,7 @@  static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 
 	} while (true);
 
-	trace_fast_page_fault(vcpu, fault->addr, fault->error_code, sptep, spte, ret);
+	trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
 	walk_shadow_page_lockless_end(vcpu);
 
 	return ret;
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index 2924a4081a19..0c37fb9d532e 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -252,9 +252,9 @@  TRACE_EVENT(
 
 TRACE_EVENT(
 	fast_page_fault,
-	TP_PROTO(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 error_code,
+	TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 		 u64 *sptep, u64 old_spte, int ret),
-	TP_ARGS(vcpu, cr2_or_gpa, error_code, sptep, old_spte, ret),
+	TP_ARGS(vcpu, fault, sptep, old_spte, ret),
 
 	TP_STRUCT__entry(
 		__field(int, vcpu_id)
@@ -268,8 +268,8 @@  TRACE_EVENT(
 
 	TP_fast_assign(
 		__entry->vcpu_id = vcpu->vcpu_id;
-		__entry->cr2_or_gpa = cr2_or_gpa;
-		__entry->error_code = error_code;
+		__entry->cr2_or_gpa = fault->addr;
+		__entry->error_code = fault->error_code;
 		__entry->sptep = sptep;
 		__entry->old_spte = old_spte;
 		__entry->new_spte = *sptep;
@@ -367,8 +367,8 @@  TRACE_EVENT(
 
 TRACE_EVENT(
 	kvm_mmu_spte_requested,
-	TP_PROTO(gpa_t addr, int level, kvm_pfn_t pfn),
-	TP_ARGS(addr, level, pfn),
+	TP_PROTO(struct kvm_page_fault *fault),
+	TP_ARGS(fault),
 
 	TP_STRUCT__entry(
 		__field(u64, gfn)
@@ -377,9 +377,9 @@  TRACE_EVENT(
 	),
 
 	TP_fast_assign(
-		__entry->gfn = addr >> PAGE_SHIFT;
-		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
-		__entry->level = level;
+		__entry->gfn = fault->addr >> PAGE_SHIFT;
+		__entry->pfn = fault->pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(fault->goal_level) - 1));
+		__entry->level = fault->goal_level;
 	),
 
 	TP_printk("gfn %llx pfn %llx level %d",
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 3f82f469abdf..1c182ef61c79 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -719,7 +719,7 @@  static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
 
 	kvm_mmu_hugepage_adjust(vcpu, fault);
 
-	trace_kvm_mmu_spte_requested(fault->addr, gw->level, fault->pfn);
+	trace_kvm_mmu_spte_requested(fault);
 
 	for (; shadow_walk_okay(&it); shadow_walk_next(&it)) {
 		clear_sp_write_flooding_count(it.sptep);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 803da0334933..7f7de01cf665 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1004,7 +1004,7 @@  int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 
 	kvm_mmu_hugepage_adjust(vcpu, fault);
 
-	trace_kvm_mmu_spte_requested(fault->addr, fault->goal_level, fault->pfn);
+	trace_kvm_mmu_spte_requested(fault);
 
 	rcu_read_lock();
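
Note that the TP_printk format strings are untouched, so the user-visible
trace output is unchanged by this patch. For illustration, a
kvm_mmu_spte_requested event rendered through the
"gfn %llx pfn %llx level %d" format above would look roughly like the
following line in /sys/kernel/debug/tracing/trace (all values hypothetical):

 qemu-system-x86-1234  [002] ....  5678.901234: kvm_mmu_spte_requested: gfn fec35 pfn 89a35 level 2

Here level 2 is PG_LEVEL_2M, and the low nine bits of the pfn mirror those
of the gfn because of the KVM_PAGES_PER_HPAGE() masking in TP_fast_assign.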