@@ -3098,8 +3098,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
 /*
  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
  */
-static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                           u32 error_code)
+static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 {
         struct kvm_shadow_walk_iterator iterator;
         struct kvm_mmu_page *sp;
@@ -3115,7 +3114,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
         do {
                 u64 new_spte;
 
-                for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
+                for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte)
                         if (!is_shadow_present_pte(spte))
                                 break;
 
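Side note for reviewers: this walk runs under walk_shadow_page_lockless_begin(), i.e. without holding mmu_lock, so each SPTE is read with a single lockless load. From memory, for_each_shadow_entry_lockless() is defined in arch/x86/kvm/mmu/mmu.c along the following lines (a paraphrase, not a verbatim quote):

    #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)   \
            for (shadow_walk_init(&(_walker), _vcpu, _addr);              \
                 shadow_walk_okay(&(_walker)) &&                          \
                    ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });\
                 __shadow_walk_next(&(_walker), spte))

Only the address argument is renamed by this hunk; the walk itself is unchanged.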
@@ -3194,8 +3193,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
         } while (true);
 
-        trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
-                              spte, ret);
+        trace_fast_page_fault(vcpu, gpa, error_code, iterator.sptep, spte, ret);
         walk_shadow_page_lockless_end(vcpu);
 
         return ret;
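For anyone reading this patch without the rest of mmu.c at hand: the fast path re-reads the faulting SPTE locklessly, decides whether the fault can be fixed by flipping permission bits in place (e.g. restoring write access), and publishes the fixed SPTE with a compare-and-exchange, retrying a bounded number of times if a concurrent writer changes the entry first. Below is a minimal userspace model of that retry pattern; the names, bit layout, and retry bound are invented for illustration and are not the kernel's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for a 64-bit SPTE with a writable bit. */
    #define SPTE_WRITABLE (1ull << 1)

    static _Atomic uint64_t spte = 0x1000; /* present, read-only */

    static bool fast_fix_write_fault(void)
    {
            int retry_count = 0;

            do {
                    uint64_t old = atomic_load(&spte);
                    uint64_t new = old | SPTE_WRITABLE;

                    /* Spurious fault: another thread already fixed it. */
                    if (old & SPTE_WRITABLE)
                            return true;

                    /* Publish the fixed entry; this fails if the SPTE
                     * changed under us, in which case we retry, much
                     * like fast_page_fault()'s do/while loop. */
                    if (atomic_compare_exchange_strong(&spte, &old, new))
                            return true;
            } while (++retry_count < 4); /* bounded, then give up */

            return false; /* fall back to the slow path */
    }

    int main(void)
    {
            printf("fixed: %d, spte: %#llx\n", fast_fix_write_fault(),
                   (unsigned long long)atomic_load(&spte));
            return 0;
    }

The bounded retry mirrors the real function, which also gives up after a few failed attempts rather than spinning indefinitely against a persistent writer.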