--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1447,6 +1447,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
+void kvm_inject_l1_page_fault(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault);
bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4357,12 +4357,6 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
return kvm_read_cr3(vcpu);
}

-static void inject_page_fault(struct kvm_vcpu *vcpu,
- struct x86_exception *fault)
-{
- vcpu->arch.mmu->inject_page_fault(vcpu, fault);
-}
-
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
unsigned int access, int *nr_present)
{
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -812,7 +812,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
if (!r) {
pgprintk("%s: guest page fault\n", __func__);
if (!prefault)
- inject_page_fault(vcpu, &walker.fault);
+ kvm_inject_l1_page_fault(vcpu, &walker.fault);

return RET_PF_RETRY;
}
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -611,6 +611,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

+void kvm_inject_l1_page_fault(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault)
+{
+ vcpu->arch.mmu->inject_page_fault(vcpu, fault);
+}
+
bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
@@ -619,7 +625,7 @@ bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
else
- vcpu->arch.mmu->inject_page_fault(vcpu, fault);
+ kvm_inject_l1_page_fault(vcpu, fault);

return fault->nested_page_fault;
}
Move the MMU's inject_page_fault(), which is used to inject page faults
encountered when walking L1's page tables, to x86.c and use it to handle
the non-nested path of kvm_inject_emulated_page_fault().  Using a common
helper will reduce duplicate code in a future patch to sync SPTEs on
emulated page faults, and also eliminates the rather confusing function
name "inject_page_fault", which collides with struct kvm_mmu's hook.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 2 ++
 arch/x86/kvm/mmu/mmu.c          | 6 ------
 arch/x86/kvm/mmu/paging_tmpl.h  | 2 +-
 arch/x86/kvm/x86.c              | 8 +++++++-
 4 files changed, 10 insertions(+), 8 deletions(-)
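For anyone tracing the resulting control flow, here is a minimal standalone
model of the dispatch kvm_inject_emulated_page_fault() performs after this
patch.  This is not kernel code: every type below is a cut-down stand-in for
the real KVM structure, and mmu_is_nested() is approximated with a plain
flag.  Only the branch structure mirrors the patched functions.

/*
 * Standalone sketch (userspace, stubbed types) of the post-patch dispatch:
 * a fault flagged as a nested page fault, or any fault when the vCPU is
 * not running nested, funnels through kvm_inject_l1_page_fault(); other
 * faults are reflected through the nested MMU's hook.
 */
#include <stdbool.h>
#include <stdio.h>

struct kvm_vcpu;

struct x86_exception {
	bool nested_page_fault;	/* fault hit the nested (L1-level) tables */
	unsigned long address;
};

struct kvm_mmu {
	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
				  struct x86_exception *fault);
};

struct kvm_vcpu {
	struct kvm_mmu *mmu;		/* current walk MMU (stand-in) */
	struct kvm_mmu nested_mmu;	/* L2's view when running nested */
	bool guest_mode;		/* stand-in for mmu_is_nested() state */
};

static bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->guest_mode;
}

/* Mirrors the new common helper: always targets the L1 MMU's hook. */
static void kvm_inject_l1_page_fault(struct kvm_vcpu *vcpu,
				     struct x86_exception *fault)
{
	vcpu->mmu->inject_page_fault(vcpu, fault);
}

/* Mirrors kvm_inject_emulated_page_fault() after the patch. */
static bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
					   struct x86_exception *fault)
{
	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->nested_mmu.inject_page_fault(vcpu, fault);
	else
		kvm_inject_l1_page_fault(vcpu, fault);

	return fault->nested_page_fault;
}

/* Toy injection callback standing in for the real MMU hook. */
static void inject_l1(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
	(void)vcpu;
	printf("injected #PF into L1 at %#lx\n", fault->address);
}

int main(void)
{
	struct kvm_mmu l1_mmu = { .inject_page_fault = inject_l1 };
	struct kvm_vcpu vcpu = { .mmu = &l1_mmu, .guest_mode = false };
	struct x86_exception fault = { .address = 0x1000 };

	kvm_inject_emulated_page_fault(&vcpu, &fault);
	return 0;
}

The helper itself is a one-line forward to the current MMU's
inject_page_fault hook, i.e. exactly what the deleted static
inject_page_fault() in mmu.c did; hoisting it into x86.c is what lets the
future SPTE-sync patch hook the emulated and page-walk paths in one place.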