
[v3,06/37] KVM: x86: Consolidate logic for injecting page faults to L1

Message ID 20200320212833.3507-7-sean.j.christopherson@intel.com
Series KVM: x86: TLB flushing fixes and enhancements

Commit Message

Sean Christopherson March 20, 2020, 9:28 p.m. UTC
Move the MMU's inject_page_fault(), which is used to inject page faults
encountered when walking L1's page tables, to x86.c and use it to handle
the non-nested path of kvm_inject_emulated_page_fault().  Using a common
helper will reduce the duplicate code added by a future patch that syncs
SPTEs on emulated page faults, and also eliminate the rather confusing
function name "inject_page_fault", which collides with struct kvm_mmu's hook.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/include/asm/kvm_host.h | 2 ++
 arch/x86/kvm/mmu/mmu.c          | 6 ------
 arch/x86/kvm/mmu/paging_tmpl.h  | 2 +-
 arch/x86/kvm/x86.c              | 8 +++++++-
 4 files changed, 10 insertions(+), 8 deletions(-)

Comments

Paolo Bonzini March 24, 2020, 12:47 a.m. UTC | #1
On 20/03/20 22:28, Sean Christopherson wrote:
> +void kvm_inject_l1_page_fault(struct kvm_vcpu *vcpu,
> +			      struct x86_exception *fault)
> +{
> +	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
> +}
> +
>  bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
>  				    struct x86_exception *fault)
>  {
> @@ -619,7 +625,7 @@ bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
>  	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
>  		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
>  	else
> -		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
> +		kvm_inject_l1_page_fault(vcpu, fault);
>  
>  	return fault->nested_page_fault;

This all started with "I don't like the name of the function", but
thinking more about it, we can also write this as:

	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
		vcpu->arch.walk_mmu->inject_page_fault(vcpu, fault);
	else
		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
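
For reference, mmu_is_nested() is just a pointer comparison (quoting
roughly from arch/x86/kvm/x86.h):

	static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
	{
		/* walk_mmu is redirected to nested_mmu only while L2 runs. */
		return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
	}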

Now, if !mmu_is_nested(vcpu) then walk_mmu == mmu, so up until this patch
the whole function can be written more simply as:

	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : vcpu->arch.walk_mmu;
	fault_mmu->inject_page_fault(vcpu, fault);

(which also matches how fault->nested_page_fault is assigned).
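
For reference, paging_tmpl.h sets the flag with exactly this comparison
(roughly, in FNAME(walk_addr_generic)):

	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
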
In patch 7 we add the invalidation in kvm_inject_l1_page_fault, but is
it necessary to do it only in the else branch?

+	if (!vcpu->arch.mmu->direct_map &&
+	    (fault->error_code & PFERR_PRESENT_MASK))
+		vcpu->arch.mmu->invlpg(vcpu, fault->address,
+				       vcpu->arch.mmu->root_hpa);
+
 	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
 }
 
The direct_map check is really just an optimization to avoid a
retpoline if ->invlpg is nonpaging_invlpg.  We can change it to
!vcpu->arch.mmu->invlpg if nonpaging_invlpg is replaced with NULL,
and then the same "if" condition can also be used for the nested_mmu,
i.e. what patch 7 writes as:

+		/*
+		 * No need to sync SPTEs, the fault is being injected into L2,
+		 * whose page tables are not being shadowed.
+		 */
 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
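
Concretely, using the fault_mmu local introduced above, the unified
check could be written once (a sketch, not the posted code, assuming
->invlpg is NULL when there are no shadow page tables to sync):

	if ((fault->error_code & PFERR_PRESENT_MASK) && fault_mmu->invlpg)
		fault_mmu->invlpg(vcpu, fault->address, fault_mmu->root_hpa);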


Finally, patch 7 also adds a tlb_flush_gva call, which is already present
in kvm_mmu_invlpg; folding all of this together brings the final form to
look like this:

bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
                                    struct x86_exception *fault)
{
        struct kvm_mmu *fault_mmu;
        WARN_ON_ONCE(fault->vector != PF_VECTOR);

        fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : vcpu->arch.walk_mmu;

        /*
         * Invalidate the TLB entry for the faulting address, if it exists,
         * else the access will fault indefinitely (and to emulate hardware).
         */
        if (fault->error_code & PFERR_PRESENT_MASK)
                __kvm_mmu_invlpg(vcpu, fault_mmu, fault->address);

        fault_mmu->inject_page_fault(vcpu, fault);
        return fault->nested_page_fault;
}
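
Here __kvm_mmu_invlpg() would be a new helper shared with kvm_mmu_invlpg();
a minimal sketch, assuming the NULL ->invlpg convention above and the
existing kvm_x86_ops->tlb_flush_gva hook:

	static void __kvm_mmu_invlpg(struct kvm_vcpu *vcpu,
				     struct kvm_mmu *mmu, gva_t gva)
	{
		/* A NULL ->invlpg means there are no shadow SPTEs to sync. */
		if (mmu->invlpg)
			mmu->invlpg(vcpu, gva, mmu->root_hpa);

		/* Flush the hardware TLB entry for the guest virtual address. */
		kvm_x86_ops->tlb_flush_gva(vcpu, gva);
	}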

This will become a formal mini-series replacing patches 6 and 7
after I test it, so no need to do anything on your part.

Paolo

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 328b1765ff76..cdbf822c5c8b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1447,6 +1447,8 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
+void kvm_inject_l1_page_fault(struct kvm_vcpu *vcpu,
+			      struct x86_exception *fault);
 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 				    struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 560e85ebdf22..5ae620881bbc 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4357,12 +4357,6 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 	return kvm_read_cr3(vcpu);
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu,
-			      struct x86_exception *fault)
-{
-	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
-}
-
 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
 			   unsigned int access, int *nr_present)
 {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 1ddbfff64ccc..ac613f2fae01 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -812,7 +812,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
 		if (!prefault)
-			inject_page_fault(vcpu, &walker.fault);
+			kvm_inject_l1_page_fault(vcpu, &walker.fault);
 
 		return RET_PF_RETRY;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 64ed6e6e2b56..fcad522f221e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -611,6 +611,12 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 }
 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
 
+void kvm_inject_l1_page_fault(struct kvm_vcpu *vcpu,
+			      struct x86_exception *fault)
+{
+	vcpu->arch.mmu->inject_page_fault(vcpu, fault);
+}
+
 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 				    struct x86_exception *fault)
 {
@@ -619,7 +625,7 @@ bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
-		vcpu->arch.mmu->inject_page_fault(vcpu, fault);
+		kvm_inject_l1_page_fault(vcpu, fault);
 
 	return fault->nested_page_fault;
 }