
[5/9] KVM: x86/mmu: Separate TDP and non-paging fault handling

Message ID: 20220815230110.2266741-6-dmatlack@google.com
State: New, archived
Series: KVM: x86/mmu: Always enable the TDP MMU when TDP is enabled

Commit Message

David Matlack Aug. 15, 2022, 11:01 p.m. UTC
Separate the page fault handling for TDP faults and non-paging faults.
This creates some duplicate code in the short term, but makes each
routine simpler to read by eliminating branches and enables future
cleanups by allowing the two paths to diverge.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 77 +++++++++++++++++++++++++++---------------
 1 file changed, 50 insertions(+), 27 deletions(-)
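
At a glance, the split leaves two self-contained handlers that differ in
which flavor of mmu_lock they take. A condensed sketch of the result,
abridged from the diff below (the shared gfn/slot setup, fast path,
cache top-up, and pfn fault-in steps are summarized as comments rather
than reproduced):

	static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
					struct kvm_page_fault *fault)
	{
		/* This path builds PAE page tables, so 2MiB mappings at most. */
		fault->max_level = PG_LEVEL_2M;
		/* shared setup: gfn/slot, fast path, caches, pfn fault-in */
		write_lock(&vcpu->kvm->mmu_lock);
		r = nonpaging_map(vcpu, fault);
		write_unlock(&vcpu->kvm->mmu_lock);
		/* release pfn and return */
	}

	int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
	{
		/* shared setup, plus MTRR-based max_level clamping */
		read_lock(&vcpu->kvm->mmu_lock);
		r = kvm_tdp_mmu_map(vcpu, fault);
		read_unlock(&vcpu->kvm->mmu_lock);
		/* release pfn and return */
	}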

Comments

kernel test robot Aug. 24, 2022, 5:06 p.m. UTC | #1
Hi David,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on 93472b79715378a2386598d6632c654a2223267b]

url:    https://github.com/intel-lab-lkp/linux/commits/David-Matlack/KVM-x86-mmu-Always-enable-the-TDP-MMU-when-TDP-is-enabled/20220816-135710
base:   93472b79715378a2386598d6632c654a2223267b
config: i386-debian-10.3-kselftests (https://download.01.org/0day-ci/archive/20220825/202208250034.Vo6dL7C7-lkp@intel.com/config)
compiler: gcc-11 (Debian 11.3.0-5) 11.3.0
reproduce (this is a W=1 build):
        # https://github.com/intel-lab-lkp/linux/commit/0cbb5e1684635e3f36d0283f5b3696b0ee0660e1
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review David-Matlack/KVM-x86-mmu-Always-enable-the-TDP-MMU-when-TDP-is-enabled/20220816-135710
        git checkout 0cbb5e1684635e3f36d0283f5b3696b0ee0660e1
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        make W=1 O=build_dir ARCH=i386 SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable:
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>, old ones prefixed by <<):

ERROR: modpost: "kvm_tdp_mmu_test_age_gfn" [arch/x86/kvm/kvm.ko] undefined!
ERROR: modpost: "kvm_tdp_mmu_zap_all" [arch/x86/kvm/kvm.ko] undefined!
ERROR: modpost: "kvm_tdp_mmu_clear_dirty_pt_masked" [arch/x86/kvm/kvm.ko] undefined!
>> ERROR: modpost: "kvm_tdp_mmu_map" [arch/x86/kvm/kvm.ko] undefined!
ERROR: modpost: "kvm_tdp_mmu_age_gfn_range" [arch/x86/kvm/kvm.ko] undefined!
ERROR: modpost: "kvm_tdp_mmu_zap_leafs" [arch/x86/kvm/kvm.ko] undefined!
ERROR: modpost: "kvm_tdp_mmu_unmap_gfn_range" [arch/x86/kvm/kvm.ko] undefined!
ERROR: modpost: "kvm_tdp_mmu_invalidate_all_roots" [arch/x86/kvm/kvm.ko] undefined!
ERROR: modpost: "kvm_tdp_mmu_get_vcpu_root_hpa" [arch/x86/kvm/kvm.ko] undefined!
ERROR: modpost: "kvm_tdp_mmu_set_spte_gfn" [arch/x86/kvm/kvm.ko] undefined!
WARNING: modpost: suppressed 6 unresolved symbol warnings because there were too many)
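
The undefined references above are consistent with tdp_mmu.o being built
only when CONFIG_X86_64 is set (see arch/x86/kvm/Makefile), while this
patch makes kvm_tdp_page_fault() call kvm_tdp_mmu_map() unconditionally,
leaving an i386 kvm.ko nothing to link against. A minimal sketch of one
way to keep 32-bit builds working, assuming a write-lock direct-map
fallback is retained (the tdp_mmu_enabled flag and the helper names here
are illustrative, not part of this patch):

	int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
	{
		/* ... MTRR-based max_level clamping as in the patch ... */

	#ifdef CONFIG_X86_64
		/* The TDP MMU is compiled only into 64-bit kernels. */
		if (tdp_mmu_enabled)
			return kvm_tdp_mmu_page_fault(vcpu, fault);
	#endif
		/* 32-bit (and non-TDP-MMU) fallback: legacy write-lock path. */
		return direct_page_fault(vcpu, fault);
	}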

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3e03407f1321..182f9f417e4e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4209,11 +4209,15 @@  static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 	       mmu_notifier_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva);
 }
 
-static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
+				struct kvm_page_fault *fault)
 {
-	bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
 	int r;
 
+	pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);
+
+	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
+	fault->max_level = PG_LEVEL_2M;
 	fault->gfn = fault->addr >> PAGE_SHIFT;
 	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);
 
@@ -4237,11 +4241,7 @@  static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 		return r;
 
 	r = RET_PF_RETRY;
-
-	if (is_tdp_mmu_fault)
-		read_lock(&vcpu->kvm->mmu_lock);
-	else
-		write_lock(&vcpu->kvm->mmu_lock);
+	write_lock(&vcpu->kvm->mmu_lock);
 
 	if (is_page_fault_stale(vcpu, fault))
 		goto out_unlock;
@@ -4250,30 +4250,14 @@  static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	if (r)
 		goto out_unlock;
 
-	if (is_tdp_mmu_fault)
-		r = kvm_tdp_mmu_map(vcpu, fault);
-	else
-		r = nonpaging_map(vcpu, fault);
+	r = nonpaging_map(vcpu, fault);
 
 out_unlock:
-	if (is_tdp_mmu_fault)
-		read_unlock(&vcpu->kvm->mmu_lock);
-	else
-		write_unlock(&vcpu->kvm->mmu_lock);
+	write_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(fault->pfn);
 	return r;
 }
 
-static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
-				struct kvm_page_fault *fault)
-{
-	pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);
-
-	/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
-	fault->max_level = PG_LEVEL_2M;
-	return direct_page_fault(vcpu, fault);
-}
-
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 				u64 fault_address, char *insn, int insn_len)
 {
@@ -4309,6 +4293,11 @@  EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
+	int r;
+
+	fault->gfn = fault->addr >> PAGE_SHIFT;
+	fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn);
+
 	/*
 	 * If the guest's MTRRs may be used to compute the "real" memtype,
 	 * restrict the mapping level to ensure KVM uses a consistent memtype
@@ -4324,14 +4313,48 @@  int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	if (shadow_memtype_mask && kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
 		for ( ; fault->max_level > PG_LEVEL_4K; --fault->max_level) {
 			int page_num = KVM_PAGES_PER_HPAGE(fault->max_level);
-			gfn_t base = (fault->addr >> PAGE_SHIFT) & ~(page_num - 1);
+			gfn_t base = fault->gfn & ~(page_num - 1);
 
 			if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num))
 				break;
 		}
 	}
 
-	return direct_page_fault(vcpu, fault);
+	if (page_fault_handle_page_track(vcpu, fault))
+		return RET_PF_EMULATE;
+
+	r = fast_page_fault(vcpu, fault);
+	if (r != RET_PF_INVALID)
+		return r;
+
+	r = mmu_topup_memory_caches(vcpu, false);
+	if (r)
+		return r;
+
+	r = kvm_faultin_pfn(vcpu, fault);
+	if (r != RET_PF_CONTINUE)
+		return r;
+
+	r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
+	if (r != RET_PF_CONTINUE)
+		return r;
+
+	r = RET_PF_RETRY;
+	read_lock(&vcpu->kvm->mmu_lock);
+
+	if (is_page_fault_stale(vcpu, fault))
+		goto out_unlock;
+
+	r = make_mmu_pages_available(vcpu);
+	if (r)
+		goto out_unlock;
+
+	r = kvm_tdp_mmu_map(vcpu, fault);
+
+out_unlock:
+	read_unlock(&vcpu->kvm->mmu_lock);
+	kvm_release_pfn_clean(fault->pfn);
+	return r;
 }
 
 static void nonpaging_init_context(struct kvm_mmu *context)
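
A note on the locking asymmetry visible above: the TDP MMU path can run
under mmu_lock held for read because it publishes SPTEs with atomic
compare-and-exchange updates and simply retries the fault when another
vCPU wins the race, whereas nonpaging_map() mutates shadow page tables
and still requires the lock held for write. A hypothetical illustration
of that idiom (not code from this patch):

	/*
	 * Returns true if the new SPTE was installed; false if another
	 * vCPU changed the SPTE first, in which case the fault is retried.
	 */
	static bool tdp_set_spte_atomic(u64 *sptep, u64 old_spte, u64 new_spte)
	{
		return try_cmpxchg64(sptep, &old_spte, new_spte);
	}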