[21/21] KVM: x86/mmu: Split out Shadow MMU lockless walk begin/end

Message ID 20230202182809.1929122-22-bgardon@google.com (mailing list archive)
State New, archived
Series KVM: x86/MMU: Formalize the Shadow MMU

Commit Message

Ben Gardon Feb. 2, 2023, 6:28 p.m. UTC
Split the meat of walk_shadow_page_lockless_begin/end() out into
kvm_shadow_mmu_walk_lockless_begin/end() in shadow_mmu.c, since there's
no need for it in the common MMU code.

Suggested-by: David Matlack <dmatlack@google.com>
Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c        | 31 ++++++-------------------------
 arch/x86/kvm/mmu/shadow_mmu.c | 27 +++++++++++++++++++++++++++
 arch/x86/kvm/mmu/shadow_mmu.h |  3 +++
 3 files changed, 36 insertions(+), 25 deletions(-)
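
Note: for context, these wrappers bracket lockless SPTE walks such as the
ones in get_mmio_spte() and fast_page_fault(). Below is a minimal sketch of
a shadow-MMU caller, illustrative only and not part of this patch:
example_get_last_spte() is a made-up name, and the iterator helpers it uses
(for_each_shadow_entry_lockless(), struct kvm_shadow_walk_iterator) are
static to the MMU code, so it would only compile there.

/*
 * Illustrative sketch: bracket a lockless shadow-page-table walk with the
 * begin/end helpers.  A TDP MMU vCPU would use the TDP iterator instead of
 * for_each_shadow_entry_lockless().
 */
static u64 example_get_last_spte(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        struct kvm_shadow_walk_iterator iterator;
        u64 spte = 0ull;

        /* TDP MMU: RCU read lock; Shadow MMU: IRQs off, READING_SHADOW_PAGE_TABLES. */
        walk_shadow_page_lockless_begin(vcpu);

        /* The macro leaves @spte holding the last (lowest-level) entry visited. */
        for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte)
                ;

        /* TDP MMU: RCU read unlock; Shadow MMU: vcpu->mode restored, IRQs on. */
        walk_shadow_page_lockless_end(vcpu);

        return spte;
}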

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 10aff23dea75d..cfccc4c7a1427 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -207,37 +207,18 @@  static inline bool is_tdp_mmu_active(struct kvm_vcpu *vcpu)
 
 void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
 {
-	if (is_tdp_mmu_active(vcpu)) {
+	if (is_tdp_mmu_active(vcpu))
 		kvm_tdp_mmu_walk_lockless_begin();
-	} else {
-		/*
-		 * Prevent page table teardown by making any free-er wait during
-		 * kvm_flush_remote_tlbs() IPI to all active vcpus.
-		 */
-		local_irq_disable();
-
-		/*
-		 * Make sure a following spte read is not reordered ahead of the write
-		 * to vcpu->mode.
-		 */
-		smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
-	}
+	else
+		kvm_shadow_mmu_walk_lockless_begin(vcpu);
 }
 
 void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 {
-	if (is_tdp_mmu_active(vcpu)) {
+	if (is_tdp_mmu_active(vcpu))
 		kvm_tdp_mmu_walk_lockless_end();
-	} else {
-		/*
-		 * Make sure the write to vcpu->mode is not reordered in front
-		 * of reads to sptes.  If it does,
-		 * kvm_shadow_mmu_commit_zap_page() can see us
-		 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
-		 */
-		smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
-		local_irq_enable();
-	}
+	else
+		kvm_shadow_mmu_walk_lockless_end(vcpu);
 }
 
 int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
diff --git a/arch/x86/kvm/mmu/shadow_mmu.c b/arch/x86/kvm/mmu/shadow_mmu.c
index 6449ac4de4883..c5d0accd6e057 100644
--- a/arch/x86/kvm/mmu/shadow_mmu.c
+++ b/arch/x86/kvm/mmu/shadow_mmu.c
@@ -3663,3 +3663,30 @@  void kvm_mmu_uninit_shadow_mmu(struct kvm *kvm)
 	kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
 	kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
 }
+
+void kvm_shadow_mmu_walk_lockless_begin(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Prevent page table teardown by making any free-er wait during
+	 * kvm_flush_remote_tlbs() IPI to all active vcpus.
+	 */
+	local_irq_disable();
+
+	/*
+	 * Make sure a following spte read is not reordered ahead of the write
+	 * to vcpu->mode.
+	 */
+	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
+}
+
+void kvm_shadow_mmu_walk_lockless_end(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Make sure the write to vcpu->mode is not reordered in front
+	 * of reads to sptes.  If it does,
+	 * kvm_shadow_mmu_commit_zap_page() can see us
+	 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
+	 */
+	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
+	local_irq_enable();
+}
diff --git a/arch/x86/kvm/mmu/shadow_mmu.h b/arch/x86/kvm/mmu/shadow_mmu.h
index f2e54355ebb19..12835872bda34 100644
--- a/arch/x86/kvm/mmu/shadow_mmu.h
+++ b/arch/x86/kvm/mmu/shadow_mmu.h
@@ -103,6 +103,9 @@  void kvm_shadow_mmu_zap_all(struct kvm *kvm);
 void kvm_mmu_init_shadow_mmu(struct kvm *kvm);
 void kvm_mmu_uninit_shadow_mmu(struct kvm *kvm);
 
+void kvm_shadow_mmu_walk_lockless_begin(struct kvm_vcpu *vcpu);
+void kvm_shadow_mmu_walk_lockless_end(struct kvm_vcpu *vcpu);
+
 /* Exports from paging_tmpl.h */
 gpa_t paging32_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			  gpa_t vaddr, u64 access,