
KVM: x86/mmu: Drop "struct kvm_mmu *mmu" from __kvm_mmu_invalidate_addr()

Message ID: 20230519081711.72906-1-likexu@tencent.com
State New, archived
Series: KVM: x86/mmu: Drop "struct kvm_mmu *mmu" from __kvm_mmu_invalidate_addr()

Commit Message

Like Xu May 19, 2023, 8:17 a.m. UTC
From: Like Xu <likexu@tencent.com>

Remove the incoming parameter "struct kvm_mmu *mmu", which is no longer
used. Whether the function is working on "vcpu->arch.root_mmu" or
"vcpu->arch.guest_mmu", it can be referenced as expected via
"vcpu->arch.mmu"; the "*mmu" usage was replaced by "vcpu->arch.mmu" in
commit 19ace7d6ca15 ("KVM: x86/mmu: Skip calling mmu->sync_spte() when
the spte is 0").

No functional change is intended.

Signed-off-by: Like Xu <likexu@tencent.com>
---
 arch/x86/kvm/mmu/mmu.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)


base-commit: f1fcbaa18b28dec10281551dfe6ed3a3ed80e3d6

Comments

Sean Christopherson May 19, 2023, 3:12 p.m. UTC | #1
On Fri, May 19, 2023, Like Xu wrote:
> From: Like Xu <likexu@tencent.com>
> 
> Remove the incoming parameter "struct kvm_mmu *mmu", which is no longer
> used. Whether the function is working on "vcpu->arch.root_mmu" or
> "vcpu->arch.guest_mmu", it can be referenced as expected via
> "vcpu->arch.mmu"; the "*mmu" usage was replaced by "vcpu->arch.mmu" in
> commit 19ace7d6ca15 ("KVM: x86/mmu: Skip calling mmu->sync_spte() when
> the spte is 0").
> 
> No functional change is intended.
> 
> Signed-off-by: Like Xu <likexu@tencent.com>
> ---
>  arch/x86/kvm/mmu/mmu.c | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
> index c8961f45e3b1..160c40c83330 100644
> --- a/arch/x86/kvm/mmu/mmu.c
> +++ b/arch/x86/kvm/mmu/mmu.c
> @@ -5790,8 +5790,8 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
>  }
>  EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
>  
> -static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
> -				      u64 addr, hpa_t root_hpa)
> +static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, u64 addr,
> +				      hpa_t root_hpa)
>  {
>  	struct kvm_shadow_walk_iterator iterator;

Oof, there's a mess waiting to happen here.  for_each_shadow_entry_using_root()
and kvm_sync_spte() do indeed operate on vcpu->arch.mmu, but the only reason that
doesn't cause explosions is because handle_invept() frees roots instead of doing
a manual invalidation.  At a glance, I don't see any major roadblocks to switching
INVEPT emulation over to use kvm_mmu_invalidate_addr().
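
For reference, the coupling is baked into the walker setup and the sync
helper, both of which grab vcpu->arch.mmu directly (condensed paraphrase
of mmu.c, not verbatim):

	/*
	 * shadow_walk_init_using_root(): the walk depth comes from the
	 * *current* MMU, not from whichever MMU owns @root.
	 */
	iterator->addr = addr;
	iterator->shadow_addr = root;
	iterator->level = vcpu->arch.mmu->root_role.level;

	/* kvm_sync_spte(): likewise dispatches through the current MMU. */
	return vcpu->arch.mmu->sync_spte(vcpu, sp, i);

I.e. passing in a @mmu other than vcpu->arch.mmu, e.g. guest_mmu while
root_mmu is loaded, would walk @root with the wrong level and invoke the
wrong sync callback.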

I'm leaning towards asserting on @mmu instead of deleting it.

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c8961f45e3b1..258f12235874 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5797,6 +5797,14 @@ static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
 
 	vcpu_clear_mmio_info(vcpu, addr);
 
+	/*
+	 * Walking and synchronizing SPTEs both assume they are operating in
+	 * the context of the current MMU, and would need to be reworked if
+	 * this is ever used to sync the guest_mmu, e.g. to emulate INVEPT.
+	 */
+	if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
+		return;
+
 	if (!VALID_PAGE(root_hpa))
 		return;
 

base-commit: 5c291b93e5d665380dbecc6944973583f9565ee5
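
For reference, why the mismatch is benign today: INVEPT emulation tears
down guest_mmu roots wholesale instead of walking them, so
__kvm_mmu_invalidate_addr() never runs with mmu != vcpu->arch.mmu
(condensed paraphrase of handle_invept() in arch/x86/kvm/vmx/nested.c,
not verbatim):

	struct kvm_mmu *mmu = &vcpu->arch.guest_mmu;

	/* Single-context: flag only the roots matching the target EPTP. */
	if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd,
				    operand.eptp))
		roots_to_free |= KVM_MMU_ROOT_CURRENT;

	/* Roots are freed outright; no per-address invalidation happens. */
	if (roots_to_free)
		kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);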
--

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c8961f45e3b1..160c40c83330 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5790,8 +5790,8 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
-static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-				      u64 addr, hpa_t root_hpa)
+static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, u64 addr,
+				      hpa_t root_hpa)
 {
 	struct kvm_shadow_walk_iterator iterator;
 
@@ -5839,11 +5839,11 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 		return;
 
 	if (roots & KVM_MMU_ROOT_CURRENT)
-		__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
+		__kvm_mmu_invalidate_addr(vcpu, addr, mmu->root.hpa);
 
 	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
 		if (roots & KVM_MMU_ROOT_PREVIOUS(i))
-			__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
+			__kvm_mmu_invalidate_addr(vcpu, addr, mmu->prev_roots[i].hpa);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);