
[v15,01/20] Revert "KVM: x86: Add gmem hook for determining max NPT mapping level"

Message ID 20240501085210.2213060-2-michael.roth@amd.com (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series: Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth May 1, 2024, 8:51 a.m. UTC
This reverts commit 20cc50a0410f338657e23e77fcc21fee2bc291e6.

As pointed out here[1], this patch has a few issues:
  - the error response could theoretically kill a guest in cases where
    retrying based on mmu_invalidate_seq would have been sufficient, so
    the hook should purely be a means of finding the max mapping level
    and should never return an error
  - the gpa/private arguments are not currently needed for anything
  - it's not really a "gmem" hook, but it uses the same naming
    convention as the actual gmem hooks

Revert it so it can be replaced with a fully-intact replacement patch
that addresses the above.

[1] https://lore.kernel.org/kvm/ZimnngU7hn7sKoSc@google.com/
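
[Editorial note: as an illustration of the direction described above, here is
a minimal sketch of a hook that can only narrow the mapping level and never
fails the fault. The names (private_max_mapping_level,
kvm_max_private_mapping_level) and the exact shape are assumptions for
illustration only, not necessarily what the follow-up patch uses.]

static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
					u8 max_level, int max_order)
{
	u8 req_max_level;

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	/* Clamp to what the gmem backing order allows. */
	max_level = min(kvm_max_level_for_order(max_order), max_level);
	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	/*
	 * Vendor code (e.g. SNP consulting the RMP) reports a level; 0 means
	 * "no additional restriction". It cannot fail, so the fault path
	 * never has to kill the guest on its behalf.
	 */
	req_max_level = static_call(kvm_x86_private_max_mapping_level)(kvm, pfn);
	if (req_max_level)
		max_level = min(max_level, req_max_level);

	return max_level;
}

[With a hook of this shape, kvm_faultin_pfn_private() would only adjust
fault->max_level and would have no error path to unwind, addressing the
first issue above.]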

Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/include/asm/kvm-x86-ops.h | 1 -
 arch/x86/include/asm/kvm_host.h    | 2 --
 arch/x86/kvm/mmu/mmu.c             | 8 --------
 3 files changed, 11 deletions(-)

Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 2db87a6fd52a..c81990937ab4 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -140,7 +140,6 @@  KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
 KVM_X86_OP_OPTIONAL(get_untagged_addr)
 KVM_X86_OP_OPTIONAL(alloc_apic_backing_page)
 KVM_X86_OP_OPTIONAL_RET0(gmem_prepare)
-KVM_X86_OP_OPTIONAL_RET0(gmem_validate_fault)
 KVM_X86_OP_OPTIONAL(gmem_invalidate)
 
 #undef KVM_X86_OP
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4c9d8a22840a..c6c5018376be 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1816,8 +1816,6 @@  struct kvm_x86_ops {
 	void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
 	int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 	void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
-	int (*gmem_validate_fault)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, bool is_private,
-				   u8 *max_level);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index eebb1562c5bc..510eb1117012 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4292,14 +4292,6 @@  static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
 			       fault->max_level);
 	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
 
-	r = static_call(kvm_x86_gmem_validate_fault)(vcpu->kvm, fault->pfn,
-						     fault->gfn, fault->is_private,
-						     &fault->max_level);
-	if (r) {
-		kvm_release_pfn_clean(fault->pfn);
-		return r;
-	}
-
 	return RET_PF_CONTINUE;
 }