
[v11,30/35] KVM: x86: Add gmem hook for determining max NPT mapping level

Message ID 20231230172351.574091-31-michael.roth@amd.com (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth Dec. 30, 2023, 5:23 p.m. UTC
In the case of SEV-SNP, whether a 2MB page can be mapped via a 2MB
mapping in the guest's nested page table depends on whether any
subpages within the range have already been initialized as private in
the RMP table. The existing mixed-attribute tracking in KVM is
insufficient here; for instance:

  - gmem allocates 2MB page
  - guest issues PVALIDATE on 2MB page
  - guest later converts a subpage to shared
  - SNP host code issues PSMASH to split 2MB RMP mapping to 4K
  - KVM MMU splits NPT mapping to 4K

At this point there are no mixed attributes, and KVM would normally
allow 2MB NPT mappings again. However, that is not actually allowed
here, because the RMP table entries are now 4K and cannot be promoted
on the hypervisor side, so the NPT mappings must remain limited to 4K
to match.

Add a hook to determine the max NPT mapping size in situations like
this.

Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  1 +
 arch/x86/kvm/mmu/mmu.c             | 12 ++++++++++--
 arch/x86/kvm/svm/sev.c             | 27 +++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c             |  1 +
 5 files changed, 40 insertions(+), 2 deletions(-)
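
For orientation, the vendor hook added below amounts to the following
contract. The prototype is taken from the patch; the annotations are an
editorial reading of the diff, not text from the series:

	/*
	 * Optional kvm_x86_ops hook (KVM_X86_OP_OPTIONAL_RET0, so the
	 * default implementation returns 0 and leaves *max_level alone).
	 * On entry, *max_level holds KVM's candidate NPT mapping level;
	 * the vendor implementation may only lower it, e.g. to
	 * PG_LEVEL_4K when the backing RMP entry has been smashed.
	 * A nonzero return fails the fault in kvm_faultin_pfn_private().
	 */
	int (*gmem_max_level)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn,
			      u8 *max_level);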

Comments

Paolo Bonzini Feb. 12, 2024, 10:50 a.m. UTC | #1
On Sat, Dec 30, 2023 at 6:32 PM Michael Roth <michael.roth@amd.com> wrote:
>         int max_order, r;
> +       u8 max_level;
>
>         if (!kvm_slot_can_be_private(fault->slot)) {
>                 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
> @@ -4321,8 +4322,15 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
>                 return r;
>         }
>
> -       fault->max_level = min(kvm_max_level_for_order(max_order),
> -                              fault->max_level);
> +       max_level = kvm_max_level_for_order(max_order);
> +       r = static_call(kvm_x86_gmem_max_level)(vcpu->kvm, fault->pfn,
> +                                               fault->gfn, &max_level);

Might as well pass &fault->max_level directly to the callback, with no
change to the vendor-specific code.

I'll include the MMU part in a generic series to be the base for both
Intel TDX and AMD SEV-SNP, and will do that change.

Paolo
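
A sketch of the call site with that suggestion applied, assuming no
other changes to kvm_faultin_pfn_private() (an editorial
reconstruction, not code posted in the series):

	fault->max_level = min(kvm_max_level_for_order(max_order),
			       fault->max_level);
	r = static_call(kvm_x86_gmem_max_level)(vcpu->kvm, fault->pfn,
						fault->gfn, &fault->max_level);
	if (r) {
		kvm_release_pfn_clean(fault->pfn);
		return r;
	}

This drops the local max_level variable entirely; the callback then
sees a value already clamped by kvm_max_level_for_order() and the
caller's fault->max_level.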

> +       if (r) {
> +               kvm_release_pfn_clean(fault->pfn);
> +               return r;
> +       }
> +
> +       fault->max_level = min(max_level, fault->max_level);
>         fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
>
>         return RET_PF_CONTINUE;
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 85f63b6842b6..5eb836b73131 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -4315,3 +4315,30 @@ void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
>                 pfn += use_2m_update ? PTRS_PER_PMD : 1;
>         }
>  }
> +
> +int sev_gmem_max_level(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, u8 *max_level)
> +{
> +       int level, rc;
> +       bool assigned;
> +
> +       if (!sev_snp_guest(kvm))
> +               return 0;
> +
> +       rc = snp_lookup_rmpentry(pfn, &assigned, &level);
> +       if (rc) {
> +               pr_err_ratelimited("SEV: RMP entry not found: GFN %llx PFN %llx level %d error %d\n",
> +                                  gfn, pfn, level, rc);
> +               return -ENOENT;
> +       }
> +
> +       if (!assigned) {
> +               pr_err_ratelimited("SEV: RMP entry is not assigned: GFN %llx PFN %llx level %d\n",
> +                                  gfn, pfn, level);
> +               return -EINVAL;
> +       }
> +
> +       if (level < *max_level)
> +               *max_level = level;
> +
> +       return 0;
> +}
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index f26b8c2a8be4..f745022f7454 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -5067,6 +5067,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
>         .alloc_apic_backing_page = svm_alloc_apic_backing_page,
>
>         .gmem_prepare = sev_gmem_prepare,
> +       .gmem_max_level = sev_gmem_max_level,
>         .gmem_invalidate = sev_gmem_invalidate,
>  };
>
> --
> 2.25.1
>
Michael Roth Feb. 12, 2024, 5:03 p.m. UTC | #2
On Mon, Feb 12, 2024 at 11:50:26AM +0100, Paolo Bonzini wrote:
> On Sat, Dec 30, 2023 at 6:32 PM Michael Roth <michael.roth@amd.com> wrote:
> >         int max_order, r;
> > +       u8 max_level;
> >
> >         if (!kvm_slot_can_be_private(fault->slot)) {
> >                 kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
> > @@ -4321,8 +4322,15 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
> >                 return r;
> >         }
> >
> > -       fault->max_level = min(kvm_max_level_for_order(max_order),
> > -                              fault->max_level);
> > +       max_level = kvm_max_level_for_order(max_order);
> > +       r = static_call(kvm_x86_gmem_max_level)(vcpu->kvm, fault->pfn,
> > +                                               fault->gfn, &max_level);
> 
> Might as well pass &fault->max_level directly to the callback, with no
> change to the vendor-specific code.
> 
> I'll include the MMU part in a generic series to be the base for both
> Intel TDX and AMD SEV-SNP, and will do that change.

Sounds good. I'm not sure why I did it that way originally, but what
you're suggesting does seem like it should be equivalent.

-Mike
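
The two forms are indeed equivalent: each stage only ever lowers the
level, so the whole computation is a composition of min() operations,
and those commute. As an editorial sketch, with clamp_rmp() standing in
for the effect of the vendor callback (it is not a real function):

	/*
	 * v11 as posted:      min(clamp_rmp(order_level), fault_level)
	 * Paolo's suggestion: clamp_rmp(min(order_level, fault_level))
	 *
	 * where clamp_rmp(x) == min(x, rmp_level). Both evaluate to
	 * min(order_level, rmp_level, fault_level).
	 */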


Patch

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index c4b7b0db7be3..b0a174213dad 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -140,6 +140,7 @@ KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
 KVM_X86_OP_OPTIONAL(get_untagged_addr)
 KVM_X86_OP_OPTIONAL(alloc_apic_backing_page)
 KVM_X86_OP_OPTIONAL_RET0(gmem_prepare)
+KVM_X86_OP_OPTIONAL_RET0(gmem_max_level)
 KVM_X86_OP_OPTIONAL(gmem_invalidate)
 
 #undef KVM_X86_OP
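
A reading note on the macro choice: KVM_X86_OP_OPTIONAL_RET0 registers
the op with a default static call that returns 0, so vendor modules
that never set .gmem_max_level behave as if the hook were the stub
below. The stub and its name are an editorial illustration, not code
from the patch:

	/*
	 * Effective fallback when .gmem_max_level is left NULL: report
	 * success and leave *max_level unchanged, preserving the old
	 * behavior for guests without an RMP-style restriction.
	 */
	static int gmem_max_level_default(struct kvm *kvm, kvm_pfn_t pfn,
					  gfn_t gfn, u8 *max_level)
	{
		return 0;
	}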
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9e45402e51bc..ee1e81608e07 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1806,6 +1806,7 @@ struct kvm_x86_ops {
 	void *(*alloc_apic_backing_page)(struct kvm_vcpu *vcpu);
 	int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 	void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end);
+	int (*gmem_max_level)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, u8 *max_level);
 };
 
 struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1882096fba3e..21f44ec37b29 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4308,6 +4308,7 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
 				   struct kvm_page_fault *fault)
 {
 	int max_order, r;
+	u8 max_level;
 
 	if (!kvm_slot_can_be_private(fault->slot)) {
 		kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
@@ -4321,8 +4322,15 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
 		return r;
 	}
 
-	fault->max_level = min(kvm_max_level_for_order(max_order),
-			       fault->max_level);
+	max_level = kvm_max_level_for_order(max_order);
+	r = static_call(kvm_x86_gmem_max_level)(vcpu->kvm, fault->pfn,
+						fault->gfn, &max_level);
+	if (r) {
+		kvm_release_pfn_clean(fault->pfn);
+		return r;
+	}
+
+	fault->max_level = min(max_level, fault->max_level);
 	fault->map_writable = !(fault->slot->flags & KVM_MEM_READONLY);
 
 	return RET_PF_CONTINUE;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 85f63b6842b6..5eb836b73131 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4315,3 +4315,30 @@ void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end)
 		pfn += use_2m_update ? PTRS_PER_PMD : 1;
 	}
 }
+
+int sev_gmem_max_level(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, u8 *max_level)
+{
+	int level, rc;
+	bool assigned;
+
+	if (!sev_snp_guest(kvm))
+		return 0;
+
+	rc = snp_lookup_rmpentry(pfn, &assigned, &level);
+	if (rc) {
+		pr_err_ratelimited("SEV: RMP entry not found: GFN %llx PFN %llx level %d error %d\n",
+				   gfn, pfn, level, rc);
+		return -ENOENT;
+	}
+
+	if (!assigned) {
+		pr_err_ratelimited("SEV: RMP entry is not assigned: GFN %llx PFN %llx level %d\n",
+				   gfn, pfn, level);
+		return -EINVAL;
+	}
+
+	if (level < *max_level)
+		*max_level = level;
+
+	return 0;
+}
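
To connect this back to the commit message's scenario: after a 2MB RMP
entry has been PSMASHed to 4K, the hook clamps the candidate level
accordingly. The snippet below is an editorial illustration and assumes
snp_lookup_rmpentry() reports the level in PG_LEVEL_* terms, as the
direct comparison against *max_level implies:

	u8 max_level = PG_LEVEL_2M;	/* candidate from kvm_max_level_for_order() */
	int r = sev_gmem_max_level(kvm, pfn, gfn, &max_level);

	/*
	 * The smashed RMP entry is 4K, so snp_lookup_rmpentry() returns
	 * level == PG_LEVEL_4K and the hook lowers max_level: on success,
	 * r == 0 and max_level == PG_LEVEL_4K, keeping the NPT mapping
	 * in sync with the unpromotable RMP entry.
	 */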
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f26b8c2a8be4..f745022f7454 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5067,6 +5067,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.alloc_apic_backing_page = svm_alloc_apic_backing_page,
 
 	.gmem_prepare = sev_gmem_prepare,
+	.gmem_max_level = sev_gmem_max_level,
 	.gmem_invalidate = sev_gmem_invalidate,
 };