[v3,63/66] KVM: arm64: nv: Allocate VNCR page when required

Message ID 20201210160002.1407373-64-maz@kernel.org (mailing list archive)
State New, archived
Series KVM: arm64: ARMv8.3/8.4 Nested Virtualization support

Commit Message

Marc Zyngier Dec. 10, 2020, 3:59 p.m. UTC
If running a NV guest on an ARMv8.4-NV capable system, let's
allocate an additional page that will be used by the hypervisor
to fulfill system register accesses.
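
For context (an illustrative sketch, not part of the patch): with
ARMv8.4-NV (FEAT_NV2), the CPU turns eligible EL2 system register
accesses made by the guest hypervisor into memory accesses relative to
VNCR_EL2. KVM points VNCR_EL2 at the page allocated here, and
__ctxt_sys_reg() resolves the same registers into that page, so
hardware and software share a single backing store:

	/* illustrative only; __vcpu_sys_reg() is the existing accessor */
	static u64 peek_shadow_sysreg(struct kvm_vcpu *vcpu, int r)
	{
		/* lands in ctxt->vncr_array[r - __VNCR_START__] on NV2 hosts */
		return __vcpu_sys_reg(vcpu, r);
	}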

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h | 3 ++-
 arch/arm64/kvm/nested.c           | 8 ++++++++
 arch/arm64/kvm/reset.c            | 1 +
 3 files changed, 11 insertions(+), 1 deletion(-)

Comments

Haibo Xu Jan. 21, 2021, 2:47 a.m. UTC | #1
On Fri, 11 Dec 2020 at 00:04, Marc Zyngier <maz@kernel.org> wrote:
>
> If running a NV guest on an ARMv8.4-NV capable system, let's
> allocate an additional page that will be used by the hypervisor
> to fulfill system register accesses.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/kvm_host.h | 3 ++-
>  arch/arm64/kvm/nested.c           | 8 ++++++++
>  arch/arm64/kvm/reset.c            | 1 +
>  3 files changed, 11 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 78630bd5124d..dada0678c28e 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -523,7 +523,8 @@ struct kvm_vcpu_arch {
>   */
>  static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
>  {
> -       if (unlikely(r >= __VNCR_START__ && ctxt->vncr_array))
> +       if (unlikely(cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
> +                    r >= __VNCR_START__ && ctxt->vncr_array))
>                 return &ctxt->vncr_array[r - __VNCR_START__];
>
>         return (u64 *)&ctxt->sys_regs[r];
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index eef8f9873814..88147ec99755 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -47,6 +47,12 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
>         if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
>                 return -EINVAL;
>
> +       if (cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT)) {
> +               vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
> +               if (!vcpu->arch.ctxt.vncr_array)
> +                       return -ENOMEM;
> +       }
> +

If KVM_ARM_VCPU_INIT is called multiple times, the above code would try
to allocate a new page without freeing the previous one. Besides that,
the following kvm_free_stage2_pgd() call would fail on the second call
with the error message "kvm_arch already initialized?". I think a
possible fix is to add a new flag indicating whether the NV-related
metadata has been initialized, and only initialize it on the first call.
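
Something along these lines, as a rough sketch (the flag field below is
made up for illustration, not an existing member):

	/* hypothetical flag making the NV init path idempotent */
	if (cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
	    !vcpu->arch.nested_init_done) {
		vcpu->arch.ctxt.vncr_array =
			(u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
		if (!vcpu->arch.ctxt.vncr_array)
			return -ENOMEM;
		vcpu->arch.nested_init_done = true;	/* hypothetical */
	}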

>         mutex_lock(&kvm->lock);
>
>         /*
> @@ -64,6 +70,8 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
>                     kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2])) {
>                         kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
>                         kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
> +                       free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
> +                       vcpu->arch.ctxt.vncr_array = NULL;
>                 } else {
>                         kvm->arch.nested_mmus_size = num_mmus;
>                         ret = 0;
> diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
> index 2d2c780e6c69..d281eb39036f 100644
> --- a/arch/arm64/kvm/reset.c
> +++ b/arch/arm64/kvm/reset.c
> @@ -150,6 +150,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
>  void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
>  {
>         kfree(vcpu->arch.sve_state);
> +       free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
>  }
>
>  static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
> --
> 2.29.2
>
Marc Zyngier Feb. 16, 2021, 7:03 p.m. UTC | #2
On Thu, 21 Jan 2021 02:47:45 +0000,
Haibo Xu <haibo.xu@linaro.org> wrote:
> 
> On Fri, 11 Dec 2020 at 00:04, Marc Zyngier <maz@kernel.org> wrote:
> >
> > If running a NV guest on an ARMv8.4-NV capable system, let's
> > allocate an additional page that will be used by the hypervisor
> > to fulfill system register accesses.
> >
> > Signed-off-by: Marc Zyngier <maz@kernel.org>
> > ---
> >  arch/arm64/include/asm/kvm_host.h | 3 ++-
> >  arch/arm64/kvm/nested.c           | 8 ++++++++
> >  arch/arm64/kvm/reset.c            | 1 +
> >  3 files changed, 11 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > index 78630bd5124d..dada0678c28e 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -523,7 +523,8 @@ struct kvm_vcpu_arch {
> >   */
> >  static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
> >  {
> > -       if (unlikely(r >= __VNCR_START__ && ctxt->vncr_array))
> > +       if (unlikely(cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
> > +                    r >= __VNCR_START__ && ctxt->vncr_array))
> >                 return &ctxt->vncr_array[r - __VNCR_START__];
> >
> >         return (u64 *)&ctxt->sys_regs[r];
> > diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> > index eef8f9873814..88147ec99755 100644
> > --- a/arch/arm64/kvm/nested.c
> > +++ b/arch/arm64/kvm/nested.c
> > @@ -47,6 +47,12 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
> >         if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
> >                 return -EINVAL;
> >
> > +       if (cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT)) {
> > +               vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
> > +               if (!vcpu->arch.ctxt.vncr_array)
> > +                       return -ENOMEM;
> > +       }
> > +
> 
> If KVM_ARM_VCPU_INIT is called multiple times, the above code
> would try to allocate a new page without freeing the previous
> one. Besides that, the following kvm_free_stage2_pgd() call would

I assume you mean kvm_init_stage2_mmu() here.

> fail on the second call with the error message "kvm_arch already
> initialized?". I think a possible fix is to add a new flag
> indicating whether the NV-related metadata has been initialized,
> and only initialize it on the first call.

Good catch. But I think we have all the data we need at this stage to
avoid this issue:

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index abb0669bdd4c..baff7373863f 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -390,7 +390,20 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 	int cpu, err;
 	struct kvm_pgtable *pgt;
 
+	/*
+	 * If we already have our page tables in place, and the
+	 * MMU context is the canonical one, we have a bug somewhere,
+	 * as this is only supposed to ever happen once per VM.
+	 *
+	 * Otherwise, we're building nested page tables, and that's
+	 * probably because userspace called KVM_ARM_VCPU_INIT more
+	 * than once on the same vcpu. Since that's actually legal,
+	 * don't make a fuss and leave gracefully.
+	 */
 	if (mmu->pgt != NULL) {
+		if (&kvm->arch.mmu != mmu)
+			return 0;
+
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 88147ec99755..3b21ea57fbce 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -48,7 +48,9 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 
 	if (cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT)) {
-		vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!vcpu->arch.ctxt.vncr_array)
+			vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
 		if (!vcpu->arch.ctxt.vncr_array)
 			return -ENOMEM;
 	}
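
The net effect: a second KVM_ARM_VCPU_INIT now finds vncr_array already
allocated and skips the allocation, and kvm_init_stage2_mmu() quietly
succeeds for a nested MMU whose page tables already exist, while a
re-init of the canonical MMU still triggers the kvm_err() above.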

Thanks,

	M.

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 78630bd5124d..dada0678c28e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -523,7 +523,8 @@ struct kvm_vcpu_arch {
  */
 static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
 {
-	if (unlikely(r >= __VNCR_START__ && ctxt->vncr_array))
+	if (unlikely(cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT) &&
+		     r >= __VNCR_START__ && ctxt->vncr_array))
 		return &ctxt->vncr_array[r - __VNCR_START__];
 
 	return (u64 *)&ctxt->sys_regs[r];
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index eef8f9873814..88147ec99755 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -47,6 +47,12 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
 		return -EINVAL;
 
+	if (cpus_have_final_cap(ARM64_HAS_ENHANCED_NESTED_VIRT)) {
+		vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!vcpu->arch.ctxt.vncr_array)
+			return -ENOMEM;
+	}
+
 	mutex_lock(&kvm->lock);
 
 	/*
@@ -64,6 +70,8 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
 		    kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2])) {
 			kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
 			kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
+			free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
+			vcpu->arch.ctxt.vncr_array = NULL;
 		} else {
 			kvm->arch.nested_mmus_size = num_mmus;
 			ret = 0;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 2d2c780e6c69..d281eb39036f 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -150,6 +150,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
 void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	kfree(vcpu->arch.sve_state);
+	free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
 }
 
 static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)