@@ -38,6 +38,12 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
return -EINVAL;
+ if (!vcpu->arch.ctxt.vncr_array)
+ vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+
+ if (!vcpu->arch.ctxt.vncr_array)
+ return -ENOMEM;
+
/*
* Let's treat memory allocation failures as benign: If we fail to
* allocate anything, return an error and keep the allocated array
@@ -65,6 +71,8 @@ int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
kvm_init_stage2_mmu(kvm, &tmp[num_mmus - 2], 0)) {
kvm_free_stage2_pgd(&tmp[num_mmus - 1]);
kvm_free_stage2_pgd(&tmp[num_mmus - 2]);
+ free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
+ vcpu->arch.ctxt.vncr_array = NULL;
} else {
kvm->arch.nested_mmus_size = num_mmus;
ret = 0;
@@ -156,6 +156,7 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
if (sve_state)
kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
kfree(sve_state);
+ free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
kfree(vcpu->arch.ccsidr);
}
If running an NV guest on an ARMv8.4-NV capable system, let's allocate an additional page that will be used by the hypervisor to fulfill system register accesses. Signed-off-by: Marc Zyngier <maz@kernel.org> --- arch/arm64/kvm/nested.c | 8 ++++++++ arch/arm64/kvm/reset.c | 1 + 2 files changed, 9 insertions(+)