--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3719,7 +3719,9 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	} else {
 		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
+		vmload(svm->vmcb_pa);
 		__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&vcpu->arch.regs);
+		vmsave(svm->vmcb_pa);
 
 		vmload(__sme_page_pa(sd->save_area));
 	}
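The vmload() and vmsave() calls added above are C wrappers whose fixup
logic replaces the open-coded assembly removed below. As a rough sketch
of how such a wrapper can be built (modeled on the svm_asm1() macro in
arch/x86/kvm/svm/svm_ops.h of this era; exact names and operand details
may differ, so treat this as an illustration rather than the verbatim
source):

/*
 * Sketch: emit the SVM instruction with an exception-table entry that
 * redirects a fault to the "fault" label, where kvm_spurious_fault()
 * applies the kvm_rebooting policy the assembly used to open-code.
 */
#define svm_asm1(insn, op1, clobber...)					\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  :: op1 : clobber : fault);			\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void vmload(unsigned long pa)
{
	svm_asm1(vmload, "a" (pa), "memory");
}

static __always_inline void vmsave(unsigned long pa)
{
	svm_asm1(vmsave, "a" (pa), "memory");
}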
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -79,12 +79,6 @@ SYM_FUNC_START(__svm_vcpu_run)
 
 	/* Enter guest mode */
 	sti
-1:	vmload %_ASM_AX
-	jmp 3f
-2:	cmpb $0, kvm_rebooting
-	jne 3f
-	ud2
-	_ASM_EXTABLE(1b, 2b)
 
 3:	vmrun %_ASM_AX
 	jmp 5f
@@ -93,13 +87,7 @@ SYM_FUNC_START(__svm_vcpu_run)
 	ud2
 	_ASM_EXTABLE(3b, 4b)
 
-5:	vmsave %_ASM_AX
-	jmp 7f
-6:	cmpb $0, kvm_rebooting
-	jne 7f
-	ud2
-	_ASM_EXTABLE(5b, 6b)
-7:
+5:
 	cli
 
 #ifdef CONFIG_RETPOLINE
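Each deleted fixup block above (the cmpb $0, kvm_rebooting / jne / ud2
sequence reached through _ASM_EXTABLE) encoded the rule that a fault on
an SVM instruction is tolerable only while KVM is rebooting. With the C
wrappers, that policy lives in one shared helper; in kernels of this
vintage it is essentially:

void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);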
Thanks to the new macros that handle exception handling for SVM
instructions, it is easier to just do the VMLOAD/VMSAVE in C.  This is
safe, as shown by the fact that the host reload is already done outside
the assembly source.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/svm.c     |  2 ++
 arch/x86/kvm/svm/vmenter.S | 14 +-------------
 2 files changed, 3 insertions(+), 13 deletions(-)
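For reference, the else branch of svm_vcpu_enter_exit() then reads
roughly as follows (a sketch assembled from the hunk above, with
comments added):

	} else {
		struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);

		vmload(svm->vmcb_pa);		/* load guest save state */
		/* sti; vmrun; cli, plus GPR handling, stays in assembly */
		__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&vcpu->arch.regs);
		vmsave(svm->vmcb_pa);		/* stash guest save state */

		/* Host reload was already done here, outside the assembly. */
		vmload(__sme_page_pa(sd->save_area));
	}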