@@ -3640,16 +3640,20 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
guest_enter_irqoff();
lockdep_hardirqs_on(CALLER_ADDR0);
- __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
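+	/*
+	 * For SEV-ES guests, hardware restores the additional host state
+	 * (GS base, segment registers) on VMEXIT, so only the non-SEV-ES
+	 * path needs to reload it manually below.
+	 */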
+ if (sev_es_guest(svm->vcpu.kvm)) {
+ __svm_sev_es_vcpu_run(svm->vmcb_pa);
+ } else {
+ __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
#ifdef CONFIG_X86_64
- native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+ native_wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
- loadsegment(fs, svm->host.fs);
+ loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
- loadsegment(gs, svm->host.gs);
+ loadsegment(gs, svm->host.gs);
#endif
#endif
+ }
/*
* VMEXIT disables interrupts (host state), but tracing and lockdep
@@ -3676,9 +3680,11 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
fastpath_t exit_fastpath;
struct vcpu_svm *svm = to_svm(vcpu);
- svm_rax_write(svm, vcpu->arch.regs[VCPU_REGS_RAX]);
- svm_rsp_write(svm, vcpu->arch.regs[VCPU_REGS_RSP]);
- svm_rip_write(svm, vcpu->arch.regs[VCPU_REGS_RIP]);
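+	/*
+	 * SEV-ES guest register state lives in the encrypted VMSA and is
+	 * loaded/saved by hardware, so don't shadow RAX/RSP/RIP into the
+	 * save area.
+	 */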
+ if (!sev_es_guest(svm->vcpu.kvm)) {
+ svm_rax_write(svm, vcpu->arch.regs[VCPU_REGS_RAX]);
+ svm_rsp_write(svm, vcpu->arch.regs[VCPU_REGS_RSP]);
+ svm_rip_write(svm, vcpu->arch.regs[VCPU_REGS_RIP]);
+ }
/*
* Disable singlestep if we're injecting an interrupt/exception.
@@ -3700,7 +3706,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
sync_lapic_to_cr8(vcpu);
- svm_cr2_write(svm, vcpu->arch.cr2);
+ if (!sev_es_guest(svm->vcpu.kvm))
+ svm_cr2_write(svm, vcpu->arch.cr2);
/*
* Run with all-zero DR6 unless needed, so that we can get the exact cause
@@ -3748,14 +3755,17 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
- reload_tss(vcpu);
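+	/*
+	 * For SEV-ES, hardware restores the host TR as part of the
+	 * additional host state on VMEXIT, so the TSS reload is unnecessary.
+	 */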
+ if (!sev_es_guest(svm->vcpu.kvm))
+ reload_tss(vcpu);
x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
- vcpu->arch.cr2 = svm_cr2_read(svm);
- vcpu->arch.regs[VCPU_REGS_RAX] = svm_rax_read(svm);
- vcpu->arch.regs[VCPU_REGS_RSP] = svm_rsp_read(svm);
- vcpu->arch.regs[VCPU_REGS_RIP] = svm_rip_read(svm);
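+	/*
+	 * Hardware saves the SEV-ES guest state to the encrypted VMSA on
+	 * VMEXIT, so there is nothing for KVM to read back here.
+	 */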
+ if (!sev_es_guest(svm->vcpu.kvm)) {
+ vcpu->arch.cr2 = svm_cr2_read(svm);
+ vcpu->arch.regs[VCPU_REGS_RAX] = svm_rax_read(svm);
+ vcpu->arch.regs[VCPU_REGS_RSP] = svm_rsp_read(svm);
+ vcpu->arch.regs[VCPU_REGS_RIP] = svm_rip_read(svm);
+ }
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_interrupt(&svm->vcpu);
@@ -584,6 +584,11 @@ void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_es_vcpu_load(struct vcpu_svm *svm, int cpu);
void sev_es_vcpu_put(struct vcpu_svm *svm);
+/* vmenter.S */
+
+void __svm_sev_es_vcpu_run(unsigned long vmcb_pa);
+void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
+
/* VMSA Accessor functions */
static inline struct vmcb_save_area *get_vmsa(struct vcpu_svm *svm)
@@ -168,3 +168,53 @@ SYM_FUNC_START(__svm_vcpu_run)
pop %_ASM_BP
ret
SYM_FUNC_END(__svm_vcpu_run)
+
+/**
+ * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
+ * @vmcb_pa: unsigned long (physical address of the VMCB)
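+ *
+ * Unlike __svm_vcpu_run(), no guest GPRs are loaded or saved here: for an
+ * SEV-ES guest, hardware restores and saves them via the encrypted VMSA as
+ * part of VMRUN/#VMEXIT, so only the callee-saved host registers need to be
+ * preserved.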
+ */
+SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ push %_ASM_BP
+#ifdef CONFIG_X86_64
+ push %r15
+ push %r14
+ push %r13
+ push %r12
+#else
+ push %edi
+ push %esi
+#endif
+ push %_ASM_BX
+
+	/* Enter guest mode: VMRUN consumes the VMCB physical address in rAX. */
+ mov %_ASM_ARG1, %_ASM_AX
+ sti
+
+1: vmrun %_ASM_AX
+ jmp 3f
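+	/*
+	 * A fault on VMRUN lands here via the exception table entry below.
+	 * Tolerate the fault only if the host is rebooting (SVM may already
+	 * have been disabled); otherwise treat it as a fatal bug via ud2.
+	 */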
+2: cmpb $0, kvm_rebooting
+ jne 3f
+ ud2
+ _ASM_EXTABLE(1b, 2b)
+
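+	/*
+	 * #VMEXIT disables interrupts in the restored host state, but the
+	 * VMRUN fault path above reaches this point with interrupts still
+	 * enabled from the sti, so disable them explicitly.
+	 */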
+3: cli
+
+#ifdef CONFIG_RETPOLINE
+ /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
+ FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+#endif
+
+ pop %_ASM_BX
+
+#ifdef CONFIG_X86_64
+ pop %r12
+ pop %r13
+ pop %r14
+ pop %r15
+#else
+ pop %esi
+ pop %edi
+#endif
+ pop %_ASM_BP
+ ret
+SYM_FUNC_END(__svm_sev_es_vcpu_run)