Message ID | 20220803155011.43721-12-mlevitsk@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | SMM emulation and interrupt shadow fixes | expand |
On Wed, Aug 03, 2022, Maxim Levitsky wrote: > @@ -4486,28 +4482,23 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) > { > struct vcpu_svm *svm = to_svm(vcpu); > struct kvm_host_map map, map_save; > - u64 saved_efer, vmcb12_gpa; > struct vmcb *vmcb12; > int ret; > > - const char *smstate = (const char *)smram; To shorten line lengths, what about grabbing smram64 here? const kvm_smram_state_64 *smram64 = &smram->smram64; IMO, makes things a little easier to read too. > - > if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) > return 0; > > /* Non-zero if SMI arrived while vCPU was in guest mode. */ > - if (!GET_SMSTATE(u64, smstate, 0x7ed8)) > + if (!smram->smram64.svm_guest_flag) > return 0; > > if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) > return 1; > > - saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); > - if (!(saved_efer & EFER_SVME)) > + if (!(smram->smram64.efer & EFER_SVME)) > return 1; > > - vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); > - if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) > + if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram->smram64.svm_guest_vmcb_gpa), &map) == -EINVAL) Eww, this is horrifically wrong. KVM will explode if kvm_vcpu_map() returns -EFAULT, e.g. if guest memory is not backed by struct page and memremap() fails. Not to mention that it's comically fragile too. Can you add a patch to this series to drop the "== -EINVAL"? And there's another one lurking just out of sight in this diff. > return 1; > > ret = 1; > @@ -4533,7 +4524,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) > vmcb12 = map.hva; > nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); > nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); > - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); > + ret = enter_svm_guest_mode(vcpu, smram->smram64.svm_guest_vmcb_gpa, vmcb12, false); > > if (ret) > goto unmap_save; > -- > 2.26.3 >
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d752fabde94ad2..d570ec522ebb55 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2077,12 +2077,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) #endif } -#define put_smstate(type, buf, offset, val) \ - *(type *)((buf) + (offset) - 0x7e00) = val - -#define GET_SMSTATE(type, buf, offset) \ - (*(type *)((buf) + (offset) - 0x7e00)) - int kvm_cpu_dirty_log_size(void); int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 688315d1dfabd1..7ca5e06878e19a 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -4439,15 +4439,11 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram) struct kvm_host_map map_save; int ret; - char *smstate = (char *)smram; - if (!is_guest_mode(vcpu)) return 0; - /* FED8h - SVM Guest */ - put_smstate(u64, smstate, 0x7ed8, 1); - /* FEE0h - SVM Guest VMCB Physical Address */ - put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); + smram->smram64.svm_guest_flag = 1; + smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa; svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; @@ -4486,28 +4482,23 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) { struct vcpu_svm *svm = to_svm(vcpu); struct kvm_host_map map, map_save; - u64 saved_efer, vmcb12_gpa; struct vmcb *vmcb12; int ret; - const char *smstate = (const char *)smram; - if (!guest_cpuid_has(vcpu, X86_FEATURE_LM)) return 0; /* Non-zero if SMI arrived while vCPU was in guest mode. 
*/ - if (!GET_SMSTATE(u64, smstate, 0x7ed8)) + if (!smram->smram64.svm_guest_flag) return 0; if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM)) return 1; - saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0); - if (!(saved_efer & EFER_SVME)) + if (!(smram->smram64.efer & EFER_SVME)) return 1; - vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0); - if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL) + if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram->smram64.svm_guest_vmcb_gpa), &map) == -EINVAL) return 1; ret = 1; @@ -4533,7 +4524,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram) vmcb12 = map.hva; nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); - ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false); + ret = enter_svm_guest_mode(vcpu, smram->smram64.svm_guest_vmcb_gpa, vmcb12, false); if (ret) goto unmap_save;
This removes the last user of put_smstate/GET_SMSTATE, so remove these macros as well. Also add a sanity check that we don't attempt to enter SMM on a guest CPU that is not long-mode capable while a nested guest is running. Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> --- arch/x86/include/asm/kvm_host.h | 6 ------ arch/x86/kvm/svm/svm.c | 21 ++++++--------------- 2 files changed, 6 insertions(+), 21 deletions(-)