Message ID | 20220929172016.319443-9-pbonzini@redhat.com (mailing list archive) |
---|---|
State | New, archived |
Series | KVM: x86: allow compiling out SMM support |
On Thu, Sep 29, 2022, Paolo Bonzini wrote:
> This ensures that all the relevant code is compiled out, in fact
> the process_smi stub can be removed too.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 2 ++
>  arch/x86/kvm/smm.h              | 1 -
>  arch/x86/kvm/x86.c              | 6 ++++++
>  3 files changed, 8 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index d11697504471..d58d4a62b227 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -81,7 +81,9 @@
>  #define KVM_REQ_NMI                 KVM_ARCH_REQ(9)
>  #define KVM_REQ_PMU                 KVM_ARCH_REQ(10)
>  #define KVM_REQ_PMI                 KVM_ARCH_REQ(11)
> +#ifdef CONFIG_KVM_SMM
>  #define KVM_REQ_SMI                 KVM_ARCH_REQ(12)
> +#endif
>  #define KVM_REQ_MASTERCLOCK_UPDATE  KVM_ARCH_REQ(13)
>  #define KVM_REQ_MCLOCK_INPROGRESS \
>          KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
> diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
> index 7ccce6b655ca..a6795b93ba30 100644
> --- a/arch/x86/kvm/smm.h
> +++ b/arch/x86/kvm/smm.h
> @@ -28,7 +28,6 @@ void process_smi(struct kvm_vcpu *vcpu);
>  static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
>  static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
>  static inline void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm) { WARN_ON_ONCE(1); }
> -static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }

I think it's worth adding one more patch to kill off kvm_smm_changed() too.
Most of the affected code already has references to CONFIG_KVM_SMM nearby.

---
 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/smm.c              |  4 ++++
 arch/x86/kvm/smm.h              |  2 --
 arch/x86/kvm/x86.c              | 18 +++++++++---------
 4 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0b0a82c0bb5c..6c572cf1cf8d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1994,10 +1994,11 @@ enum {
 #define HF_NMI_MASK             (1 << 3)
 #define HF_IRET_MASK            (1 << 4)
 #define HF_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
+
+#ifdef CONFIG_KVM_SMM
 #define HF_SMM_MASK             (1 << 6)
 #define HF_SMM_INSIDE_NMI_MASK  (1 << 7)

-#ifdef CONFIG_KVM_SMM
 # define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
 # define KVM_ADDRESS_SPACE_NUM 2
 # define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 740fca1cf3a3..12480446c43b 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -10,6 +10,10 @@

 void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
 {
+        BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
+        BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
+        BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
+
         trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);

         if (entering_smm) {
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index 131fbe1817d5..9935045fcf20 100644
--- a/arch/x86/kvm/smm.h
+++ b/arch/x86/kvm/smm.h
@@ -29,8 +29,6 @@ void process_smi(struct kvm_vcpu *vcpu);
 #else
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
 static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
-static inline void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm) { WARN_ON_ONCE(1); }
-
 /*
  * emulator_leave_smm is used as a function pointer, so the
  * stub is defined in x86.c.
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 56004890a717..ec74d579ca1b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5103,10 +5103,12 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,

         events->sipi_vector = 0; /* never valid when reporting to user space */

+#ifdef CONFIG_KVM_SMM
         events->smi.smm = is_smm(vcpu);
         events->smi.pending = vcpu->arch.smi_pending;
         events->smi.smm_inside_nmi =
                 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
+#endif
         events->smi.latched_init = kvm_lapic_latched_init(vcpu);

         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
@@ -5194,12 +5196,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                 vcpu->arch.apic->sipi_vector = events->sipi_vector;

         if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
-                if (!IS_ENABLED(CONFIG_KVM_SMM) &&
-                    (events->smi.smm ||
-                     events->smi.pending ||
-                     events->smi.smm_inside_nmi))
-                        return -EINVAL;
-
+#ifdef CONFIG_KVM_SMM
                 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
                         kvm_x86_ops.nested_ops->leave_nested(vcpu);
                         kvm_smm_changed(vcpu, events->smi.smm);
@@ -5214,6 +5211,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
                 }

+#else
+                if (events->smi.smm || events->smi.pending ||
+                    events->smi.smm_inside_nmi)
+                        return -EINVAL;
+#endif
+
                 if (lapic_in_kernel(vcpu)) {
                         if (events->smi.latched_init)
                                 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
@@ -8228,9 +8231,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
                 (cs_l && is_long_mode(vcpu))    ? X86EMUL_MODE_PROT64 :
                 cs_db                           ? X86EMUL_MODE_PROT32 :
                                                   X86EMUL_MODE_PROT16;
-        BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
-        BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
-        BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);

         ctxt->interruptibility = 0;
         ctxt->have_exception = false;

base-commit: f7641bcac507589d34b20d30cceb7067f8bcfd08
--
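An aside for readers unfamiliar with the macro: BUILD_BUG_ON() is the kernel's compile-time assertion, so moving the three HF_*/X86EMUL_* checks from init_emulate_ctxt() into kvm_smm_changed() means they are enforced only in builds that actually compile the SMM code. Below is a minimal standalone sketch of the same invariant using C11 _Static_assert in place of the kernel macro; the HF_* values are copied from the patch context above, while the X86EMUL_* values are assumed stand-ins for illustration, not copied from the kernel headers.

/*
 * Standalone illustration, not kernel code.  BUILD_BUG_ON(cond) breaks
 * the build when cond is true, but only in translation units that are
 * actually compiled; _Static_assert expresses the same invariant.
 */
#define HF_SMM_MASK                 (1 << 6)
#define HF_SMM_INSIDE_NMI_MASK      (1 << 7)

#define X86EMUL_SMM_MASK            (1 << 6)  /* assumed for illustration */
#define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7)  /* assumed for illustration */

_Static_assert(HF_SMM_MASK == X86EMUL_SMM_MASK,
               "hflags and emulator SMM flag encodings must match");
_Static_assert(HF_SMM_INSIDE_NMI_MASK == X86EMUL_SMM_INSIDE_NMI_MASK,
               "hflags and emulator SMM-inside-NMI flag encodings must match");

int main(void)
{
        return 0;
}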
On Thu, 2022-09-29 at 13:20 -0400, Paolo Bonzini wrote:
> This ensures that all the relevant code is compiled out, in fact
> the process_smi stub can be removed too.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 2 ++
>  arch/x86/kvm/smm.h              | 1 -
>  arch/x86/kvm/x86.c              | 6 ++++++
>  3 files changed, 8 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index d11697504471..d58d4a62b227 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -81,7 +81,9 @@
>  #define KVM_REQ_NMI                 KVM_ARCH_REQ(9)
>  #define KVM_REQ_PMU                 KVM_ARCH_REQ(10)
>  #define KVM_REQ_PMI                 KVM_ARCH_REQ(11)
> +#ifdef CONFIG_KVM_SMM
>  #define KVM_REQ_SMI                 KVM_ARCH_REQ(12)
> +#endif
>  #define KVM_REQ_MASTERCLOCK_UPDATE  KVM_ARCH_REQ(13)
>  #define KVM_REQ_MCLOCK_INPROGRESS \
>          KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
> diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
> index 7ccce6b655ca..a6795b93ba30 100644
> --- a/arch/x86/kvm/smm.h
> +++ b/arch/x86/kvm/smm.h
> @@ -28,7 +28,6 @@ void process_smi(struct kvm_vcpu *vcpu);
>  static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
>  static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
>  static inline void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm) { WARN_ON_ONCE(1); }
> -static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
>
>  /*
>   * emulator_leave_smm is used as a function pointer, so the
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index e22184bad92b..ba5661ee3fd7 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5020,8 +5020,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
>
>          process_nmi(vcpu);
>
> +#ifdef CONFIG_KVM_SMM
>          if (kvm_check_request(KVM_REQ_SMI, vcpu))
>                  process_smi(vcpu);
> +#endif
>
>          /*
>           * KVM's ABI only allows for one exception to be migrated. Luckily,
> @@ -10194,8 +10196,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>          }
>          if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
>                  record_steal_time(vcpu);
> +#ifdef CONFIG_KVM_SMM
>          if (kvm_check_request(KVM_REQ_SMI, vcpu))
>                  process_smi(vcpu);
> +#endif
>          if (kvm_check_request(KVM_REQ_NMI, vcpu))
>                  process_nmi(vcpu);
>          if (kvm_check_request(KVM_REQ_PMU, vcpu))
> @@ -12539,7 +12543,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
>                  return true;
>
>          if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
> +#ifdef CONFIG_KVM_SMM
>              kvm_test_request(KVM_REQ_SMI, vcpu) ||
> +#endif
>              kvm_test_request(KVM_REQ_EVENT, vcpu))
>                  return true;
>

Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>

Best regards,
        Maxim Levitsky
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index d11697504471..d58d4a62b227 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -81,7 +81,9 @@
 #define KVM_REQ_NMI                 KVM_ARCH_REQ(9)
 #define KVM_REQ_PMU                 KVM_ARCH_REQ(10)
 #define KVM_REQ_PMI                 KVM_ARCH_REQ(11)
+#ifdef CONFIG_KVM_SMM
 #define KVM_REQ_SMI                 KVM_ARCH_REQ(12)
+#endif
 #define KVM_REQ_MASTERCLOCK_UPDATE  KVM_ARCH_REQ(13)
 #define KVM_REQ_MCLOCK_INPROGRESS \
         KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index 7ccce6b655ca..a6795b93ba30 100644
--- a/arch/x86/kvm/smm.h
+++ b/arch/x86/kvm/smm.h
@@ -28,7 +28,6 @@ void process_smi(struct kvm_vcpu *vcpu);
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
 static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
 static inline void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm) { WARN_ON_ONCE(1); }
-static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }

 /*
  * emulator_leave_smm is used as a function pointer, so the
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e22184bad92b..ba5661ee3fd7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5020,8 +5020,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,

         process_nmi(vcpu);

+#ifdef CONFIG_KVM_SMM
         if (kvm_check_request(KVM_REQ_SMI, vcpu))
                 process_smi(vcpu);
+#endif

         /*
          * KVM's ABI only allows for one exception to be migrated. Luckily,
@@ -10194,8 +10196,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         }
         if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
                 record_steal_time(vcpu);
+#ifdef CONFIG_KVM_SMM
         if (kvm_check_request(KVM_REQ_SMI, vcpu))
                 process_smi(vcpu);
+#endif
         if (kvm_check_request(KVM_REQ_NMI, vcpu))
                 process_nmi(vcpu);
         if (kvm_check_request(KVM_REQ_PMU, vcpu))
@@ -12539,7 +12543,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
                 return true;

         if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+#ifdef CONFIG_KVM_SMM
             kvm_test_request(KVM_REQ_SMI, vcpu) ||
+#endif
             kvm_test_request(KVM_REQ_EVENT, vcpu))
                 return true;
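One detail worth noting in the kvm_arch_dy_runnable() hunk above: the #ifdef/#endif pair sits inside a multi-line || expression. That works because the preprocessor runs before the expression is parsed, so either variant of the condition remains well formed. A minimal sketch of the pattern follows; any_event_pending() and its parameters are invented names, not KVM code.

#include <stdbool.h>

#define CONFIG_KVM_SMM 1        /* comment out to mimic a no-SMM build */

/*
 * Dropping the middle operand still leaves a valid || chain -- the
 * same shape as the kvm_arch_dy_runnable() change above.
 */
static bool any_event_pending(bool nmi, bool smi, bool event)
{
        if (nmi ||
#ifdef CONFIG_KVM_SMM
            smi ||
#endif
            event)
                return true;

        return false;
}

int main(void)
{
        /* With CONFIG_KVM_SMM defined, a pending SMI counts as runnable. */
        return any_event_pending(false, true, false) ? 0 : 1;
}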
This ensures that all the relevant code is compiled out; in fact, the
process_smi stub can be removed too.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 2 ++
 arch/x86/kvm/smm.h              | 1 -
 arch/x86/kvm/x86.c              | 6 ++++++
 3 files changed, 8 insertions(+), 1 deletion(-)
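The key point of the commit message is that once KVM_REQ_SMI and all of its users sit behind the same #ifdef, a !CONFIG_KVM_SMM build can no longer reference process_smi() at all, so a WARN_ON_ONCE() stub adds nothing: a stray caller would now fail at compile time rather than warn at run time. Here is a toy standalone sketch of that pattern; the names loosely mirror the patch, and all the scaffolding around them is invented.

#include <stdio.h>

#define CONFIG_KVM_SMM 1        /* comment out to compile the code away */

#ifdef CONFIG_KVM_SMM
#define KVM_REQ_SMI 12

static void process_smi(void)
{
        puts("processing SMI");
}
#endif

static void service_request(int req)
{
#ifdef CONFIG_KVM_SMM
        /* Both KVM_REQ_SMI and process_smi() vanish together. */
        if (req == KVM_REQ_SMI)
                process_smi();
#endif
        (void)req;      /* keep the parameter used in a no-SMM build */
}

int main(void)
{
#ifdef CONFIG_KVM_SMM
        service_request(KVM_REQ_SMI);
#endif
        return 0;
}

With CONFIG_KVM_SMM undefined, any leftover call to process_smi() would be an undefined-reference error at build time, which is exactly why the WARN stub becomes dead weight.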