@@ -3771,7 +3771,7 @@ static bool svm_smi_allowed(struct kvm_vcpu *vcpu)
 		return false;
 	}
 
-	return true;
+	return !is_smm(vcpu);
 }
 
 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
@@ -7680,7 +7680,7 @@ static bool vmx_smi_allowed(struct kvm_vcpu *vcpu)
 	/* we need a nested vmexit to enter SMM, postpone if run is pending */
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return false;
-	return true;
+	return !is_smm(vcpu);
 }
 
 static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
@@ -7744,8 +7744,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	if (kvm_event_needs_reinjection(vcpu))
 		return 0;
 
-	if (vcpu->arch.smi_pending && !is_smm(vcpu) &&
-	    kvm_x86_ops.smi_allowed(vcpu)) {
+	if (vcpu->arch.smi_pending && kvm_x86_ops.smi_allowed(vcpu)) {
 		vcpu->arch.smi_pending = false;
 		++vcpu->arch.smi_count;
 		enter_smm(vcpu);
@@ -10174,7 +10173,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 		return true;
 
 	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
-	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
+	    (vcpu->arch.smi_pending &&
+	     kvm_x86_ops.smi_allowed(vcpu)))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
Do not hardcode is_smm so that all the architectural conditions for
blocking SMIs are listed in a single place.  Well, in two places because
this introduces some code duplication between Intel and AMD.

This ensures that nested SVM obeys GIF in kvm_vcpu_has_events.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/svm/svm.c | 2 +-
 arch/x86/kvm/vmx/vmx.c | 2 +-
 arch/x86/kvm/x86.c     | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)
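
For reference, a rough sketch of the consolidated check on the SVM side
after this change.  The gif_set() test is assumed from the function context
the hunk above elides (the commit message implies smi_allowed already checks
GIF); comments are editorial, and this is a sketch, not verbatim upstream
code:

	static bool svm_smi_allowed(struct kvm_vcpu *vcpu)
	{
		struct vcpu_svm *svm = to_svm(vcpu);

		/* On SVM, GIF=0 blocks SMIs (AMD APM "Response to SMI"). */
		if (!gif_set(svm))
			return false;

		/* Nested-specific handling elided; see the hunk context above. */

		/* Previously open-coded in x86.c; the vendor callback now owns it. */
		return !is_smm(vcpu);
	}

With kvm_vcpu_has_events() going through kvm_x86_ops.smi_allowed(), a
pending SMI no longer counts as a wakeup event while GIF is clear on
nested SVM, which is the behavioral point of this patch.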