@@ -5039,14 +5039,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
-                kvm_before_handle_nmi(&svm->vcpu);
+                kvm_before_handle_host_interrupts(&svm->vcpu);

         stgi();

         /* Any pending NMI will happen here */

         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
-                kvm_after_handle_nmi(&svm->vcpu);
+                kvm_after_handle_host_interrupts(&svm->vcpu);

         sync_cr8_to_lapic(vcpu);
@@ -9095,9 +9095,9 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
         /* We need to handle NMIs before interrupts are enabled */
         if (is_nmi(exit_intr_info)) {
-                kvm_before_handle_nmi(&vmx->vcpu);
+                kvm_before_handle_host_interrupts(&vmx->vcpu);
                 asm("int $2");
-                kvm_after_handle_nmi(&vmx->vcpu);
+                kvm_after_handle_host_interrupts(&vmx->vcpu);
         }
 }
@@ -6041,17 +6041,17 @@ static unsigned long kvm_get_guest_ip(void)
         .get_guest_ip = kvm_get_guest_ip,
 };

-void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
+void kvm_before_handle_host_interrupts(struct kvm_vcpu *vcpu)
 {
         __this_cpu_write(current_vcpu, vcpu);
 }
-EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
+EXPORT_SYMBOL_GPL(kvm_before_handle_host_interrupts);

-void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+void kvm_after_handle_host_interrupts(struct kvm_vcpu *vcpu)
 {
         __this_cpu_write(current_vcpu, NULL);
 }
-EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+EXPORT_SYMBOL_GPL(kvm_after_handle_host_interrupts);

 static void kvm_set_mmio_spte_mask(void)
 {
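For context (not part of the hunk above): the renamed helpers only publish and clear the per-CPU current_vcpu pointer. The perf guest callbacks registered in kvm_guest_cbs read it back, roughly along the lines of the simplified sketch below, so that a sample taken while the host services an NMI or interrupt on the VM-exit path can be attributed to the guest. This is a paraphrase of the existing callbacks in x86.c, not code introduced by this patch.

/* Simplified sketch of the perf guest callbacks that consume current_vcpu. */
static int kvm_is_in_guest(void)
{
        /* Non-NULL only inside the before/after window set up above. */
        return __this_cpu_read(current_vcpu) != NULL;
}

static unsigned long kvm_get_guest_ip(void)
{
        struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);

        /* Report the guest RIP for the sample, or 0 outside that window. */
        return vcpu ? kvm_rip_read(vcpu) : 0;
}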
@@ -204,8 +204,8 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
         return !(kvm->arch.disabled_quirks & quirk);
 }

-void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
-void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+void kvm_before_handle_host_interrupts(struct kvm_vcpu *vcpu);
+void kvm_after_handle_host_interrupts(struct kvm_vcpu *vcpu);
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
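As a usage illustration only: a call site bracketed by the renamed helpers would look roughly like the sketch below. example_handle_exit_interrupt() and its body are hypothetical and not part of this patch; the real callers are the SVM/VMX exit paths changed above.

/*
 * Hypothetical caller: mark the window in which a host interrupt or NMI
 * taken on the VM-exit path should still be attributed to the guest by perf.
 */
static void example_handle_exit_interrupt(struct kvm_vcpu *vcpu)
{
        kvm_before_handle_host_interrupts(vcpu);   /* current_vcpu = vcpu */
        local_irq_enable();                        /* pending host interrupt fires here */
        local_irq_disable();
        kvm_after_handle_host_interrupts(vcpu);    /* current_vcpu = NULL */
}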