@@ -181,6 +181,20 @@ static int vt_handle_exit(struct kvm_vcpu *vcpu,
 	return vmx_handle_exit(vcpu, fastpath);
 }
 
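+/*
+ * handle_exit_irqoff() runs with IRQs still disabled after a VM-exit;
+ * dispatch TD vCPUs to the TDX handler and everything else to VMX.
+ */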
+static void vt_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+{
+	if (is_td_vcpu(vcpu)) {
+		tdx_handle_exit_irqoff(vcpu);
+		return;
+	}
+
+	vmx_handle_exit_irqoff(vcpu);
+}
+
 #ifdef CONFIG_KVM_SMM
 static int vt_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
@@ -599,7 +613,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.load_mmu_pgd = vt_load_mmu_pgd,
 
 	.check_intercept = vmx_check_intercept,
-	.handle_exit_irqoff = vmx_handle_exit_irqoff,
+	.handle_exit_irqoff = vt_handle_exit_irqoff,
 
 	.cpu_dirty_log_size = PML_ENTITY_NUM,
 	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
@@ -961,6 +961,14 @@ static noinstr void tdx_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 	REG(rsi, RSI);
 #undef REG
 
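+	/*
+	 * An NMI that fired while the TD guest was running must be handled
+	 * before IRQs are enabled again, by calling the host NMI handler.
+	 */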
+	if (tdx_check_exit_reason(vcpu, EXIT_REASON_EXCEPTION_NMI) &&
+	    is_nmi(tdexit_intr_info(vcpu)))
+		__vmx_handle_nmi(vcpu);
+
 	guest_state_exit_irqoff();
 }
 
@@ -1040,6 +1048,53 @@ void tdx_inject_nmi(struct kvm_vcpu *vcpu)
 	vcpu->arch.nmi_pending = 0;
 }
 
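+/*
+ * Exits that must be processed before IRQs are re-enabled, i.e. host
+ * external interrupts and exceptions such as machine checks, are
+ * forwarded to the common VMX irqoff handlers.
+ */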
+void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+{
+	if (tdx_check_exit_reason(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT))
+		vmx_handle_external_interrupt_irqoff(vcpu,
+						     tdexit_intr_info(vcpu));
+	else if (tdx_check_exit_reason(vcpu, EXIT_REASON_EXCEPTION_NMI))
+		vmx_handle_exception_irqoff(vcpu, tdexit_intr_info(vcpu));
+}
+
+static int tdx_handle_exception_nmi(struct kvm_vcpu *vcpu)
+{
+	u32 intr_info = tdexit_intr_info(vcpu);
+
+	/*
+	 * Machine checks are handled by vmx_handle_exception_irqoff(), or by
+	 * tdx_handle_exit() with TDX_NON_RECOVERABLE set if a #MC occurs on
+	 * VM-Entry. NMIs are handled by tdx_vcpu_enter_exit().
+	 */
+	if (is_nmi(intr_info) || is_machine_check(intr_info))
+		return 1;
+
+	kvm_pr_unimpl("unexpected exception 0x%x(exit_reason 0x%llx qual 0x%lx)\n",
+		      intr_info,
+		      to_tdx(vcpu)->vp_enter_ret, tdexit_exit_qual(vcpu));
+
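+	/* Report the unexpected exception to userspace via KVM_EXIT_EXCEPTION. */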
+	vcpu->run->exit_reason = KVM_EXIT_EXCEPTION;
+	vcpu->run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
+	vcpu->run->ex.error_code = 0;
+
+	return 0;
+}
+
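+/*
+ * The interrupt itself was already dispatched to the host handler in
+ * tdx_handle_exit_irqoff(); only the exit statistics are updated here.
+ */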
+static int tdx_handle_external_interrupt(struct kvm_vcpu *vcpu)
+{
+	++vcpu->stat.irq_exits;
+	return 1;
+}
+
 static int tdx_handle_triple_fault(struct kvm_vcpu *vcpu)
 {
 	vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
@@ -1765,6 +1820,10 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
 	exit_reason = tdexit_exit_reason(vcpu);
 
 	switch (exit_reason.basic) {
+	case EXIT_REASON_EXCEPTION_NMI:
+		return tdx_handle_exception_nmi(vcpu);
+	case EXIT_REASON_EXTERNAL_INTERRUPT:
+		return tdx_handle_external_interrupt(vcpu);
 	case EXIT_REASON_TDCALL:
 		return handle_tdvmcall(vcpu);
 	default:
@@ -133,6 +133,7 @@ fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
 void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
 void tdx_vcpu_put(struct kvm_vcpu *vcpu);
 bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu);
+void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu);
 int tdx_handle_exit(struct kvm_vcpu *vcpu,
 		enum exit_fastpath_completion fastpath);
@@ -176,6 +177,7 @@ static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediat
 static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
 static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
 static inline bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu) { return false; }
+static inline void tdx_handle_exit_irqoff(struct kvm_vcpu *vcpu) {}
 static inline int tdx_handle_exit(struct kvm_vcpu *vcpu,
 		enum exit_fastpath_completion fastpath) { return 0; }