@@ -424,7 +424,7 @@ static void vt_cancel_injection(struct kvm_vcpu *vcpu)
static int vt_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
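+ /*
+ * The interrupt state of a TD vCPU is not directly visible to KVM, so
+ * tdx_interrupt_allowed() infers it from the last TDVMCALL instead.
+ */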
if (is_td_vcpu(vcpu))
- return true;
+ return tdx_interrupt_allowed(vcpu);
return vmx_interrupt_allowed(vcpu, for_injection);
}
@@ -223,7 +223,8 @@ void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
return;
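+ /*
+ * Only enable the wakeup handler for a blocked vCPU that can actually
+ * be woken by an interrupt: tdx_interrupt_allowed() covers the TD
+ * case, mirroring the !vmx_interrupt_blocked() check used for VMX.
+ */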
if (kvm_vcpu_is_blocking(vcpu) &&
- (is_td_vcpu(vcpu) || !vmx_interrupt_blocked(vcpu)))
+ ((is_td_vcpu(vcpu) && tdx_interrupt_allowed(vcpu)) ||
+ (!is_td_vcpu(vcpu) && !vmx_interrupt_blocked(vcpu))))
pi_enable_wakeup_handler(vcpu);
/*
@@ -771,9 +771,31 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
local_irq_enable();
}
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /*
+ * KVM can't get the interrupt status of a TDX guest, so it assumes
+ * interrupts are always allowed unless the TDX guest calls TDVMCALL
+ * with HLT, which passes the interrupt blocked flag.
+ */
+ if (!tdx_check_exit_reason(vcpu, EXIT_REASON_TDCALL) ||
+ tdvmcall_exit_type(vcpu) || tdvmcall_leaf(vcpu) != EXIT_REASON_HLT)
+ return true;
+
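+ /* a0 of TDVMCALL(HLT) carries the guest's interrupt blocked flag. */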
+ return !tdvmcall_a0_read(vcpu);
+}
+
bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
- return pi_has_pending_interrupt(vcpu);
+ u64 vcpu_state_details;
+
+ if (pi_has_pending_interrupt(vcpu))
+ return true;
+
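+ /*
+ * Nothing pending in the shared posted-interrupt descriptor; ask the
+ * TDX module whether an interrupt is pending in the protected vAPIC
+ * by reading the non-architectural VCPU_STATE_DETAILS field.
+ */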
+ vcpu_state_details =
+ td_state_non_arch_read64(to_tdx(vcpu), TD_VCPU_STATE_DETAILS_NON_ARCH);
+
+ return tdx_vcpu_state_details_intr_pending(vcpu_state_details);
}
/*
@@ -1294,6 +1316,12 @@ static int tdx_emulate_cpuid(struct kvm_vcpu *vcpu)
return 1;
}
+static int tdx_emulate_hlt(struct kvm_vcpu *vcpu)
+{
+ tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_SUCCESS);
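+ /*
+ * Use the no-skip variant: KVM doesn't advance RIP for a TDVMCALL
+ * exit, the guest resumes at the instruction after the TDCALL.
+ */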
+ return kvm_emulate_halt_noskip(vcpu);
+}
+
static int tdx_complete_pio_out(struct kvm_vcpu *vcpu)
{
vcpu->arch.pio.count = 0;
@@ -1477,6 +1505,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
return tdx_report_fatal_error(vcpu);
case EXIT_REASON_CPUID:
return tdx_emulate_cpuid(vcpu);
+ case EXIT_REASON_HLT:
+ return tdx_emulate_hlt(vcpu);
case EXIT_REASON_IO_INSTRUCTION:
return tdx_emulate_io(vcpu);
case EXIT_REASON_EPT_VIOLATION:
@@ -152,6 +152,7 @@ static __always_inline void tdvps_vmcs_check(u32 field, u8 bits)
}
static __always_inline void tdvps_management_check(u64 field, u8 bits) {}
+static __always_inline void tdvps_state_non_arch_check(u64 field, u8 bits) {}
#define TDX_BUILD_TDVPS_ACCESSORS(bits, uclass, lclass) \
static __always_inline u##bits td_##lclass##_read##bits(struct vcpu_tdx *tdx, \
@@ -199,11 +200,15 @@ static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx, \
tdh_vp_wr_failed(tdx, #uclass, " &= ~", field, bit, err);\
}
+
+bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
+
TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);
TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
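+/*
+ * Generates td_state_non_arch_read64() etc. for the non-architectural
+ * TDVPS fields used by tdx_protected_apic_has_interrupt().
+ */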
+TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
#else
static inline void tdx_bringup(void) {}
@@ -223,6 +228,7 @@ static inline bool is_td(struct kvm *kvm) { return false; }
static inline bool is_td_vcpu(struct kvm_vcpu *vcpu) { return false; }
static inline struct kvm_tdx *to_kvm_tdx(struct kvm *kvm) { return NULL; }
static inline struct vcpu_tdx *to_tdx(struct kvm_vcpu *vcpu) { return NULL; }
+static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
#endif
@@ -71,6 +71,17 @@ enum tdx_tdcs_execution_control {
TD_TDCS_EXEC_TSC_OFFSET = 10,
};
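+/* Non-architectural per-vCPU guest state, read via the TDVPS accessors. */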
+enum tdx_vcpu_guest_other_state {
+ TD_VCPU_STATE_DETAILS_NON_ARCH = 0x100,
+};
+
+#define TDX_VCPU_STATE_DETAILS_INTR_PENDING BIT_ULL(0)
+
+static inline bool tdx_vcpu_state_details_intr_pending(u64 vcpu_state_details)
+{
+ return !!(vcpu_state_details & TDX_VCPU_STATE_DETAILS_INTR_PENDING);
+}
+
/* @field is any of enum tdx_tdcs_execution_control */
#define TDCS_EXEC(field) BUILD_TDX_FIELD(TD_CLASS_EXECUTION_CONTROLS, (field))