@@ -738,7 +738,28 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 bool tdx_protected_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
-	return pi_has_pending_interrupt(vcpu);
+	bool ret = pi_has_pending_interrupt(vcpu);
+	union tdx_vcpu_state_details details;
+	struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+	/*
+	 * Only a halted vCPU needs the expensive SEAMCALL below; otherwise
+	 * conservatively report an interrupt as pending.
+	 */
+	if (ret || vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
+		return true;
+
+	/* The guest halted with interrupts disabled; an interrupt can't wake it. */
+	if (tdx->interrupt_disabled_hlt)
+		return false;
+
+	/*
+	 * An interrupt recognized before HLT (e.g. in an STI shadow) sits in
+	 * vmcs.RVI, which KVM cannot read for a TD.  Ask the TDX module
+	 * whether a virtual interrupt is pending injection (VMXIP).
+	 */
+	details.full = td_state_non_arch_read64(tdx, TD_VCPU_STATE_DETAILS_NON_ARCH);
+	return !!details.vmxip;
 }
 
 void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
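The details.vmxip check above relies on a bitfield view of TD_VCPU_STATE_DETAILS_NON_ARCH. A minimal sketch of the union the code assumes, following the tdx_arch.h definition used elsewhere in this series (the authoritative layout is the TDX module ABI):

union tdx_vcpu_state_details {
	struct {
		u64 vmxip	:  1;	/* Virtual interrupt pending injection */
		u64 reserved	: 63;
	};
	u64 full;
};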
@@ -1180,6 +1191,21 @@ static int tdx_emulate_cpuid(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+static int tdx_emulate_hlt(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_tdx *tdx = to_tdx(vcpu);
+
+	/*
+	 * a0 (R12) holds whether the guest had interrupts disabled at HLT.
+	 * Cache it so tdx_protected_apic_has_interrupt() can avoid the
+	 * heavy SEAMCALL when no interrupt can be delivered anyway.
+	 */
+	tdx->interrupt_disabled_hlt = tdvmcall_a0_read(vcpu);
+
+	tdvmcall_set_return_code(vcpu, TDG_VP_VMCALL_SUCCESS);
+	return kvm_emulate_halt_noskip(vcpu);
+}
+
 static int handle_tdvmcall(struct kvm_vcpu *vcpu)
 {
 	if (tdvmcall_exit_type(vcpu))
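For context, the guest side of this exit: a TDX guest replaces the HLT instruction with TDG.VP.VMCALL<Instruction.HLT>, passing its interrupt state in R12 (read back above via tdvmcall_a0_read()). A sketch along the lines of the Linux guest's arch/x86/coco/tdx code; the exact helper and struct names vary across kernel versions:

static int __halt(const bool irq_disabled)
{
	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),
		.r12 = irq_disabled,
	};

	/* Exits to the host VMM; KVM dispatches it in handle_tdvmcall(). */
	return __tdx_hypercall(&args, 0);
}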
@@ -1188,6 +1210,8 @@ static int handle_tdvmcall(struct kvm_vcpu *vcpu)
 	switch (tdvmcall_leaf(vcpu)) {
 	case EXIT_REASON_CPUID:
 		return tdx_emulate_cpuid(vcpu);
+	case EXIT_REASON_HLT:
+		return tdx_emulate_hlt(vcpu);
 	default:
 		break;
 	}
@@ -102,6 +102,9 @@ struct vcpu_tdx {
 	bool host_state_need_restore;
 	u64 msr_host_kernel_gs_base;
 
+	/* The guest's latest TDG.VP.VMCALL<HLT> had interrupts disabled. */
+	bool interrupt_disabled_hlt;
+
 	/*
 	 * Dummy to make pmu_intel not corrupt memory.
 	 * TODO: Support PMU for TDX. Future work.
@@ -225,6 +227,7 @@ TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
 TDX_BUILD_TDVPS_ACCESSORS(64, VMCS, vmcs);
 
 TDX_BUILD_TDVPS_ACCESSORS(8, MANAGEMENT, management);
+TDX_BUILD_TDVPS_ACCESSORS(64, STATE_NON_ARCH, state_non_arch);
 
 static __always_inline u64 td_tdcs_exec_read64(struct kvm_tdx *kvm_tdx, u32 field)
 {
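The added STATE_NON_ARCH line generates the td_state_non_arch_read64() helper used in tdx_protected_apic_has_interrupt(). Schematically, the macro is assumed to expand to a TDH.VP.RD wrapper along these lines (simplified sketch; the series' real expansion also emits write/modify variants and field-size checks):

static __always_inline u64 td_state_non_arch_read64(struct vcpu_tdx *tdx,
						    u32 field)
{
	struct tdx_module_output out;
	u64 err;

	/* TDH.VP.RD: read a TDVPS field of the STATE_NON_ARCH class. */
	err = tdh_vp_rd(tdx->tdvpr_pa, TDVPS_STATE_NON_ARCH(field), &out);
	if (KVM_BUG_ON(err, tdx->vcpu.kvm))
		return 0;
	return (u64)out.r8;
}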