@@ -225,6 +225,8 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
MSR_CORE_C3_RESIDENCY,
MSR_CORE_C6_RESIDENCY,
MSR_CORE_C7_RESIDENCY,
+ MSR_IA32_HW_FEEDBACK_THREAD_CONFIG,
+ MSR_IA32_HW_FEEDBACK_CHAR,
};
/*
@@ -1288,6 +1290,32 @@ static void pt_guest_exit(struct vcpu_vmx *vmx)
wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
}
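+/* Save the host's ITD thread config MSR and load the guest's value on VM-entry. */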
+static void itd_guest_enter(struct vcpu_vmx *vmx)
+{
+ struct vcpu_hfi_desc *vcpu_hfi = &vmx->vcpu_hfi_desc;
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_ITD) ||
+     !guest_cpuid_has(&vmx->vcpu, X86_FEATURE_ITD))
+ return;
+
+ rdmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->host_thread_cfg);
+ wrmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->guest_thread_cfg);
+}
+
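+/* Save the guest's ITD thread config MSR and restore the host's value on VM-exit. */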
+static void itd_guest_exit(struct vcpu_vmx *vmx)
+{
+ struct vcpu_hfi_desc *vcpu_hfi = &vmx->vcpu_hfi_desc;
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_ITD) ||
+     !guest_cpuid_has(&vmx->vcpu, X86_FEATURE_ITD))
+ return;
+
+ rdmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->guest_thread_cfg);
+ wrmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->host_thread_cfg);
+}
+
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
unsigned long fs_base, unsigned long gs_base)
{
@@ -5485,6 +5511,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx->msr_ia32_therm_control = 0;
vmx->msr_ia32_therm_interrupt = 0;
vmx->msr_ia32_therm_status = 0;
+ vmx->vcpu_hfi_desc.host_thread_cfg = 0;
+ vmx->vcpu_hfi_desc.guest_thread_cfg = 0;
vmx->hv_deadline_tsc = -1;
kvm_set_cr8(vcpu, 0);
@@ -7977,6 +8005,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
kvm_load_guest_xsave_state(vcpu);
pt_guest_enter(vmx);
+ itd_guest_enter(vmx);
atomic_switch_perf_msrs(vmx);
if (intel_pmu_lbr_is_enabled(vcpu))
@@ -8015,6 +8044,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
loadsegment(es, __USER_DS);
#endif
+ itd_guest_exit(vmx);
pt_guest_exit(vmx);
kvm_load_host_xsave_state(vcpu);
@@ -8475,6 +8505,14 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmx->hfi_table_idx = ((union cpuid6_edx)best->edx).split.index;
}
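+ /* Pass the ITD MSRs through to the guest only if its CPUID enumerates ITD. */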
+ if (kvm_cpu_cap_has(X86_FEATURE_ITD)) {
+     vmx_set_intercept_for_msr(vcpu, MSR_IA32_HW_FEEDBACK_THREAD_CONFIG,
+                               MSR_TYPE_RW, !guest_cpuid_has(vcpu, X86_FEATURE_ITD));
+     vmx_set_intercept_for_msr(vcpu, MSR_IA32_HW_FEEDBACK_CHAR,
+                               MSR_TYPE_RW, !guest_cpuid_has(vcpu, X86_FEATURE_ITD));
+ }
+
/* Refresh #PF interception to account for MAXPHYADDR changes. */
vmx_update_exception_bitmap(vcpu);
}
@@ -71,6 +71,12 @@ struct pt_desc {
struct pt_ctx guest;
};
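+/* Host and guest values of MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, swapped across VM-entry/exit. */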
+struct vcpu_hfi_desc {
+ u64 host_thread_cfg;
+ u64 guest_thread_cfg;
+};
+
union vmx_exit_reason {
struct {
u32 basic : 16;
@@ -286,6 +291,7 @@ struct vcpu_vmx {
u64 msr_ia32_therm_control;
u64 msr_ia32_therm_interrupt;
u64 msr_ia32_therm_status;
+ struct vcpu_hfi_desc vcpu_hfi_desc;
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
@@ -366,7 +372,7 @@ struct vcpu_vmx {
int hfi_table_idx;
/* Save desired MSR intercept (read: pass-through) state */
-#define MAX_POSSIBLE_PASSTHROUGH_MSRS 16
+#define MAX_POSSIBLE_PASSTHROUGH_MSRS 18
struct {
DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);