diff mbox series

[RFC,22/26] KVM: VMX: Pass through ITD classification related MSRs to Guest

Message ID 20240203091214.411862-23-zhao1.liu@linux.intel.com (mailing list archive)
State New, archived
Headers show
Series Intel Thread Director Virtualization | expand

Commit Message

Zhao Liu Feb. 3, 2024, 9:12 a.m. UTC
From: Zhao Liu <zhao1.liu@intel.com>

ITD adds 2 new MSRs, MSR_IA32_HW_FEEDBACK_CHAR and
MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, to allow the OS to classify the
currently running task into one of four classes [1].

Pass through these 2 MSRs to Guest:

* MSR_IA32_HW_FEEDBACK_CHAR.

MSR_IA32_HW_FEEDBACK_CHAR is a thread scope MSR. It is used to specify
the class of the currently running workload.

* MSR_IA32_HW_FEEDBACK_THREAD_CONFIG.

MSR_IA32_HW_FEEDBACK_THREAD_CONFIG is also a thread scope MSR and is
used to control the enablement of the classification function.

[1]: SDM, vol. 3B, section 15.6.8 Logical Processor Scope Intel Thread
     Director Configuration

Suggested-by: Zhenyu Wang <zhenyu.z.wang@intel.com>
Tested-by: Yanting Jiang <yanting.jiang@intel.com>
Co-developed-by: Zhuocheng Ding <zhuocheng.ding@intel.com>
Signed-off-by: Zhuocheng Ding <zhuocheng.ding@intel.com>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
---
 arch/x86/kvm/vmx/vmx.c | 37 +++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/vmx/vmx.h |  8 +++++++-
 2 files changed, 44 insertions(+), 1 deletion(-)
diff mbox series

Patch

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index bdff1d424b2f..11d42e0a208b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -225,6 +225,8 @@  static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
 	MSR_CORE_C3_RESIDENCY,
 	MSR_CORE_C6_RESIDENCY,
 	MSR_CORE_C7_RESIDENCY,
+	MSR_IA32_HW_FEEDBACK_THREAD_CONFIG,
+	MSR_IA32_HW_FEEDBACK_CHAR,
 };
 
 /*
@@ -1288,6 +1290,30 @@  static void pt_guest_exit(struct vcpu_vmx *vmx)
 		wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
 }
 
+static void itd_guest_enter(struct vcpu_vmx *vmx)
+{
+	struct vcpu_hfi_desc *vcpu_hfi = &vmx->vcpu_hfi_desc;
+
+	if (!guest_cpuid_has(&vmx->vcpu, X86_FEATURE_ITD) ||
+	    !kvm_cpu_cap_has(X86_FEATURE_ITD))
+		return;
+
+	rdmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->host_thread_cfg);
+	wrmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->guest_thread_cfg);
+}
+
+static void itd_guest_exit(struct vcpu_vmx *vmx)
+{
+	struct vcpu_hfi_desc *vcpu_hfi = &vmx->vcpu_hfi_desc;
+
+	if (!guest_cpuid_has(&vmx->vcpu, X86_FEATURE_ITD) ||
+	    !kvm_cpu_cap_has(X86_FEATURE_ITD))
+		return;
+
+	rdmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->guest_thread_cfg);
+	wrmsrl(MSR_IA32_HW_FEEDBACK_THREAD_CONFIG, vcpu_hfi->host_thread_cfg);
+}
+
 void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
 			unsigned long fs_base, unsigned long gs_base)
 {
@@ -5485,6 +5511,8 @@  static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vmx->msr_ia32_therm_control = 0;
 	vmx->msr_ia32_therm_interrupt = 0;
 	vmx->msr_ia32_therm_status = 0;
+	vmx->vcpu_hfi_desc.host_thread_cfg = 0;
+	vmx->vcpu_hfi_desc.guest_thread_cfg = 0;
 
 	vmx->hv_deadline_tsc = -1;
 	kvm_set_cr8(vcpu, 0);
@@ -7977,6 +8005,7 @@  static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	kvm_load_guest_xsave_state(vcpu);
 
 	pt_guest_enter(vmx);
+	itd_guest_enter(vmx);
 
 	atomic_switch_perf_msrs(vmx);
 	if (intel_pmu_lbr_is_enabled(vcpu))
@@ -8015,6 +8044,7 @@  static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	loadsegment(es, __USER_DS);
 #endif
 
+	itd_guest_exit(vmx);
 	pt_guest_exit(vmx);
 
 	kvm_load_host_xsave_state(vcpu);
@@ -8475,6 +8505,13 @@  static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 			vmx->hfi_table_idx = ((union cpuid6_edx)best->edx).split.index;
 	}
 
+	if (guest_cpuid_has(vcpu, X86_FEATURE_ITD) && kvm_cpu_cap_has(X86_FEATURE_ITD)) {
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_HW_FEEDBACK_THREAD_CONFIG,
+					  MSR_TYPE_RW, !guest_cpuid_has(vcpu, X86_FEATURE_ITD));
+		vmx_set_intercept_for_msr(vcpu, MSR_IA32_HW_FEEDBACK_CHAR,
+					  MSR_TYPE_RW, !guest_cpuid_has(vcpu, X86_FEATURE_ITD));
+	}
+
 	/* Refresh #PF interception to account for MAXPHYADDR changes. */
 	vmx_update_exception_bitmap(vcpu);
 }
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 0ef767d63def..3d3238dd8fc3 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -71,6 +71,11 @@  struct pt_desc {
 	struct pt_ctx guest;
 };
 
+struct vcpu_hfi_desc {
+	u64 host_thread_cfg;
+	u64 guest_thread_cfg;
+};
+
 union vmx_exit_reason {
 	struct {
 		u32	basic			: 16;
@@ -286,6 +291,7 @@  struct vcpu_vmx {
 	u64		      msr_ia32_therm_control;
 	u64		      msr_ia32_therm_interrupt;
 	u64		      msr_ia32_therm_status;
+	struct vcpu_hfi_desc  vcpu_hfi_desc;
 
 	/*
 	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
@@ -366,7 +372,7 @@  struct vcpu_vmx {
 	int hfi_table_idx;
 
 	/* Save desired MSR intercept (read: pass-through) state */
-#define MAX_POSSIBLE_PASSTHROUGH_MSRS	16
+#define MAX_POSSIBLE_PASSTHROUGH_MSRS	18
 	struct {
 		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
 		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);