@@ -1114,7 +1114,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.type = CPUID_FEATURE_WORD,
.feat_names = {
NULL, NULL, "arat", NULL,
- NULL, NULL, NULL, NULL,
+ NULL, NULL, "pts", NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
@@ -1124,6 +1124,11 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
},
.cpuid = { .eax = 6, .reg = R_EAX, },
.tcg_features = TCG_6_EAX_FEATURES,
+ /*
+ * PTS shouldn't be enabled by default since it has
+ * requirements on CPU topology.
+ */
+ .no_autoenable_flags = CPUID_6_EAX_PTS,
},
[FEAT_XSAVE_XCR0_LO] = {
.type = CPUID_FEATURE_WORD,
@@ -7424,6 +7429,21 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
goto out;
}
}
+
+ if (env->features[FEAT_6_EAX] & CPUID_6_EAX_PTS && ms->smp.sockets > 1) {
+ error_setg(errp,
+ "PTS currently only supports 1 package, "
+ "please set by \"-smp ...,sockets=1\"");
+ return;
+ }
+
+ if (env->features[FEAT_6_EAX] & CPUID_6_EAX_PTS &&
+ !(env->features[FEAT_6_EAX] & CPUID_6_EAX_ITD)) {
+ error_setg(errp,
+ "In the absence of ITD, Guest does "
+ "not need PTS");
+ return;
+ }
#endif
mce_init(cpu);
@@ -530,6 +530,9 @@ typedef enum X86Seg {
#define MSR_IA32_THERM_INTERRUPT 0x0000019b
#define MSR_IA32_THERM_STATUS 0x0000019c
+#define MSR_IA32_PACKAGE_THERM_STATUS 0x000001b1
+#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2
+
#define MSR_IA32_VMX_BASIC 0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482
@@ -982,6 +985,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
#define CPUID_XSAVE_XSAVES (1U << 3)
#define CPUID_6_EAX_ARAT (1U << 2)
+#define CPUID_6_EAX_PTS (1U << 6)
+#define CPUID_6_EAX_ITD (1U << 23)
/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)
@@ -1767,6 +1772,14 @@ typedef struct CPUArchState {
uint64_t therm_interrupt;
uint64_t therm_status;
+ /*
+ * Although these are package level MSRs, for the PTS feature, we
+ * temporarily limit it to be enabled for only 1 package, so the value
+ * of each vCPU is the same and it's enough to support the save/load.
+ */
+ uint64_t pkg_therm_interrupt;
+ uint64_t pkg_therm_status;
+
/* exception/interrupt handling */
int error_code;
int exception_is_int;
@@ -139,6 +139,7 @@ static bool has_msr_vmx_procbased_ctls2;
static bool has_msr_perf_capabs;
static bool has_msr_pkrs;
static bool has_msr_therm;
+static bool has_msr_pkg_therm;
static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
@@ -2461,6 +2462,10 @@ static int kvm_get_supported_msrs(KVMState *s)
case MSR_IA32_THERM_STATUS:
has_msr_therm = true;
break;
+ case MSR_IA32_PACKAGE_THERM_STATUS:
+ case MSR_IA32_PACKAGE_THERM_INTERRUPT:
+ has_msr_pkg_therm = true;
+ break;
}
}
}
@@ -3313,6 +3318,15 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_IA32_THERM_INTERRUPT, env->therm_interrupt);
kvm_msr_entry_add(cpu, MSR_IA32_THERM_STATUS, env->therm_status);
}
+ /* Only sync package level MSRs to KVM on the first cpu */
+ if (current_cpu == first_cpu) {
+ if (has_msr_pkg_therm) {
+ kvm_msr_entry_add(cpu, MSR_IA32_PACKAGE_THERM_STATUS,
+ env->pkg_therm_status);
+ kvm_msr_entry_add(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ env->pkg_therm_interrupt);
+ }
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
@@ -3790,6 +3804,10 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, MSR_IA32_THERM_INTERRUPT, 0);
kvm_msr_entry_add(cpu, MSR_IA32_THERM_STATUS, 0);
}
+ if (has_msr_pkg_therm) {
+ kvm_msr_entry_add(cpu, MSR_IA32_PACKAGE_THERM_STATUS, 0);
+ kvm_msr_entry_add(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, 0);
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
@@ -4280,6 +4298,12 @@ static int kvm_get_msrs(X86CPU *cpu)
case MSR_IA32_THERM_STATUS:
env->therm_status = msrs[i].data;
break;
+ case MSR_IA32_PACKAGE_THERM_STATUS:
+ env->pkg_therm_status = msrs[i].data;
+ break;
+ case MSR_IA32_PACKAGE_THERM_INTERRUPT:
+ env->pkg_therm_interrupt = msrs[i].data;
+ break;
}
}