@@ -966,7 +966,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, NULL, "fzrm", "fsrs",
"fsrc", NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
- NULL, "amx-fp16", NULL, "avx-ifma",
+ NULL, "amx-fp16", "hreset", "avx-ifma",
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
},
@@ -976,6 +976,11 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.reg = R_EAX,
},
.tcg_features = TCG_7_1_EAX_FEATURES,
+ /*
+ * Currently HRESET is only used for ITD (Intel Thread Director) history
+ * reset. ITD is not auto-enabled, so don't auto-enable HRESET by default
+ * either.
+ */
+ .no_autoenable_flags = CPUID_7_1_EAX_HRESET,
},
[FEAT_7_1_EDX] = {
.type = CPUID_FEATURE_WORD,
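A note on no_autoenable_flags, since the comment above only states the policy: bits listed there are excluded when "-cpu max" / "-cpu host" fill in every supported feature, so hreset stays off unless requested explicitly (e.g. "-cpu host,hreset=on"). Roughly, the expansion step in x86_cpu_expand_features() behaves like the paraphrased sketch below; the trailing argument of x86_cpu_get_supported_feature_word() is an assumption here, and details may differ in the tree this patch applies to.

    /* Only when "-cpu max" / "-cpu host" asked for all supported features: */
    for (w = 0; w < FEATURE_WORDS; w++) {
        /* Auto-enable bits the user did not set explicitly, but never the
         * bits in no_autoenable_flags (HRESET in this feature word). */
        env->features[w] |= x86_cpu_get_supported_feature_word(w, false) &
                            ~env->user_features[w] &
                            ~feature_word_info[w].no_autoenable_flags;
    }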
@@ -6502,6 +6507,22 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
break;
}
+ case 0x20: {
+ /* Processor History Reset */
+ if (kvm_enabled() &&
+ env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_HRESET) {
+ *eax = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x20,
+ count, R_EAX);
+ *ebx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x20,
+ count, R_EBX);
+ } else {
+ *eax = 0;
+ *ebx = 0;
+ }
+ *ecx = 0;
+ *edx = 0;
+ break;
+ }
case 0x40000000:
/*
* CPUID code in kvm_arch_init_vcpu() ignores stuff
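For reference, this is how a guest is expected to consume the new leaf: CPUID.20H:EBX[0] advertises Intel Thread Director history reset, the OS opts in through bit 0 of IA32_HRESET_ENABLE (0x17DA), and then issues HRESET with the capability mask in EAX. The fragment below is a hypothetical guest-kernel sketch, not part of this patch; cpuid_count() and wrmsrl() are assumed ring-0 helpers, and the HRESET opcode bytes are spelled out in case the assembler lacks the mnemonic.

    #define MSR_IA32_HRESET_ENABLE  0x17da
    #define HRESET_ITD_HISTORY      (1U << 0)

    static void maybe_reset_itd_history(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID.(EAX=7,ECX=1):EAX[22] enumerates the HRESET instruction. */
        cpuid_count(7, 1, &eax, &ebx, &ecx, &edx);
        if (!(eax & (1U << 22))) {
            return;
        }

        /* CPUID.20H:EBX[0] enumerates ITD history reset support. */
        cpuid_count(0x20, 0, &eax, &ebx, &ecx, &edx);
        if (!(ebx & HRESET_ITD_HISTORY)) {
            return;
        }

        /* Opt in, then request the reset; HRESET reads its capability mask
         * from EAX and the imm8 byte is currently always 0. */
        wrmsrl(MSR_IA32_HRESET_ENABLE, HRESET_ITD_HISTORY);
        asm volatile(".byte 0xf3, 0x0f, 0x3a, 0xf0, 0xc0, 0x00"
                     : : "a" (HRESET_ITD_HISTORY));
    }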
@@ -7147,6 +7168,11 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) {
x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12);
}
+
+ /* HRESET requires CPUID[0x20] */
+ if (env->features[FEAT_7_1_EAX] & CPUID_7_1_EAX_HRESET) {
+ x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x20);
+ }
}
/* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
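For completeness, the comment on the last context line above refers to logic along these lines (paraphrased from cpu.c, not part of this patch); it is what makes the bumped cpuid_min_level actually surface as CPUID.0H:EAX >= 0x20 when the user has not pinned "level":

    if (env->cpuid_level == UINT32_MAX) {
        /* "level" was left at its default, so adopt the computed minimum,
         * which is now at least 0x20 when hreset is enabled. */
        env->cpuid_level = env->cpuid_min_level;
    }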
@@ -535,6 +535,7 @@ typedef enum X86Seg {
#define MSR_IA32_HW_FEEDBACK_CONFIG 0x000017d0
#define MSR_IA32_HW_FEEDBACK_PTR 0x000017d1
+#define MSR_IA32_HW_HRESET_ENABLE 0x000017da
#define MSR_IA32_VMX_BASIC 0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS 0x00000481
@@ -933,6 +934,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
#define CPUID_7_1_EAX_FSRC (1U << 12)
/* Support Tile Computational Operations on FP16 Numbers */
#define CPUID_7_1_EAX_AMX_FP16 (1U << 21)
+/* Support for the HRESET instruction (Processor History Reset) */
+#define CPUID_7_1_EAX_HRESET (1U << 22)
/* Support for VPMADD52[H,L]UQ */
#define CPUID_7_1_EAX_AVX_IFMA (1U << 23)
@@ -1786,6 +1789,9 @@ typedef struct CPUArchState {
uint64_t hfi_config;
uint64_t hfi_ptr;
+ /* Per-VCPU HRESET MSR */
+ uint64_t hreset_enable;
+
/* exception/interrupt handling */
int error_code;
int exception_is_int;
@@ -141,6 +141,7 @@ static bool has_msr_pkrs;
static bool has_msr_therm;
static bool has_msr_pkg_therm;
static bool has_msr_hfi;
+static bool has_msr_hreset;
static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
@@ -2471,6 +2472,9 @@ static int kvm_get_supported_msrs(KVMState *s)
case MSR_IA32_HW_FEEDBACK_PTR:
has_msr_hfi = true;
break;
+ case MSR_IA32_HW_HRESET_ENABLE:
+ has_msr_hreset = true;
+ break;
}
}
}
@@ -3337,6 +3341,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_IA32_HW_FEEDBACK_PTR,
env->hfi_ptr);
}
+ if (has_msr_hreset) {
+ kvm_msr_entry_add(cpu, MSR_IA32_HW_HRESET_ENABLE,
+ env->hreset_enable);
+ }
}
#ifdef TARGET_X86_64
@@ -3823,6 +3831,9 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, MSR_IA32_HW_FEEDBACK_CONFIG, 0);
kvm_msr_entry_add(cpu, MSR_IA32_HW_FEEDBACK_PTR, 0);
}
+ if (has_msr_hreset) {
+ kvm_msr_entry_add(cpu, MSR_IA32_HW_HRESET_ENABLE, 0);
+ }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
@@ -4325,6 +4336,9 @@ static int kvm_get_msrs(X86CPU *cpu)
case MSR_IA32_HW_FEEDBACK_PTR:
env->hfi_ptr = msrs[i].data;
break;
+ case MSR_IA32_HW_HRESET_ENABLE:
+ env->hreset_enable = msrs[i].data;
+ break;
}
}