@@ -1089,10 +1089,13 @@ typedef struct CPUX86State {
     uint64_t async_pf_en_msr;
     uint64_t pv_eoi_en_msr;
+    /* Partition-wide HV MSRs, synced only on the first VCPU */
     uint64_t msr_hv_hypercall;
     uint64_t msr_hv_guest_os_id;
-    uint64_t msr_hv_vapic;
     uint64_t msr_hv_tsc;
+
+    /* Per-VCPU HV MSRs */
+    uint64_t msr_hv_vapic;
     uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
     uint64_t msr_hv_runtime;
     uint64_t msr_hv_synic_control;
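
The regrouping mirrors the Hyper-V distinction between partition-wide state (the guest OS ID, hypercall page and reference TSC page are shared by all VCPUs) and per-VCPU state such as the APIC assist page and the MSRs grouped below it. If more partition-wide MSRs are added later, the classification could be centralized in a small predicate; the sketch below is hypothetical, not part of this patch, and assumes the HV_X64_MSR_* constants from QEMU's Hyper-V headers:

/*
 * Hypothetical helper (not in this patch): classifies the Hyper-V MSRs
 * handled here as partition-wide or per-VCPU, so the put/get paths can
 * agree on whether to gate the sync on the first VCPU.
 */
static bool hyperv_msr_is_partition_wide(uint32_t msr)
{
    switch (msr) {
    case HV_X64_MSR_GUEST_OS_ID:
    case HV_X64_MSR_HYPERCALL:
    case HV_X64_MSR_REFERENCE_TSC:
        return true;
    default:
        return false;
    }
}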
@@ -1719,19 +1719,23 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                               env->msr_global_ctrl);
         }
-        if (has_msr_hv_hypercall) {
-            kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
-                              env->msr_hv_guest_os_id);
-            kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
-                              env->msr_hv_hypercall);
+        /* Sync partition-wide MSRs only from the first VCPU to avoid races */
+        if (current_cpu == first_cpu) {
+            if (has_msr_hv_hypercall) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
+                                  env->msr_hv_guest_os_id);
+                kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
+                                  env->msr_hv_hypercall);
+            }
+            if (cpu->hyperv_time) {
+                kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
+                                  env->msr_hv_tsc);
+            }
         }
         if (cpu->hyperv_vapic) {
             kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                               env->msr_hv_vapic);
         }
-        if (cpu->hyperv_time) {
-            kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
-        }
         if (has_msr_hv_crash) {
             int j;
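
Note that the read-back side needs no such gate: requesting a partition-wide MSR from every VCPU is idempotent, since KVM stores one partition value and returns it to each vCPU; only concurrent writes can clobber one another. A minimal sketch of how the corresponding requests in kvm_get_msrs() can stay per-VCPU, assuming the same feature flags as above:

/* In kvm_get_msrs(): request current values with a zero placeholder;
 * the KVM_GET_MSRS ioctl fills them in, and they are copied back into
 * env afterwards. No first-VCPU gate is required for reads. */
if (has_msr_hv_hypercall) {
    kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
    kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
}
if (cpu->hyperv_vapic) {
    kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
}
if (cpu->hyperv_time) {
    kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
}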