@@ -1826,6 +1826,8 @@ typedef struct CPUArchState {
uint64_t system_time_msr;
uint64_t wall_clock_msr;
+ uint64_t system_time_new_msr;
+ uint64_t wall_clock_new_msr;
uint64_t steal_time_msr;
uint64_t async_pf_en_msr;
uint64_t async_pf_int_msr;
@@ -3423,6 +3423,12 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr);
}
+ if (env->features[FEAT_KVM] & CPUID_KVM_CLOCK2) {
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME_NEW,
+ env->system_time_new_msr);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK_NEW,
+ env->wall_clock_new_msr);
+ }
if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, env->async_pf_int_msr);
}
@@ -3901,6 +3907,10 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0);
kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0);
}
+ if (env->features[FEAT_KVM] & CPUID_KVM_CLOCK2) {
+ kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME_NEW, 0);
+ kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK_NEW, 0);
+ }
if (env->features[FEAT_KVM] & CPUID_KVM_ASYNCPF_INT) {
kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_INT, 0);
}
@@ -4167,6 +4177,12 @@ static int kvm_get_msrs(X86CPU *cpu)
case MSR_KVM_WALL_CLOCK:
env->wall_clock_msr = msrs[i].data;
break;
+ case MSR_KVM_SYSTEM_TIME_NEW:
+ env->system_time_new_msr = msrs[i].data;
+ break;
+ case MSR_KVM_WALL_CLOCK_NEW:
+ env->wall_clock_new_msr = msrs[i].data;
+ break;
case MSR_MCG_STATUS:
env->mcg_status = msrs[i].data;
break;
MSR_KVM_SYSTEM_TIME_NEW and MSR_KVM_WALL_CLOCK_NEW are bound to kvmclock2 (KVM_FEATURE_CLOCKSOURCE2). Add save/load support for these two MSRs, just like the existing kvmclock MSRs. Signed-off-by: Zhao Liu <zhao1.liu@intel.com> --- target/i386/cpu.h | 2 ++ target/i386/kvm/kvm.c | 16 ++++++++++++++++ 2 files changed, 18 insertions(+)