@@ -275,6 +275,7 @@ struct kvm_mmu {
struct kvm_vcpu_arch {
u64 host_tsc;
+ u64 tsc_msr_offset; /* guest-visible TSC relative to elapsed reference TSC */
/*
* rip and regs accesses must go through
* kvm_{register,rip}_{read,write} functions.
@@ -16,3 +16,22 @@ struct kvm_timer_ops {
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data);
+u64 kvm_get_ref_tsc(void);
+
+/* Reference-clock cycles elapsed since the VM was created. */
+static inline u64 kvm_get_elapsed_tsc(struct kvm *kvm)
+{
+ return kvm_get_ref_tsc() - kvm->arch.vm_init_tsc;
+}
+
+/* Guest-visible TSC for this vcpu: elapsed VM time plus the guest's MSR offset. */
+static inline u64 kvm_get_cpu_tsc(struct kvm_vcpu *vcpu)
+{
+ return kvm_get_elapsed_tsc(vcpu->kvm) + vcpu->arch.tsc_msr_offset;
+}
+
+static inline void kvm_set_cpu_tsc(struct kvm_vcpu *vcpu, u64 data)
+{
+ u64 tsc_offset;
+
+ /* The new offset is relative to elapsed VM time, so the guest now reads 'data';
+  * subtracting kvm_get_cpu_tsc() here would carry any stale offset into the result.
+  */
+ tsc_offset = data - kvm_get_elapsed_tsc(vcpu->kvm);
+ vcpu->arch.tsc_msr_offset = tsc_offset;
+}
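
The arithmetic behind these helpers is easy to check in isolation. Below is a hypothetical userspace model, not KVM code: ref_tsc, vm_init_tsc and tsc_msr_offset are plain integers standing in for kvm_get_ref_tsc(), kvm->arch.vm_init_tsc and vcpu->arch.tsc_msr_offset.

#include <assert.h>
#include <stdint.h>

static uint64_t ref_tsc;        /* stands in for kvm_get_ref_tsc() */
static uint64_t vm_init_tsc;    /* kvm->arch.vm_init_tsc           */
static uint64_t tsc_msr_offset; /* vcpu->arch.tsc_msr_offset       */

static uint64_t get_cpu_tsc(void)
{
	/* mirrors kvm_get_cpu_tsc(): elapsed VM time plus the MSR offset */
	return (ref_tsc - vm_init_tsc) + tsc_msr_offset;
}

static void set_cpu_tsc(uint64_t data)
{
	/* mirrors kvm_set_cpu_tsc(): offset is relative to elapsed VM time */
	tsc_msr_offset = data - (ref_tsc - vm_init_tsc);
}

int main(void)
{
	ref_tsc = vm_init_tsc = 1000;	/* VM created: guest TSC reads 0    */
	assert(get_cpu_tsc() == 0);

	ref_tsc += 500;			/* 500 reference cycles pass        */
	set_cpu_tsc(10000);		/* guest writes MSR_IA32_TSC        */
	assert(get_cpu_tsc() == 10000);

	ref_tsc += 250;			/* reads keep advancing from there  */
	assert(get_cpu_tsc() == 10250);

	set_cpu_tsc(42);		/* a second write also takes effect */
	assert(get_cpu_tsc() == 42);
	return 0;
}
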
@@ -766,16 +766,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
int i;
if (unlikely(cpu != vcpu->cpu)) {
- u64 delta;
-
- /*
- * Make sure that the guest sees a monotonically
- * increasing TSC.
- */
- delta = vcpu->arch.host_tsc - native_read_tsc();
- svm->vmcb->control.tsc_offset += delta;
- if (is_nested(svm))
- svm->nested.hsave->control.tsc_offset += delta;
vcpu->cpu = cpu;
kvm_migrate_timers(vcpu);
svm->asid_generation = 0;
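
The migration fixup removed above is no longer needed because the guest TSC is not derived from whichever physical CPU the vcpu lands on; it is rebuilt from kvm_get_ref_tsc(), which is presumably monotonic across CPUs. The implementation of kvm_get_ref_tsc() is not part of the hunks shown here; a minimal sketch, assuming a raw TSC read clamped against the last value handed out (ref_tsc_lock and ref_tsc_last are invented names, not from the patch), could look like:

#include <linux/spinlock.h>
#include <asm/msr.h>

static DEFINE_SPINLOCK(ref_tsc_lock);	/* assumed, not in the patch */
static u64 ref_tsc_last;		/* assumed, not in the patch */

u64 kvm_get_ref_tsc(void)
{
	unsigned long flags;
	u64 tsc;

	spin_lock_irqsave(&ref_tsc_lock, flags);
	tsc = native_read_tsc();
	if (tsc < ref_tsc_last)		/* never step backwards, even  */
		tsc = ref_tsc_last;	/* across unsynchronized CPUs  */
	ref_tsc_last = tsc;
	spin_unlock_irqrestore(&ref_tsc_lock, flags);

	return tsc;
}
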
@@ -1826,7 +1816,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
- svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
@@ -2030,12 +2019,20 @@ static int task_switch_interception(struct vcpu_svm *svm)
return kvm_task_switch(&svm->vcpu, tss_selector, reason);
}
-static int rdtsc_interception(struct vcpu_svm *svm)
+/* Guest-visible TSC, including the nested hsave offset while a nested guest runs. */
+static u64 get_tsc(struct vcpu_svm *svm)
{
u64 tsc;
- rdtscll(tsc);
- tsc += svm->vmcb->control.tsc_offset;
+ tsc = kvm_get_cpu_tsc(&svm->vcpu);
+ if (is_nested(svm))
+ tsc += svm->nested.hsave->control.tsc_offset;
+
+ return tsc;
+}
+
+static int rdtsc_interception(struct vcpu_svm *svm)
+{
+ u64 tsc = get_tsc(svm);
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, tsc & 0xffffffff);
tsc >>= 32;
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, tsc & 0xffffffff);
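
Since RDTSC is intercepted, a guest RDTSC now returns get_tsc() split across EDX:EAX, which is what the two kvm_register_write() calls above implement. A trivial standalone check of that split, in plain C with no KVM types:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t tsc = 0x1122334455667788ULL;		/* value from get_tsc() */
	uint32_t eax = tsc & 0xffffffff;		/* low half  -> RAX     */
	uint32_t edx = (tsc >> 32) & 0xffffffff;	/* high half -> RDX     */

	assert(eax == 0x55667788 && edx == 0x11223344);
	assert(((uint64_t)edx << 32 | eax) == tsc);
	return 0;
}
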
@@ -2095,14 +2092,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
switch (ecx) {
case MSR_IA32_TSC: {
- u64 tsc_offset;
-
- if (is_nested(svm))
- tsc_offset = svm->nested.hsave->control.tsc_offset;
- else
- tsc_offset = svm->vmcb->control.tsc_offset;
-
- *data = tsc_offset + native_read_tsc();
+ *data = get_tsc(svm);
break;
}
case MSR_K6_STAR:
@@ -2188,17 +2178,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
switch (ecx) {
case MSR_IA32_TSC: {
- u64 tsc_offset = data - native_read_tsc();
- u64 g_tsc_offset = 0;
-
if (is_nested(svm)) {
- g_tsc_offset = svm->vmcb->control.tsc_offset -
- svm->nested.hsave->control.tsc_offset;
+ u64 tsc_offset = data - kvm_get_cpu_tsc(vcpu);
svm->nested.hsave->control.tsc_offset = tsc_offset;
+ } else {
+ kvm_set_cpu_tsc(vcpu, data);
}
-
- svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset;
-
break;
}
case MSR_K6_STAR:
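
In the nested case above, the written value is folded into the hsave tsc_offset rather than the per-vcpu MSR offset, so get_tsc(), which adds the hsave offset back, immediately reflects the write. A quick standalone check of that arithmetic with plain integers (not KVM code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t base = 123456;			/* kvm_get_cpu_tsc(vcpu)    */
	uint64_t data = 999999;			/* value written to the MSR */
	uint64_t hsave_offset = data - base;	/* svm_set_msr, nested path */

	/* get_tsc() = kvm_get_cpu_tsc() + hsave tsc_offset */
	assert(base + hsave_offset == data);
	return 0;
}
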
@@ -5402,7 +5402,7 @@ struct kvm *kvm_arch_create_vm(void)
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
- rdtscll(kvm->arch.vm_init_tsc);
+ kvm->arch.vm_init_tsc = kvm_get_ref_tsc();
return kvm;
}