Advance the guest TSC to the current time after a suspend across which
the host TSCs went backwards.

This makes the behavior consistent between suspends where the host TSC
resets and suspends where it doesn't, such as suspend-to-idle:
previously, if the host TSC reset, the guests' TSCs would appear
"frozen" across the suspend because of KVM's backwards-TSC prevention,
while after a suspend-to-idle they would advance.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Suleiman Souhlal <suleiman@google.com>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/x86.c              | 33 ++++++++++++++++++++++++++++++++-
 2 files changed, 33 insertions(+), 1 deletion(-)

A standalone sketch of the offset arithmetic follows the diff.

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1398,6 +1398,7 @@ struct kvm_arch {
u64 cur_tsc_offset;
u64 cur_tsc_generation;
int nr_vcpus_matched_tsc;
+	bool host_was_suspended;

 	u32 default_tsc_khz;
bool user_set_tsc;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4971,7 +4971,37 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

 	/* Apply any externally detected TSC adjustments (due to suspend) */
if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
- adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
+		unsigned long flags;
+		struct kvm *kvm = vcpu->kvm;
+		bool advance;
+		u64 l1_tsc, offset, tsc_now;
+		s64 kernel_ns;	/* kvm_get_time_and_clockread() takes an s64 */
+
+		advance = kvm_get_time_and_clockread(&kernel_ns,
+						     &tsc_now);
+ raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+ /*
+ * Advance the guest's TSC to current time instead of only
+ * preventing it from going backwards, while making sure
+ * all the vCPUs use the same offset.
+ */
+ if (kvm->arch.host_was_suspended && advance) {
+			l1_tsc = nsec_to_cycles(vcpu,
+						kvm->arch.kvmclock_offset +
+						kernel_ns);
+ offset = kvm_compute_l1_tsc_offset(vcpu,
+ l1_tsc);
+ kvm->arch.cur_tsc_offset = offset;
+ kvm_vcpu_write_tsc_offset(vcpu, offset);
+ } else if (advance)
+			kvm_vcpu_write_tsc_offset(vcpu,
+						  kvm->arch.cur_tsc_offset);
+ else
+ adjust_tsc_offset_host(vcpu,
+ vcpu->arch.tsc_offset_adjustment);
+		kvm->arch.host_was_suspended = false;
+ raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock,
+ flags);
vcpu->arch.tsc_offset_adjustment = 0;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
}
@@ -12638,6 +12668,7 @@ int kvm_arch_enable_virtualization_cpu(void)
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 			}

+			kvm->arch.host_was_suspended = true;
/*
* We have to disable TSC offset matching.. if you were
* booting a VM while issuing an S4 host suspend....
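For reviewers who want to sanity-check the arithmetic, here is a
standalone userspace sketch of what the new path computes: an offset
such that "host TSC + offset" equals the TSC value the guest would have
reached had it kept counting across the suspend. All identifiers below
are local placeholders rather than KVM's, and the mult/shift scaling
done by nsec_to_cycles() and kvm_compute_l1_tsc_offset() is replaced
with plain 128-bit division; treat it as an illustration, not kernel
code.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Placeholder for nsec_to_cycles(): nanoseconds -> cycles at tsc_hz. */
static uint64_t ns_to_cycles(uint64_t tsc_hz, uint64_t ns)
{
	return (uint64_t)((unsigned __int128)tsc_hz * ns / NSEC_PER_SEC);
}

int main(void)
{
	/* Example numbers, not taken from real hardware. */
	uint64_t tsc_hz = 3000000000ULL;	/* 3 GHz guest TSC */
	uint64_t kvmclock_offset = 0;		/* guest booted at kernel_ns == 0 */
	uint64_t kernel_ns = 90 * NSEC_PER_SEC;	/* 90s since boot, suspend included */
	uint64_t host_tsc_now = 6000000000ULL;	/* host TSC reset during suspend */

	/* Where the guest TSC should be "now" (the patch's l1_tsc). */
	uint64_t l1_tsc = ns_to_cycles(tsc_hz, kvmclock_offset + kernel_ns);

	/* Offset chosen so that host_tsc_now + offset == l1_tsc. */
	uint64_t offset = l1_tsc - host_tsc_now;

	printf("guest TSC target: %llu cycles\n", (unsigned long long)l1_tsc);
	printf("new TSC offset:   %llu cycles\n", (unsigned long long)offset);
	return 0;
}

A "negative" offset is fine here: the TSC offset is applied modulo
2^64, which matches the u64 wrap-around in the subtraction above.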