@@ -337,6 +337,7 @@ struct kvm_vcpu_arch {
unsigned int time_offset;
struct page *time_page;
u64 last_host_tsc;
+ u64 last_host_ns;

bool nmi_pending;
bool nmi_injected;
@@ -1810,6 +1810,19 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
/* Make sure TSC doesn't go backwards */
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
native_read_tsc() - vcpu->arch.last_host_tsc;
+
+ /* Subtract elapsed cycle time from the delta computation */
+ if (check_tsc_unstable() && vcpu->arch.last_host_ns) {
+ s64 delta;
+ struct timespec ts;
+ ktime_get_ts(&ts);
+ monotonic_to_bootbased(&ts);
+ delta = timespec_to_ns(&ts) - vcpu->arch.last_host_ns;
+ delta = delta * per_cpu(cpu_tsc_khz, cpu);
+ delta = delta / USEC_PER_SEC;
+ tsc_delta -= delta;
+ }
+
if (tsc_delta < 0 || check_tsc_unstable())
kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
kvm_migrate_timers(vcpu);
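The arithmetic in the added block converts the wall-clock nanoseconds that passed since the vCPU last ran into TSC cycles: ns * kHz gives cycles scaled by 10^6, so dividing by USEC_PER_SEC yields plain cycles. A minimal user-space sketch of that conversion, using made-up numbers rather than anything read from the kernel:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000L

int main(void)
{
	/* Hypothetical example: 5 ms of elapsed boot-based time on a CPU
	 * whose TSC runs at 2.4 GHz (cpu_tsc_khz == 2400000). */
	int64_t elapsed_ns = 5000000;
	int64_t tsc_khz = 2400000;
	int64_t cycles = elapsed_ns * tsc_khz / USEC_PER_SEC;

	printf("%lld cycles elapsed\n", (long long)cycles); /* prints 12000000 */
	return 0;
}
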
@@ -1832,8 +1845,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
* vcpu->cpu != cpu can not detect this condition. So set
* vcpu->cpu = -1 to force the recalibration above.
*/
- if (check_tsc_unstable())
+ if (check_tsc_unstable()) {
+ struct timespec ts;
+ ktime_get_ts(&ts);
+ monotonic_to_bootbased(&ts);
+ vcpu->arch.last_host_ns = timespec_to_ns(&ts);
vcpu->cpu = -1;
+ }
}

static int is_efer_nx(void)
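
Taken together, the two x86.c hunks form a save/compensate pair: kvm_arch_vcpu_put records the boot-based time at which the vCPU left its host CPU, and kvm_arch_vcpu_load subtracts the cycles that elapsed real time accounts for, so only genuine TSC skew between CPUs is folded into the guest's TSC offset. A self-contained sketch of that pairing, with hypothetical structure and function names standing in for the kernel's ktime/per-cpu APIs:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000L

/* Hypothetical stand-in for the two fields the patch relies on. */
struct vcpu_clock {
	int64_t last_host_tsc;	/* raw TSC when the vCPU was scheduled out */
	int64_t last_host_ns;	/* boot-based ns when the vCPU was scheduled out */
};

/* Mirrors the vcpu_put hunk: remember when the vCPU left the host CPU. */
static void vcpu_put(struct vcpu_clock *v, int64_t now_tsc, int64_t now_ns)
{
	v->last_host_tsc = now_tsc;
	v->last_host_ns = now_ns;
}

/* Mirrors the vcpu_load hunk: the raw TSC delta, minus the cycles that
 * elapsed real time explains, is what gets removed from the TSC offset. */
static int64_t offset_adjustment(const struct vcpu_clock *v, int64_t now_tsc,
				 int64_t now_ns, int64_t tsc_khz)
{
	int64_t tsc_delta = now_tsc - v->last_host_tsc;
	int64_t elapsed_cycles = (now_ns - v->last_host_ns) * tsc_khz / USEC_PER_SEC;

	return -(tsc_delta - elapsed_cycles);
}

int main(void)
{
	struct vcpu_clock v;

	/* Fabricated numbers: the destination CPU's TSC reads 1,000,000 cycles
	 * ahead of the source, and 5 ms pass between put and load at 2.4 GHz,
	 * which accounts for 12,000,000 of the observed 13,000,000 cycles. */
	vcpu_put(&v, 50000000, 100000000);
	printf("adjustment: %lld\n",
	       (long long)offset_adjustment(&v, 63000000, 105000000, 2400000));
	/* prints -1000000: only the genuine cross-CPU skew is compensated */
	return 0;
}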