@@ -339,6 +339,8 @@ struct kvm_vcpu_arch {
 	unsigned int time_offset;
 	struct page *time_page;
 	u64 last_host_tsc;
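+	/* kernel time (ns) at last vcpu_put; pairs with last_host_tsc */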
+	u64 last_host_ns;

 	bool nmi_pending;
 	bool nmi_injected;
@@ -1887,8 +1887,19 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 				native_read_tsc() - vcpu->arch.last_host_tsc;
 		if (tsc_delta < 0)
 			mark_tsc_unstable("KVM discovered backwards TSC");
-		if (check_tsc_unstable())
+		if (check_tsc_unstable()) {
+			/*
+			 * Subtract elapsed cycle time.
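+			 * Wall-clock time that passed while this vCPU was
+			 * scheduled out is not cross-CPU TSC drift, so it
+			 * must not be folded into the offset adjustment
+			 * below; compensate only for the remainder.
+			 */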
+			u64 ns = !vcpu->arch.last_host_ns ? 0 :
+				get_kernel_ns() - vcpu->arch.last_host_ns;
+			tsc_delta -= nsec_to_cycles(ns);
 			kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
+		}
 		kvm_migrate_timers(vcpu);
 		kvm_request_guest_time_update(vcpu);
 		vcpu->cpu = cpu;
@@ -1900,6 +1911,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
 	vcpu->arch.last_host_tsc = native_read_tsc();
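+	/* Record kernel time too, so vcpu_load can deduct elapsed time */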
+	vcpu->arch.last_host_ns = get_kernel_ns();
 }
 
 static int is_efer_nx(void)