@@ -1456,11 +1456,11 @@ static u64 svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 		g_tsc_offset = svm->vmcb->control.tsc_offset -
 			       svm->nested.hsave->control.tsc_offset;
 		svm->nested.hsave->control.tsc_offset = l1_offset;
-	} else
-		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-					   svm->vmcb->control.tsc_offset,
-					   l1_offset);
+	}
+	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+				   svm->vmcb->control.tsc_offset,
+				   l1_offset + g_tsc_offset);
 	svm->vmcb->control.tsc_offset = l1_offset + g_tsc_offset;
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
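
For readability, a sketch of how svm_write_tsc_offset() reads with the hunk
above applied. Everything outside the hunk (the to_svm() lookup, the
g_tsc_offset initialization, the is_guest_mode() check and the final return)
is reconstructed from the surrounding SVM code and is an assumption, not part
of this diff. The point of the change is visible here: the tracepoint now
fires on both the nested and non-nested paths, and it logs the combined
L1+L2 offset actually written to the active VMCB rather than only L1's
component.

static u64 svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		/* Remember L2's delta over L1 before updating L1's offset. */
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = l1_offset;
	}
	/* Unconditional now: logs the combined offset programmed into the
	 * active VMCB, whether or not the vCPU is in guest mode. */
	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				   svm->vmcb->control.tsc_offset,
				   l1_offset + g_tsc_offset);
	svm->vmcb->control.tsc_offset = l1_offset + g_tsc_offset;
	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	return svm->vmcb->control.tsc_offset;
}
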
@@ -3465,11 +3465,10 @@ static u64 vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 		if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
 			active_offset += vmcs12->tsc_offset;
-	} else {
-		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-					   vmcs_read64(TSC_OFFSET), l1_offset);
 	}
+	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+				   vmcs_read64(TSC_OFFSET), active_offset);
 	vmcs_write64(TSC_OFFSET, active_offset);
 	return active_offset;
 }
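
And the corresponding sketch of vmx_write_tsc_offset() with the second hunk
applied. The declaration of active_offset and the is_guest_mode() check are
reconstructed from the surrounding VMX code and are assumptions, not part of
this diff. It mirrors the SVM side: the trace is unconditional and records
the offset that actually takes effect in the active VMCS.

static u64 vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
{
	u64 active_offset = l1_offset;

	if (is_guest_mode(vcpu)) {
		/* If L1 uses TSC offsetting for L2, fold vmcs12's offset
		 * into the value written to the active VMCS. */
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
			active_offset += vmcs12->tsc_offset;
	}
	/* Trace unconditionally with the combined offset, not just L1's
	 * component. */
	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				   vmcs_read64(TSC_OFFSET), active_offset);
	vmcs_write64(TSC_OFFSET, active_offset);
	return active_offset;
}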