--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -968,6 +968,7 @@ struct kvm_arch {
int audit_point;
#endif
+ bool tsc_immutable;
bool backwards_tsc_observed;
bool boot_vcpu_runs_old_kvmclock;
u32 bsp_vcpu_id;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2040,7 +2040,9 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
u64 ratio;
/* Guest TSC same frequency as host TSC? */
- if (!scale) {
+ if (!scale || vcpu->kvm->arch.tsc_immutable) {
+ if (scale)
+ pr_warn_ratelimited("Guest TSC immutable, scaling not supported\n");
vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
return 0;
}
@@ -2216,6 +2218,9 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
bool already_matched;
bool synchronizing = false;
+ if (WARN_ON_ONCE(vcpu->kvm->arch.tsc_immutable))
+ return;
+
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_compute_tsc_offset(vcpu, data);
ns = get_kvmclock_base_ns();
@@ -2641,6 +2646,10 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
u8 pvclock_flags;
bool use_master_clock;
+ /* Unable to update guest time if the TSC is immutable. */
+ if (ka->tsc_immutable)
+ return 0;
+
kernel_ns = 0;
host_tsc = 0;
@@ -3915,7 +3924,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
- if (kvm_check_tsc_unstable()) {
+ if (kvm_check_tsc_unstable() &&
+ !vcpu->kvm->arch.tsc_immutable) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_vcpu_write_tsc_offset(vcpu, offset);
@@ -3929,7 +3939,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
* On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration
*/
- if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
+ if ((!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) &&
+ !vcpu->kvm->arch.tsc_immutable)
kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != cpu)
kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
@@ -4888,10 +4899,11 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_SET_TSC_KHZ: {
- u32 user_tsc_khz;
+ u32 user_tsc_khz = (u32)arg;
r = -EINVAL;
- user_tsc_khz = (u32)arg;
+ if (vcpu->kvm->arch.tsc_immutable)
+ goto out;
if (kvm_has_tsc_control &&
user_tsc_khz >= kvm_max_guest_tsc_khz)
@@ -10013,9 +10025,12 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
if (mutex_lock_killable(&vcpu->mutex))
return;
- vcpu_load(vcpu);
- kvm_synchronize_tsc(vcpu, 0);
- vcpu_put(vcpu);
+
+ if (!kvm->arch.tsc_immutable) {
+ vcpu_load(vcpu);
+ kvm_synchronize_tsc(vcpu, 0);
+ vcpu_put(vcpu);
+ }
/* poll control enabled by default */
vcpu->arch.msr_kvm_poll_control = 1;
@@ -10209,6 +10224,10 @@ int kvm_arch_hardware_enable(void)
if (backwards_tsc) {
u64 delta_cyc = max_tsc - local_tsc;
list_for_each_entry(kvm, &vm_list, vm_list) {
+ if (kvm->arch.tsc_immutable) {
+ pr_warn_ratelimited("Backwards TSC observed; skipping adjustment for VM with immutable TSC\n");
+ continue;
+ }
kvm->arch.backwards_tsc_observed = true;
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.tsc_offset_adjustment += delta_cyc;
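
Note on the userspace-visible effect: with these hunks applied, KVM_SET_TSC_KHZ fails with -EINVAL on a vCPU whose VM has been marked tsc_immutable, and kvm_synchronize_tsc()/kvm_guest_time_update() become no-ops for such VMs, so KVM no longer adjusts the guest clock on its own. A minimal sketch of the ioctl behaviour follows; the step that actually sets kvm->arch.tsc_immutable is a placeholder, since that interface is not part of the hunks above.

/*
 * Sketch only, not part of this patch: exercises KVM_GET_TSC_KHZ and
 * KVM_SET_TSC_KHZ on a freshly created vCPU. The step that marks the
 * TSC immutable is a placeholder, since that interface is not shown
 * in the hunks above. Error handling is omitted for brevity.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = ioctl(kvm, KVM_CREATE_VM, 0);
        int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

        /* ... mark the VM's TSC immutable here (mechanism not shown) ... */

        /* Reading the guest TSC frequency still works. */
        printf("guest TSC: %d kHz\n", ioctl(vcpu, KVM_GET_TSC_KHZ, 0));

        /* Changing it is now rejected for an immutable TSC. */
        if (ioctl(vcpu, KVM_SET_TSC_KHZ, 1000000UL) < 0 && errno == EINVAL)
                printf("KVM_SET_TSC_KHZ refused: TSC is immutable\n");

        return 0;
}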