@@ -1179,11 +1179,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	return 0;
 }
 
-static void kvm_request_clock_update(struct kvm_vcpu *v)
-{
-	kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
-}
-
 static inline bool kvm_unstable_smp_clock(struct kvm *kvm)
 {
 	return check_tsc_unstable() && atomic_read(&kvm->online_vcpus) > 1;
@@ -1218,7 +1213,7 @@ static void kvm_update_tsc_trapping(struct kvm *kvm)
 	 */
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		vcpu->arch.tsc_mode = best_tsc_mode(vcpu);
-		kvm_request_clock_update(vcpu);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 	}
 }
 
@@ -1559,7 +1554,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		/* Disable / enable trapping for kvmclock */
 		vcpu->arch.tsc_mode = best_tsc_mode(vcpu);
-		kvm_request_clock_update(vcpu);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 		break;
 	}
 	case MSR_IA32_MCG_CTL:
@@ -4499,7 +4494,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 				continue;
 			if (freq->new > kvm->arch.virtual_tsc_khz)
 				vcpu->arch.tsc_overrun = 1;
-			kvm_request_clock_update(vcpu);
+			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 			if (vcpu->cpu != smp_processor_id())
 				send_ipi = 1;
 		}
@@ -5197,7 +5192,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	/* Running on slower TSC without kvmclock, we must bump TSC */
 	if (vcpu->arch.tsc_rebase)
-		kvm_request_clock_update(vcpu);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 	preempt_enable();
@@ -5791,7 +5786,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			if (vcpu->cpu == smp_processor_id())
-				kvm_request_clock_update(vcpu);
+				kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 	return kvm_x86_ops->hardware_enable(garbage);
 }
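
For context (not part of the diff above): kvm_make_request() is itself a trivial
inline helper, which is why the kvm_request_clock_update() wrapper bought nothing
over calling it directly. A sketch of the helper as it reads in kvm_host.h of this
era; the exact body may differ between kernel versions:

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Record the request as a pending bit on the vcpu; vcpu_enter_guest()
	 * consumes it via kvm_check_request() before the next guest entry.
	 */
	set_bit(req, &vcpu->requests);
}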