===================================================================
@@ -113,7 +113,7 @@ static s64 __kpit_elapsed(struct kvm *kv
* itself with the initial count and continues counting
* from there.
*/
- remaining = hrtimer_expires_remaining(&ps->pit_timer.timer);
+ remaining = kvm_timer_remaining(&ps->pit_timer);
elapsed = ps->pit_timer.period - ktime_to_ns(remaining);
elapsed = mod_64(elapsed, ps->pit_timer.period);
@@ -229,7 +229,7 @@ int pit_has_pending_timer(struct kvm_vcp
struct kvm_pit *pit = vcpu->kvm->arch.vpit;
if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
- return atomic_read(&pit->pit_state.pit_timer.pending);
+ return kvm_timer_has_pending(&pit->pit_state.pit_timer);
return 0;
}
@@ -238,42 +238,17 @@ static void kvm_pit_ack_irq(struct kvm_i
struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
irq_ack_notifier);
spin_lock(&ps->inject_lock);
- if (atomic_dec_return(&ps->pit_timer.pending) < 0)
- atomic_inc(&ps->pit_timer.pending);
+ kvm_timer_ack(&ps->pit_timer);
ps->irq_ack = 1;
spin_unlock(&ps->inject_lock);
}
-void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
-{
- struct kvm_pit *pit = vcpu->kvm->arch.vpit;
- struct hrtimer *timer;
-
- if (!kvm_vcpu_is_bsp(vcpu) || !pit)
- return;
-
- timer = &pit->pit_state.pit_timer.timer;
- if (hrtimer_cancel(timer))
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
-}
-
static void destroy_pit_timer(struct kvm_timer *pt)
{
pr_debug("pit: execute del timer!\n");
- hrtimer_cancel(&pt->timer);
+ kvm_timer_cancel(pt); /* stops the hrtimer and clears the pending count */
}
-static bool kpit_is_periodic(struct kvm_timer *ktimer)
-{
- struct kvm_kpit_state *ps = container_of(ktimer, struct kvm_kpit_state,
- pit_timer);
- return ps->is_periodic;
-}
-
-static struct kvm_timer_ops kpit_ops = {
- .is_periodic = kpit_is_periodic,
-};
-
static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
{
struct kvm_timer *pt = &ps->pit_timer;
@@ -284,20 +259,10 @@ static void create_pit_timer(struct kvm_
pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);
/* TODO The new value only affected after the retriggered */
- hrtimer_cancel(&pt->timer);
- pt->period = interval;
- ps->is_periodic = is_period;
-
- pt->timer.function = kvm_timer_fn;
- pt->t_ops = &kpit_ops;
- pt->kvm = ps->pit->kvm;
- pt->vcpu = pt->kvm->bsp_vcpu;
+ kvm_timer_cancel(pt);
- atomic_set(&pt->pending, 0);
ps->irq_ack = 1;
-
- hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
- HRTIMER_MODE_ABS);
+ kvm_timer_start(pt, interval, is_period);
}
static void pit_load_count(struct kvm *kvm, int channel, u32 val)
@@ -545,7 +510,7 @@ static int speaker_ioport_read(struct kv
return 0;
}
-void kvm_pit_reset(struct kvm_pit *pit)
+static void kvm_pit_reset(struct kvm_pit *pit)
{
int i;
struct kvm_kpit_channel_state *c;
@@ -559,7 +524,7 @@ void kvm_pit_reset(struct kvm_pit *pit)
}
mutex_unlock(&pit->pit_state.lock);
- atomic_set(&pit->pit_state.pit_timer.pending, 0);
+ kvm_timer_reset(&pit->pit_state.pit_timer);
pit->pit_state.irq_ack = 1;
}
@@ -568,7 +533,7 @@ static void pit_mask_notifer(struct kvm_
struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
if (!mask) {
- atomic_set(&pit->pit_state.pit_timer.pending, 0);
+ kvm_timer_reset(&pit->pit_state.pit_timer);
pit->pit_state.irq_ack = 1;
}
}
@@ -608,8 +573,8 @@ struct kvm_pit *kvm_create_pit(struct kv
pit_state = &pit->pit_state;
pit_state->pit = pit;
- hrtimer_init(&pit_state->pit_timer.timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ kvm_timer_init(kvm, &pit_state->pit_timer);
+
pit_state->irq_ack_notifier.gsi = 0;
pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
@@ -634,14 +599,11 @@ struct kvm_pit *kvm_create_pit(struct kv
void kvm_free_pit(struct kvm *kvm)
{
- struct hrtimer *timer;
-
if (kvm->arch.vpit) {
+ kvm_timer_cancel(&kvm->arch.vpit->pit_state.pit_timer);
kvm_unregister_irq_mask_notifier(kvm, 0,
&kvm->arch.vpit->mask_notifier);
mutex_lock(&kvm->arch.vpit->pit_state.lock);
- timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
- hrtimer_cancel(timer);
kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
kfree(kvm->arch.vpit);
@@ -686,7 +648,7 @@ void kvm_inject_pit_timer_irqs(struct kv
* last one has been acked.
*/
spin_lock(&ps->inject_lock);
- if (atomic_read(&ps->pit_timer.pending) && ps->irq_ack) {
+ if (kvm_timer_has_pending(&ps->pit_timer) && ps->irq_ack) {
ps->irq_ack = 0;
inject = 1;
}
===================================================================
@@ -1,18 +1,25 @@
struct kvm_timer {
struct hrtimer timer;
- s64 period; /* unit: ns */
- atomic_t pending; /* accumulated triggered timers */
+ s64 period; /* unit: ns */
+ atomic_t pending; /* accumulated triggered timers */
bool reinject;
- struct kvm_timer_ops *t_ops;
+ bool periodic; /* re-arm each period; replaces kvm_timer_ops->is_periodic() */
struct kvm *kvm;
struct kvm_vcpu *vcpu;
};
-struct kvm_timer_ops {
- bool (*is_periodic)(struct kvm_timer *);
-};
+void kvm_timer_init(struct kvm *kvm, struct kvm_timer *ktimer);
+void kvm_timer_start(struct kvm_timer *ktimer, u64 interval, bool periodic);
+void kvm_timer_cancel(struct kvm_timer *ktimer);
+void kvm_timer_vcpu_bind(struct kvm_timer *ktimer, struct kvm_vcpu *vcpu);
+
+int kvm_timer_has_pending(struct kvm_timer *ktimer);
+void kvm_timer_ack(struct kvm_timer *ktimer);
+void kvm_timer_reset(struct kvm_timer *ktimer);
+
+void kvm_migrate_timer(struct kvm_timer *ktimer);
-enum hrtimer_restart kvm_timer_fn(struct hrtimer *data);
+ktime_t kvm_timer_remaining(struct kvm_timer *ktimer);
===================================================================
@@ -485,7 +485,7 @@ static u32 apic_get_tmcct(struct kvm_lap
if (apic_get_reg(apic, APIC_TMICT) == 0)
return 0;
- remaining = hrtimer_expires_remaining(&apic->lapic_timer.timer);
+ remaining = kvm_timer_remaining(&apic->lapic_timer);
if (ktime_to_ns(remaining) < 0)
remaining = ktime_set(0, 0);
@@ -601,28 +601,13 @@ static void update_divide_count(struct k
static void start_apic_timer(struct kvm_lapic *apic)
{
- ktime_t now = apic->lapic_timer.timer.base->get_time();
-
- apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) *
+ u64 period = apic_get_reg(apic, APIC_TMICT) *
APIC_BUS_CYCLE_NS * apic->divide_count;
- atomic_set(&apic->lapic_timer.pending, 0);
- if (!apic->lapic_timer.period)
+ if (!period)
return;
- hrtimer_start(&apic->lapic_timer.timer,
- ktime_add_ns(now, apic->lapic_timer.period),
- HRTIMER_MODE_ABS);
-
- apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
- PRIx64 ", "
- "timer initial count 0x%x, period %lldns, "
- "expire @ 0x%016" PRIx64 ".\n", __func__,
- APIC_BUS_CYCLE_NS, ktime_to_ns(now),
- apic_get_reg(apic, APIC_TMICT),
- apic->lapic_timer.period,
- ktime_to_ns(ktime_add_ns(now,
- apic->lapic_timer.period)));
+ kvm_timer_start(&apic->lapic_timer, period, apic_lvtt_period(apic));
}
static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
@@ -706,8 +691,6 @@ static int apic_mmio_write(struct kvm_io
apic_set_reg(apic, APIC_LVTT + 0x10 * i,
lvt_val | APIC_LVT_MASKED);
}
- atomic_set(&apic->lapic_timer.pending, 0);
-
}
break;
@@ -738,7 +721,7 @@ static int apic_mmio_write(struct kvm_io
break;
case APIC_TMICT:
- hrtimer_cancel(&apic->lapic_timer.timer);
+ kvm_timer_cancel(&apic->lapic_timer);
apic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
return 0;
@@ -763,7 +746,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcp
if (!vcpu->arch.apic)
return;
- hrtimer_cancel(&vcpu->arch.apic->lapic_timer.timer);
+ kvm_timer_cancel(&vcpu->arch.apic->lapic_timer);
if (vcpu->arch.apic->regs_page)
__free_page(vcpu->arch.apic->regs_page);
@@ -834,7 +817,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vc
ASSERT(apic != NULL);
/* Stop the timer in case it's a reset to an active apic */
- hrtimer_cancel(&apic->lapic_timer.timer);
+ kvm_timer_cancel(&apic->lapic_timer);
apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
apic_set_reg(apic, APIC_LVR, APIC_VERSION);
@@ -860,7 +843,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vc
}
apic->irr_pending = false;
update_divide_count(apic);
- atomic_set(&apic->lapic_timer.pending, 0);
if (kvm_vcpu_is_bsp(vcpu))
vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
apic_update_ppr(apic);
@@ -889,19 +871,12 @@ int kvm_lapic_enabled(struct kvm_vcpu *v
*----------------------------------------------------------------------
*/
-static bool lapic_is_periodic(struct kvm_timer *ktimer)
-{
- struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic,
- lapic_timer);
- return apic_lvtt_period(apic);
-}
-
int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *lapic = vcpu->arch.apic;
if (lapic && apic_enabled(lapic) && apic_lvt_enabled(lapic, APIC_LVTT))
- return atomic_read(&lapic->lapic_timer.pending);
+ return kvm_timer_has_pending(&lapic->lapic_timer);
return 0;
}
@@ -928,10 +903,6 @@ void kvm_apic_nmi_wd_deliver(struct kvm_
kvm_apic_local_deliver(apic, APIC_LVT0);
}
-static struct kvm_timer_ops lapic_timer_ops = {
- .is_periodic = lapic_is_periodic,
-};
-
static const struct kvm_io_device_ops apic_mmio_ops = {
.read = apic_mmio_read,
.write = apic_mmio_write,
@@ -960,12 +931,8 @@ int kvm_create_lapic(struct kvm_vcpu *vc
memset(apic->regs, 0, PAGE_SIZE);
apic->vcpu = vcpu;
- hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- apic->lapic_timer.timer.function = kvm_timer_fn;
- apic->lapic_timer.t_ops = &lapic_timer_ops;
- apic->lapic_timer.kvm = vcpu->kvm;
- apic->lapic_timer.vcpu = vcpu;
+ kvm_timer_init(vcpu->kvm, &apic->lapic_timer);
+ kvm_timer_vcpu_bind(&apic->lapic_timer, vcpu);
apic->base_address = APIC_DEFAULT_PHYS_BASE;
vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
@@ -1015,9 +982,9 @@ void kvm_inject_apic_timer_irqs(struct k
{
struct kvm_lapic *apic = vcpu->arch.apic;
- if (apic && atomic_read(&apic->lapic_timer.pending) > 0) {
+ if (apic && kvm_timer_has_pending(&apic->lapic_timer)) {
if (kvm_apic_local_deliver(apic, APIC_LVTT))
- atomic_dec(&apic->lapic_timer.pending);
+ kvm_timer_ack(&apic->lapic_timer);
}
}
@@ -1043,24 +1010,11 @@ void kvm_apic_post_state_restore(struct
MSR_IA32_APICBASE_BASE;
apic_set_reg(apic, APIC_LVR, APIC_VERSION);
apic_update_ppr(apic);
- hrtimer_cancel(&apic->lapic_timer.timer);
+ kvm_timer_cancel(&apic->lapic_timer);
update_divide_count(apic);
start_apic_timer(apic);
}
-void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- struct hrtimer *timer;
-
- if (!apic)
- return;
-
- timer = &apic->lapic_timer.timer;
- if (hrtimer_cancel(timer))
- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
-}
-
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
===================================================================
@@ -23,7 +23,7 @@ static int __kvm_timer_fn(struct kvm_vcp
if (waitqueue_active(q))
wake_up_interruptible(q);
- if (ktimer->t_ops->is_periodic(ktimer)) {
+ if (ktimer->periodic) {
hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
restart_timer = 1;
}
@@ -31,7 +31,7 @@ static int __kvm_timer_fn(struct kvm_vcp
return restart_timer;
}
-enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
+static enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
{
int restart_timer;
struct kvm_vcpu *vcpu;
@@ -48,3 +48,58 @@ enum hrtimer_restart kvm_timer_fn(struct
return HRTIMER_NORESTART;
}
+void kvm_timer_init(struct kvm *kvm, struct kvm_timer *ktimer) /* one-time setup; arm later with kvm_timer_start() */
+{
+ ktimer->kvm = kvm; /* owning VM, read by the expiry handler */
+ hrtimer_init(&ktimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ ktimer->timer.function = kvm_timer_fn; /* shared expiry handler for PIT and LAPIC */
+}
+
+void kvm_timer_vcpu_bind(struct kvm_timer *ktimer, struct kvm_vcpu *vcpu)
+{
+ ktimer->vcpu = vcpu; /* vcpu whose waitqueue is woken when the timer fires */
+}
+
+void kvm_timer_start(struct kvm_timer *ktimer, u64 interval, bool periodic) /* (re)arm: interval in ns */
+{
+ hrtimer_cancel(&ktimer->timer); /* stop any previously armed instance first */
+ atomic_set(&ktimer->pending, 0); /* discard expirations that were never injected */
+ ktimer->periodic = periodic;
+ ktimer->period = interval; /* used by the handler to re-arm periodic timers */
+ hrtimer_start(&ktimer->timer, ktime_add_ns(ktime_get(), interval),
+ HRTIMER_MODE_ABS);
+}
+
+void kvm_timer_cancel(struct kvm_timer *ktimer)
+{
+ hrtimer_cancel(&ktimer->timer);
+ atomic_set(&ktimer->pending, 0); /* a cancelled timer has nothing left to inject */
+}
+
+int kvm_timer_has_pending(struct kvm_timer *ktimer)
+{
+ return atomic_read(&ktimer->pending); /* count of expirations not yet acked */
+}
+
+void kvm_timer_ack(struct kvm_timer *ktimer) /* consume one pending expiration */
+{
+ if (atomic_dec_return(&ktimer->pending) < 0) /* undo if already zero: never go negative */
+ atomic_inc(&ktimer->pending);
+}
+
+void kvm_timer_reset(struct kvm_timer *ktimer)
+{
+ atomic_set(&ktimer->pending, 0); /* drop all accumulated expirations; hrtimer untouched */
+}
+
+void kvm_migrate_timer(struct kvm_timer *ktimer) /* move an armed timer to the current cpu */
+{
+ if (hrtimer_cancel(&ktimer->timer)) /* nonzero: it was active, so re-arm at the same expiry */
+ hrtimer_start_expires(&ktimer->timer, HRTIMER_MODE_ABS);
+}
+
+ktime_t kvm_timer_remaining(struct kvm_timer *ktimer)
+{
+ return hrtimer_expires_remaining(&ktimer->timer); /* may be negative once expired */
+}
+
===================================================================
@@ -22,7 +22,6 @@ struct kvm_kpit_channel_state {
struct kvm_kpit_state {
struct kvm_kpit_channel_state channels[3];
struct kvm_timer pit_timer;
- bool is_periodic;
u32 speaker_data_on;
struct mutex lock;
struct kvm_pit *pit;
@@ -52,6 +51,5 @@ void kvm_inject_pit_timer_irqs(struct kv
void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val);
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
void kvm_free_pit(struct kvm *kvm);
-void kvm_pit_reset(struct kvm_pit *pit);
#endif
===================================================================
@@ -94,6 +94,26 @@ void kvm_inject_pending_timer_irqs(struc
}
EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
+static void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->arch.apic;
+
+ if (!apic) /* no in-kernel lapic for this vcpu: nothing to migrate */
+ return;
+
+ kvm_migrate_timer(&apic->lapic_timer);
+}
+
+static void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+
+ if (!kvm_vcpu_is_bsp(vcpu) || !pit) /* PIT timer is bound to the BSP only */
+ return;
+
+ kvm_migrate_timer(&pit->pit_state.pit_timer);
+}
+
void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
__kvm_migrate_apic_timer(vcpu);
===================================================================
@@ -94,8 +94,6 @@ void kvm_pic_reset(struct kvm_kpic_state
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
-void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
-void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu);
void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
int pit_has_pending_timer(struct kvm_vcpu *vcpu);
===================================================================
@@ -4565,6 +4565,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(st
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int r;
+ struct kvm *kvm = vcpu->kvm;
/* We do fxsave: this must be aligned. */
BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
@@ -4578,6 +4579,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu
if (r < 0)
goto free_vcpu;
+ if (kvm->arch.vpit && kvm_vcpu_is_bsp(vcpu))
+ kvm_timer_vcpu_bind(&kvm->arch.vpit->pit_state.pit_timer, vcpu);
+
return 0;
free_vcpu:
kvm_x86_ops->vcpu_free(vcpu);
Hide details of timer emulation behind an interface, and unify the hrtimer based implementation. Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>