@@ -48,9 +48,6 @@ struct arch_timer_cpu {
/* Work queued with the above timer expires */
struct work_struct expired;

- /* Background timer active */
- bool armed;
-
/* Is the timer enabled */
bool enabled;
};
@@ -56,26 +56,16 @@ u64 kvm_phys_timer_read(void)
return timecounter->cc->read(timecounter->cc);
}

-static bool timer_is_armed(struct arch_timer_cpu *timer)
+static void soft_timer_start(struct hrtimer *hrt, u64 ns)
{
- return timer->armed;
-}
-
-/* timer_arm: as in "arm the timer", not as in ARM the company */
-static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
-{
- timer->armed = true;
- hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
+ hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
HRTIMER_MODE_ABS);
}

-static void timer_disarm(struct arch_timer_cpu *timer)
+static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
{
- if (timer_is_armed(timer)) {
- hrtimer_cancel(&timer->timer);
- cancel_work_sync(&timer->expired);
- timer->armed = false;
- }
+ hrtimer_cancel(hrt);
+ cancel_work_sync(work);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -271,7 +261,7 @@ static void kvm_timer_emulate(struct kvm_vcpu *vcpu,
return;

/* The timer has not yet expired, schedule a background timer */
- timer_arm(timer, kvm_timer_compute_delta(timer_ctx));
+ soft_timer_start(&timer->timer, kvm_timer_compute_delta(timer_ctx));
}

/*
@@ -285,8 +275,6 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

- BUG_ON(timer_is_armed(timer));
-
/*
* No need to schedule a background timer if any guest timer has
* already expired, because kvm_vcpu_block will return before putting
@@ -306,13 +294,14 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
* The guest timers have not yet expired, schedule a background timer.
* Set the earliest expiration time among the guest timers.
*/
- timer_arm(timer, kvm_timer_earliest_exp(vcpu));
+ soft_timer_start(&timer->timer, kvm_timer_earliest_exp(vcpu));
}

void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- timer_disarm(timer);
+
+ soft_timer_cancel(&timer->timer, &timer->expired);
}

static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu)
@@ -448,7 +437,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
* This is to cancel the background timer for the physical timer
* emulation if it is set.
*/
- timer_disarm(timer);
+ soft_timer_cancel(&timer->timer, &timer->expired);

/*
* The guest could have modified the timer registers or the timer
@@ -615,7 +604,7 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

- timer_disarm(timer);
+ soft_timer_cancel(&timer->timer, &timer->expired);
kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
}