new file mode 100644
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_csr.h>
+#include <asm/kvm_vcpu.h>
+
+/* low level hrtimer wake routine */
+enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
+{
+ struct kvm_vcpu *vcpu;
+
+ vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
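+ /* Queue the guest timer interrupt and wake the vCPU if it is blocked */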
+ _kvm_queue_irq(vcpu, INT_TI);
+ rcuwait_wake_up(&vcpu->wait);
+ return kvm_count_timeout(vcpu);
+}
+
+/**
+ * ktime_to_tick() - Scale ktime_t to a 64-bit stable timer count.
+ * @vcpu: Virtual CPU.
+ * @now: ktime to scale.
+ *
+ * Caches the dynamic nanosecond bias in vcpu->arch.timer_dyn_bias.
+ *
+ * Returns: The stable timer count corresponding to @now.
+ */
+static unsigned long ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
+{
+ s64 now_ns, periods;
+ unsigned long delta;
+
+ now_ns = ktime_to_ns(now);
+ delta = now_ns + vcpu->arch.timer_dyn_bias;
+
+ if (delta >= vcpu->arch.timer_period_ns) {
+ /* If delta is out of safe range the bias needs adjusting */
+ periods = div64_s64(now_ns, vcpu->arch.timer_period_ns);
+ vcpu->arch.timer_dyn_bias = -periods * vcpu->arch.timer_period_ns;
+ /* Recalculate delta with new bias */
+ delta = now_ns + vcpu->arch.timer_dyn_bias;
+ }
+
+ /* We've ensured that delta < timer_period_ns */
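+ /* Tick rate is timer_mhz * 2^20 Hz; MNSEC_PER_SEC is NSEC_PER_SEC >> 20 */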
+ return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
+}
+
+/**
+ * kvm_resume_hrtimer() - Resume hrtimer, updating expiry.
+ * @vcpu: Virtual CPU.
+ * @now: ktime at point of resume.
+ * @val: Stable timer count at the point of resume.
+ *
+ * Resumes the timer and updates the timer expiry based on @now and @val.
+ */
+static void kvm_resume_hrtimer(struct kvm_vcpu *vcpu, ktime_t now,
+ unsigned long val)
+{
+ unsigned long delta;
+ ktime_t expire;
+
+ /*
+ * Convert the remaining stable timer count to a nanosecond delta
+ * and compute the absolute expiry time relative to @now
+ */
+ delta = div_u64(val * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
+ expire = ktime_add_ns(now, delta);
+
+ /* Update hrtimer to use new timeout */
+ hrtimer_cancel(&vcpu->arch.swtimer);
+ hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
+}
+
+/**
+ * kvm_init_timer() - Initialise stable timer.
+ * @vcpu: Virtual CPU.
+ * @timer_hz: Frequency of timer.
+ *
+ * Initialise the timer to the specified frequency, zero it, and set it going if
+ * it's enabled.
+ */
+void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
+{
+ ktime_t now;
+ unsigned long ticks;
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ ticks = (unsigned long)MNSEC_PER_SEC * CSR_TCFG_VAL;
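+ /* timer_mhz counts in units of 2^20 Hz, matching the MNSEC_PER_SEC scaling */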
+ vcpu->arch.timer_mhz = timer_hz >> 20;
+ vcpu->arch.timer_period_ns = div_u64(ticks, vcpu->arch.timer_mhz);
+ vcpu->arch.timer_dyn_bias = 0;
+
+ /* Starting at 0 */
+ ticks = 0;
+ now = ktime_get();
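+ /* Record the bias so the guest stable timer reads zero at this instant */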
+ vcpu->arch.timer_bias = ticks - ktime_to_tick(vcpu, now);
+ vcpu->arch.timer_bias &= CSR_TCFG_VAL;
+ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TVAL, ticks);
+}
+
+/**
+ * kvm_count_timeout() - Push timer forward on timeout.
+ * @vcpu: Virtual CPU.
+ *
+ * Handle an hrtimer event by pushing the hrtimer forward one period.
+ *
+ * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
+ */
+enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
+{
+ unsigned long cfg;
+
+ /* In periodic mode, push the expiry forward by one timer period */
+ cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
+ if (cfg & CSR_TCFG_PERIOD) {
+ /* The period is in timer ticks; convert it to nanoseconds */
+ hrtimer_add_expires_ns(&vcpu->arch.swtimer,
+ div_u64((cfg & CSR_TCFG_VAL) * MNSEC_PER_SEC,
+ vcpu->arch.timer_mhz));
+ return HRTIMER_RESTART;
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * kvm_restore_timer() - Restore timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Restore soft timer state from saved context.
+ */
+void kvm_restore_timer(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+ ktime_t saved_ktime, now;
+ unsigned long val, new, delta;
+ int expired = 0;
+ unsigned long cfg;
+
+ /* Set guest stable timer cfg csr */
+ cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+ if (!(cfg & CSR_TCFG_EN)) {
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
+ kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
+ return;
+ }
+
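+ /* Timer is enabled: resync the hard timer from the saved soft state */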
+ now = ktime_get();
+ saved_ktime = vcpu->arch.stable_ktime_saved;
+ val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
+
+ /* Check whether the soft timer has expired while the vCPU was scheduled out */
+ delta = ktime_to_tick(vcpu, ktime_sub(now, saved_ktime));
+ if (delta >= val) {
+ expired = 1;
+ if (cfg & CSR_TCFG_PERIOD)
+ new = (delta - val) % (cfg & CSR_TCFG_VAL);
+ else
+ new = 1;
+ } else {
+ new = val - delta;
+ }
+
+ new &= CSR_TCFG_VAL;
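+ /* Write the recomputed config and count back to the hard timer */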
+ write_gcsr_timercfg(cfg);
+ write_gcsr_timertick(new);
+ if (expired)
+ _kvm_queue_irq(vcpu, INT_TI);
+}
+
+/**
+ * kvm_acquire_timer() - Switch to hard timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Restore hard timer state on top of existing soft timer state if possible.
+ *
+ * Since the hard timer won't remain active across preemption, the caller
+ * should disable preemption.
+ */
+void kvm_acquire_timer(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags, guestcfg;
+
+ guestcfg = read_csr_gcfg();
+ if (!(guestcfg & CSR_GCFG_TIT))
+ return;
+
+ /* enable guest access to hard timer */
+ write_csr_gcfg(guestcfg & ~CSR_GCFG_TIT);
+
+ /*
+ * Freeze the soft-timer and sync the guest stable timer with it. We do
+ * this with interrupts disabled to avoid latency.
+ */
+ local_irq_save(flags);
+ hrtimer_cancel(&vcpu->arch.swtimer);
+ local_irq_restore(flags);
+}
+
+/**
+ * _kvm_save_timer() - Switch to software emulation of guest timer.
+ * @vcpu: Virtual CPU.
+ * @val: Pointer through which the current stable timer count is returned.
+ *
+ * Save guest timer state and switch to software emulation of the guest
+ * timer. The hard timer must already be in use, so preemption should be
+ * disabled.
+ *
+ * Returns: The ktime at the point of save.
+ */
+static ktime_t _kvm_save_timer(struct kvm_vcpu *vcpu, unsigned long *val)
+{
+ unsigned long end_time;
+ ktime_t before_time;
+
+ before_time = ktime_get();
+
+ /* Record a final stable timer count which we will transfer to the soft-timer */
+ end_time = read_gcsr_timertick();
+ *val = end_time;
+
+ kvm_resume_hrtimer(vcpu, before_time, end_time);
+ return before_time;
+}
+
+/**
+ * kvm_save_timer() - Save guest timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Save guest timer state and switch to soft guest timer if hard timer was in
+ * use.
+ */
+void kvm_save_timer(struct kvm_vcpu *vcpu)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+ unsigned long guestcfg, val;
+ ktime_t save_ktime;
+
+ preempt_disable();
+ guestcfg = read_csr_gcfg();
+ if (!(guestcfg & CSR_GCFG_TIT)) {
+ /* disable guest use of hard timer */
+ write_csr_gcfg(guestcfg | CSR_GCFG_TIT);
+
+ /* save hard timer state */
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
+ if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN) {
+ save_ktime = _kvm_save_timer(vcpu, &val);
+ kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TVAL, val);
+ vcpu->arch.stable_ktime_saved = save_ktime;
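+ /* Treat a saved count of all ones as an already-expired timer */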
+ if (val == CSR_TCFG_VAL)
+ _kvm_queue_irq(vcpu, INT_TI);
+ } else {
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
+ }
+ }
+
+ /* save timer-related state to vCPU context */
+ kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
+ preempt_enable();
+}
+
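+/**
+ * kvm_reset_timer() - Reset guest timer state.
+ * @vcpu: Virtual CPU.
+ *
+ * Disable the hard timer, zero the soft TCFG copy and cancel any pending
+ * soft-timer.
+ */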
+void kvm_reset_timer(struct kvm_vcpu *vcpu)
+{
+ write_gcsr_timercfg(0);
+ kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
+ hrtimer_cancel(&vcpu->arch.swtimer);
+}