@@ -12,6 +12,10 @@
#include <linux/hrtimer.h>
struct kvm_guest_timer {
+ /* Guest timer frequency in Hz */
+ u64 frequency;
+ /* True when scaling is needed because guest and host frequencies differ */
+ bool need_scale;
+ /* Fixed-point multiplier and shift for host-to-guest cycle conversion */
+ u64 scale_mult;
+ u64 scale_shift;
/* Time delta value */
u64 time_delta;
};
@@ -38,4 +42,9 @@ int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
int kvm_riscv_guest_timer_init(struct kvm *kvm);
+static inline bool kvm_riscv_need_scale(struct kvm_guest_timer *gt)
+{
+ return gt->need_scale;
+}
+
#endif
@@ -15,9 +15,38 @@
#include <asm/delay.h>
#include <asm/kvm_vcpu_timer.h>
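+/*
+ * Fraction bits of the cycle-scaling multiplier and the frequency
+ * mismatch (in Hz) below which guest and host are treated as identical.
+ */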
+#define SCALE_SHIFT_VALUE 48
+#define SCALE_TOLERANCE_HZ 1000
+
+static void kvm_riscv_set_time_freq(struct kvm_guest_timer *gt, u64 freq)
+{
+ /*
+ * Treat the guest and host time frequencies as identical when they
+ * differ by less than SCALE_TOLERANCE_HZ; otherwise precompute a
+ * fixed-point multiplier for converting host cycles to guest cycles.
+ */
+ u64 diff = riscv_timebase > freq ?
+ riscv_timebase - freq : freq - riscv_timebase;
+ gt->need_scale = (diff >= SCALE_TOLERANCE_HZ);
+ if (gt->need_scale) {
+ gt->scale_shift = SCALE_SHIFT_VALUE;
+ gt->scale_mult = mul_u64_u32_div(1ULL << gt->scale_shift,
+ freq, riscv_timebase);
+ }
+ gt->frequency = freq;
+}
+
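+/*
+ * Convert a host cycle count to guest cycles. When scaling is needed,
+ * this computes time * (guest_freq / host_freq) as a fixed-point
+ * multiply-and-shift: (time * scale_mult) >> scale_shift.
+ */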
+static u64 kvm_riscv_scale_time(struct kvm_guest_timer *gt, u64 time)
+{
+ if (kvm_riscv_need_scale(gt))
+ return mul_u64_u64_shr(time, gt->scale_mult, gt->scale_shift);
+
+ return time;
+}
+
static u64 kvm_riscv_current_cycles(struct kvm_guest_timer *gt)
{
- return get_cycles64() + gt->time_delta;
+ u64 host_time = get_cycles64();
+
+ return kvm_riscv_scale_time(gt, host_time) + gt->time_delta;
}
static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
@@ -33,7 +62,7 @@ static u64 kvm_riscv_delta_cycles2ns(u64 cycles,
cycles_delta = cycles - cycles_now;
else
cycles_delta = 0;
- delta_ns = mul_u64_u64_div_u64(cycles_delta, NSEC_PER_SEC, riscv_timebase);
+ delta_ns = mul_u64_u64_div_u64(cycles_delta, NSEC_PER_SEC, gt->frequency);
local_irq_restore(flags);
return delta_ns;
@@ -106,7 +135,7 @@ int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
switch (reg_num) {
case KVM_REG_RISCV_TIMER_REG(frequency):
- reg_val = riscv_timebase;
+ reg_val = gt->frequency;
break;
case KVM_REG_RISCV_TIMER_REG(time):
reg_val = kvm_riscv_current_cycles(gt);
@@ -150,10 +179,10 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
switch (reg_num) {
case KVM_REG_RISCV_TIMER_REG(frequency):
- ret = -EOPNOTSUPP;
+ kvm_riscv_set_time_freq(gt, reg_val);
break;
case KVM_REG_RISCV_TIMER_REG(time):
- gt->time_delta = reg_val - get_cycles64();
+ gt->time_delta = reg_val - kvm_riscv_scale_time(gt, get_cycles64());
break;
case KVM_REG_RISCV_TIMER_REG(compare):
t->next_cycles = reg_val;
@@ -219,6 +248,7 @@ int kvm_riscv_guest_timer_init(struct kvm *kvm)
struct kvm_guest_timer *gt = &kvm->arch.timer;
gt->time_delta = -get_cycles64();
+ gt->frequency = riscv_timebase;
return 0;
}
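
For reference, the fixed-point conversion used above can be checked in isolation. The following is a standalone userspace sketch, not part of the patch: the 25 MHz host and 10 MHz guest frequencies are made-up example values, and a GCC/Clang unsigned __int128 intermediate stands in for the kernel's mul_u64_u32_div() and mul_u64_u64_shr() helpers.

#include <stdint.h>
#include <stdio.h>

#define SCALE_SHIFT_VALUE 48

int main(void)
{
	/* Hypothetical frequencies: 25 MHz host timebase, 10 MHz guest timer. */
	uint64_t host_freq = 25000000;
	uint64_t guest_freq = 10000000;

	/* scale_mult = guest_freq / host_freq in fixed point (48 fraction bits). */
	uint64_t scale_mult =
		(uint64_t)(((unsigned __int128)guest_freq << SCALE_SHIFT_VALUE) / host_freq);

	/* Scale one second worth of host cycles into guest cycles. */
	uint64_t host_cycles = host_freq;
	uint64_t guest_cycles =
		(uint64_t)(((unsigned __int128)host_cycles * scale_mult) >> SCALE_SHIFT_VALUE);

	/* Expect guest_cycles to be roughly guest_freq (~10000000). */
	printf("scale_mult = %llu, guest_cycles = %llu\n",
	       (unsigned long long)scale_mult,
	       (unsigned long long)guest_cycles);
	return 0;
}

With these example numbers, scale_mult comes out to about 0.4 shifted left by 48 bits, and one host second (25,000,000 cycles) scales to roughly 10,000,000 guest cycles, matching the guest-advertised frequency.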