@@ -7,6 +7,7 @@
#define __GZVM_ARCH_COMMON_H__
#include <linux/arm-smccc.h>
+#include <linux/clocksource.h>
enum {
GZVM_FUNC_CREATE_VM = 0,
@@ -85,6 +86,10 @@ int gzvm_hypcall_wrapper(unsigned long a0, unsigned long a1,
* @lr: The array of LRs(list registers).
* @vtimer_offset: The offset maintained by hypervisor that is host cycle count
* when guest VM startup.
+ * @vtimer_delay: The remaining time before the next timer tick is triggered
+ * while the VM is running.
+ * @vtimer_migrate: Indicates whether the guest virtual timer needs to be
+ * migrated to the host software timer.
*
* - Keep the same layout of hypervisor data struct.
* - Sync list registers back for acking virtual device interrupt status.
@@ -94,8 +99,18 @@ struct gzvm_vcpu_hwstate {
__le32 __pad;
__le64 lr[GIC_V3_NR_LRS];
__le64 vtimer_offset;
+ __le64 vtimer_delay;
+ __le32 vtimer_migrate;
};
+/*
+ * struct timecycle - cached factors for converting arch timer cycles to
+ * nanoseconds, computed once by clocks_calc_mult_shift() at driver init.
+ */
+struct timecycle {
+	u32 mult;  /* multiplier applied to the cycle count */
+	u32 shift; /* right shift applied after multiplication */
+};
+
+/* Accessors for the cached cycle-to-ns conversion factors */
+u32 gzvm_vtimer_get_clock_mult(void);
+u32 gzvm_vtimer_get_clock_shift(void);
+
static inline unsigned int
assemble_vm_vcpu_tuple(u16 vmid, u16 vcpuid)
{
@@ -11,6 +11,22 @@
#include <linux/soc/mediatek/gzvm_drv.h>
#include "gzvm_arch_common.h"
+/**
+ * gzvm_vcpu_arch_get_timer_delay_ns() - Remaining guest vtimer delay in ns
+ * @vcpu: Pointer to struct gzvm_vcpu.
+ *
+ * Converts the hypervisor-maintained vtimer_delay cycle count into
+ * nanoseconds using the mult/shift factors cached at driver init.
+ *
+ * Return: 0 when no migration to the host software timer is requested
+ * (vtimer_migrate == 0); otherwise the delay in ns to program the hrtimer.
+ */
+u64 gzvm_vcpu_arch_get_timer_delay_ns(struct gzvm_vcpu *vcpu)
+{
+	u64 ns;
+
+	if (vcpu->hwstate->vtimer_migrate) {
+		ns = clocksource_cyc2ns(le64_to_cpu(vcpu->hwstate->vtimer_delay),
+					gzvm_vtimer_get_clock_mult(),
+					gzvm_vtimer_get_clock_shift());
+	} else {
+		ns = 0;
+	}
+
+	/* 0 means no migration is needed; non-zero is the hrtimer delay */
+	return ns;
+}
+
int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
bool is_write, __u64 *data)
{
@@ -15,6 +15,18 @@
#define PAR_PA47_MASK GENMASK_ULL(47, 12)
+/* Cycle-to-ns factors, filled in once by gzvm_arch_drv_init() */
+static struct timecycle clock_scale_factor;
+
+/* Return the cached cycle-to-ns multiplier */
+u32 gzvm_vtimer_get_clock_mult(void)
+{
+	return clock_scale_factor.mult;
+}
+
+/* Return the cached cycle-to-ns shift */
+u32 gzvm_vtimer_get_clock_shift(void)
+{
+	return clock_scale_factor.shift;
+}
+
/**
* gzvm_hypcall_wrapper() - the wrapper for hvc calls
* @a0: arguments passed in registers 0
@@ -90,6 +102,18 @@ int gzvm_arch_probe(struct gzvm_version drv_version,
return 0;
}
+/**
+ * gzvm_arch_drv_init() - One-time arch-specific driver initialization
+ *
+ * Precomputes the mult/shift pair that converts arch timer cycles
+ * (at the frequency reported by arch_timer_get_cntfrq()) to nanoseconds,
+ * for later use by gzvm_vcpu_arch_get_timer_delay_ns().
+ *
+ * Return: 0 on success (no failure path currently).
+ */
+int gzvm_arch_drv_init(void)
+{
+	/*
+	 * Derive mult/shift for cycles -> ns; the last argument bounds the
+	 * conversion range in seconds (30 here — presumably chosen to cover
+	 * the longest expected vtimer delay without overflow; confirm).
+	 */
+	clocks_calc_mult_shift(&clock_scale_factor.mult,
+			       &clock_scale_factor.shift,
+			       arch_timer_get_cntfrq(),
+			       NSEC_PER_SEC,
+			       30);
+
+	return 0;
+}
+
int gzvm_arch_set_memregion(u16 vm_id, size_t buf_size,
phys_addr_t region)
{
@@ -233,6 +233,10 @@ static int gzvm_drv_probe(struct platform_device *pdev)
gzvm_drv.hyp_version.major, gzvm_drv.hyp_version.minor,
gzvm_drv.hyp_version.sub);
+ ret = gzvm_arch_drv_init();
+ if (ret)
+ return ret;
+
ret = misc_register(&gzvm_dev);
if (ret)
return ret;
@@ -16,6 +16,28 @@
/* maximum size needed for holding an integer */
#define ITOA_MAX_LEN 12
+/*
+ * hrtimer callback for the migrated guest vtimer. One-shot: no payload
+ * work is done here, and the timer is never re-armed from the callback.
+ */
+static enum hrtimer_restart gzvm_vtimer_expire(struct hrtimer *hrt)
+{
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * Initialize the per-vCPU software vtimer on top of hrtimer, using
+ * CLOCK_MONOTONIC in hard-IRQ expiry mode (HRTIMER_MODE_ABS_HARD).
+ */
+static void gzvm_vtimer_init(struct gzvm_vcpu *vcpu)
+{
+	/* gzvm_vtimer init based on hrtimer */
+	hrtimer_init(&vcpu->gzvm_vtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
+	vcpu->gzvm_vtimer.function = gzvm_vtimer_expire;
+}
+
+/*
+ * Arm the vCPU's software vtimer to fire @ns nanoseconds from now
+ * (absolute CLOCK_MONOTONIC deadline, hard-IRQ expiry).
+ */
+void gzvm_vtimer_set(struct gzvm_vcpu *vcpu, u64 ns)
+{
+	hrtimer_start(&vcpu->gzvm_vtimer, ktime_add_ns(ktime_get(), ns), HRTIMER_MODE_ABS_HARD);
+}
+
+/*
+ * Cancel the vCPU's software vtimer; hrtimer_cancel() waits for a
+ * concurrently running callback to finish before returning.
+ */
+void gzvm_vtimer_release(struct gzvm_vcpu *vcpu)
+{
+	hrtimer_cancel(&vcpu->gzvm_vtimer);
+}
+
static long gzvm_vcpu_update_one_reg(struct gzvm_vcpu *vcpu,
void __user *argp,
bool is_write)
@@ -193,6 +215,7 @@ static void gzvm_destroy_vcpu(struct gzvm_vcpu *vcpu)
if (!vcpu)
return;
+ hrtimer_cancel(&vcpu->gzvm_vtimer);
gzvm_arch_destroy_vcpu(vcpu->gzvm->vm_id, vcpu->vcpuid);
/* clean guest's data */
memset(vcpu->run, 0, GZVM_VCPU_RUN_MAP_SIZE);
@@ -271,6 +294,7 @@ int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid)
goto free_vcpu_run;
gzvm->vcpus[cpuid] = vcpu;
+ gzvm_vtimer_init(vcpu);
return ret;
free_vcpu_run:
@@ -129,6 +129,7 @@ struct gzvm_vcpu {
struct mutex lock;
struct gzvm_vcpu_run *run;
struct gzvm_vcpu_hwstate *hwstate;
+ struct hrtimer gzvm_vtimer;
};
struct gzvm_pinned_page {
@@ -242,11 +243,17 @@ int gzvm_vm_allocate_guest_page(struct gzvm *gzvm, struct gzvm_memslot *slot,
int gzvm_vm_ioctl_create_vcpu(struct gzvm *gzvm, u32 cpuid);
int gzvm_arch_vcpu_update_one_reg(struct gzvm_vcpu *vcpu, __u64 reg_id,
bool is_write, __u64 *data);
+/* One-time arch init hook: precomputes cycle-to-ns mult/shift factors */
+int gzvm_arch_drv_init(void);
int gzvm_arch_create_vcpu(u16 vm_id, int vcpuid, void *run);
int gzvm_arch_vcpu_run(struct gzvm_vcpu *vcpu, __u64 *exit_reason);
int gzvm_arch_destroy_vcpu(u16 vm_id, int vcpuid);
int gzvm_arch_inform_exit(u16 vm_id);
+/* ns until the guest vtimer fires; 0 when no migration is requested */
+u64 gzvm_vcpu_arch_get_timer_delay_ns(struct gzvm_vcpu *vcpu);
+
+/* Arm / cancel the per-vCPU software vtimer (hrtimer-backed) */
+void gzvm_vtimer_set(struct gzvm_vcpu *vcpu, u64 ns);
+void gzvm_vtimer_release(struct gzvm_vcpu *vcpu);
+
int gzvm_find_memslot(struct gzvm *vm, u64 gpa);
int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu);
bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu);