@@ -1411,6 +1411,10 @@ struct kvm_arch {
struct delayed_work kvmclock_update_work;
struct delayed_work kvmclock_sync_work;
+ u64 old_master_kernel_ns;
+ u64 old_master_cycle_now;
+ s64 old_kvmclock_offset;
+
struct kvm_xen_hvm_config xen_hvm_config;
/* reads protected by irq_srcu, writes by irq_lock */
@@ -2819,7 +2819,7 @@ static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
* As with get_kvmclock_base_ns(), this counts from boot time, at the
* frequency of CLOCK_MONOTONIC_RAW (hence adding gtos->offs_boot).
*/
-static int do_kvmclock_base(s64 *t, u64 *tsc_timestamp)
+int do_kvmclock_base(s64 *t, u64 *tsc_timestamp)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
unsigned long seq;
@@ -2861,6 +2861,27 @@ static int do_monotonic(s64 *t, u64 *tsc_timestamp)
return mode;
}
+/*
+ * Debug helper: reconstruct what the guest's kvmclock would read for a given
+ * host TSC value, using a caller-supplied (master_kernel_ns, master_cycle_now)
+ * snapshot plus the VM's kvmclock_offset.  Mirrors the scaling done in
+ * __get_kvmclock(), but with the snapshot parameterized so old and new
+ * snapshots can be compared against the same @tsc sample.
+ *
+ * NOTE(review): hv_clock.version/flags are left uninitialized; this assumes
+ * __pvclock_read_cycles() only consumes tsc_timestamp/system_time/
+ * tsc_shift/tsc_to_system_mul — confirm against pvclock.h.
+ */
+u64 mydebug_get_kvmclock_ns(u64 master_kernel_ns, u64 master_cycle_now, s64 kvmclock_offset, u64 tsc)
+{
+	struct pvclock_vcpu_time_info hv_clock;
+	u64 ret;
+
+	hv_clock.tsc_timestamp = master_cycle_now;
+	hv_clock.system_time = master_kernel_ns + kvmclock_offset;
+
+	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
+	get_cpu();
+
+	/* Derive shift/mul for this CPU's TSC frequency (kHz -> Hz). */
+	kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+			   &hv_clock.tsc_shift,
+			   &hv_clock.tsc_to_system_mul);
+	ret = __pvclock_read_cycles(&hv_clock, tsc);
+
+	put_cpu();
+
+	return ret;
+}
+
static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
@@ -2988,6 +3009,10 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
host_tsc_clocksource = kvm_get_time_and_clockread(
&ka->master_kernel_ns,
&ka->master_cycle_now);
+ ka->old_master_kernel_ns = ka->master_kernel_ns;
+ ka->old_master_cycle_now = ka->master_cycle_now;
+ printk("MYDEBUG: old_master_kernel_ns = %llu, old_master_cycle_now = %llu\n",
+ ka->old_master_kernel_ns, ka->old_master_cycle_now);
ka->use_master_clock = host_tsc_clocksource && vcpus_matched
&& !ka->backwards_tsc_observed
@@ -6989,6 +7014,8 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
else
now_raw_ns = get_kvmclock_base_ns();
ka->kvmclock_offset = data.clock - now_raw_ns;
+ ka->old_kvmclock_offset = ka->kvmclock_offset;
+ printk("MYDEBUG: old_kvmclock_offset = %lld\n", ka->old_kvmclock_offset);
kvm_end_pvclock_update(kvm);
return 0;
}
new file mode 120000
@@ -0,0 +1 @@
+/home/mlin/build.upstream/b.sh
\ No newline at end of file
@@ -399,6 +399,7 @@ int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity,
return mc->nobjs >= min ? 0 : -ENOMEM;
mc->objects[mc->nobjs++] = obj;
}
+
return 0;
}
@@ -998,6 +999,78 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
}
}
+extern int do_kvmclock_base(s64 *t, u64 *tsc_timestamp);
+extern u64 mydebug_get_kvmclock_ns(u64 master_kernel_ns, u64 master_cycle_now, s64 kvmclock_offset, u64 tsc);
+
+/*
+ * debugfs read handler for <kvm-dir>/pvclock.
+ *
+ * Samples the host clock pair right now (do_kvmclock_base), reads the TSC
+ * once, and computes the kvmclock value twice: once with the snapshot cached
+ * at the last pvclock_update_vm_gtod_copy() ("old") and once with the fresh
+ * snapshot ("new"), so the drift between the two can be observed.
+ *
+ * Returns the number of bytes copied to userspace, 0 at EOF, or a negative
+ * errno.
+ */
+static ssize_t kvm_mydebug_pvclock_read(struct file *file, char __user *buf,
+					size_t len, loff_t *ppos)
+{
+	struct kvm *kvm = file->private_data;
+	struct kvm_arch *ka;
+	char buffer[256];
+	int ret;
+	s64 new_master_kernel_ns;	/* s64: matches do_kvmclock_base(s64 *t, ...) */
+	u64 new_master_cycle_now;
+	u64 old_ns, new_ns;
+	u64 tsc;
+
+	if (!kvm) {
+		pr_err("file->private_data is NULL\n");
+		return -EINVAL;
+	}
+
+	ka = &kvm->arch;
+
+	/* Fresh host snapshot; TSC read afterwards is shared by both computations. */
+	do_kvmclock_base(&new_master_kernel_ns, &new_master_cycle_now);
+
+	tsc = rdtsc();
+
+	old_ns = mydebug_get_kvmclock_ns(ka->old_master_kernel_ns,
+					 ka->old_master_cycle_now,
+					 ka->old_kvmclock_offset, tsc);
+	new_ns = mydebug_get_kvmclock_ns(new_master_kernel_ns,
+					 new_master_cycle_now,
+					 ka->old_kvmclock_offset, tsc);
+
+	/*
+	 * scnprintf() returns the number of bytes actually stored (excluding
+	 * the NUL), so no manual truncation clamp is needed.
+	 */
+	ret = scnprintf(buffer, sizeof(buffer),
+			"old: master_kernel_ns: %llu\n"
+			"old: master_cycle_now: %llu\n"
+			"old: ns: %llu\n"
+			"new: master_kernel_ns: %llu\n"
+			"new: master_cycle_now: %llu\n"
+			"new: ns: %llu\n\n"
+			"tsc %llu\n"
+			"kvmclock_offset %lld\n"
+			"diff: ns: %lld\n",
+			ka->old_master_kernel_ns, ka->old_master_cycle_now, old_ns,
+			(u64)new_master_kernel_ns, new_master_cycle_now, new_ns,
+			tsc, ka->old_kvmclock_offset,
+			(s64)(old_ns - new_ns));
+
+	/* Handles *ppos bookkeeping, EOF, and copy_to_user() in one call. */
+	return simple_read_from_buffer(buf, len, ppos, buffer, ret);
+}
+
+/* Read-only debugfs interface exposing old vs. freshly-sampled kvmclock values. */
+static const struct file_operations kvm_pvclock_fops = {
+	.owner = THIS_MODULE,
+	.read = kvm_mydebug_pvclock_read,
+	.open = simple_open,
+};
+
static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{
static DEFINE_MUTEX(kvm_debugfs_lock);
@@ -1063,6 +1136,8 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
&stat_fops_per_vm);
}
+ debugfs_create_file("pvclock", 0444, kvm->debugfs_dentry, kvm, &kvm_pvclock_fops);
+
kvm_arch_create_vm_debugfs(kvm);
return 0;
out_err: