
[10/18] x86: kvm guest: pvclock vsyscall support

Message ID 20121024131621.748629922@redhat.com (mailing list archive)
State New, archived

Commit Message

Marcelo Tosatti Oct. 24, 2012, 1:13 p.m. UTC
Allow the hypervisor to update a userspace-visible copy of the
pvclock data, so the clock can be read from the vsyscall path.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
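
For context, and not part of the patch itself: the area registered through
MSR_KVM_USERSPACE_TIME in the patch below is a struct pvclock_vcpu_time_info
that the rest of the series is expected to expose to the vDSO, so the clock
can be sampled without entering the kernel. A minimal sketch of that read
side, following the standard pvclock version/retry protocol, could look like
the code here. The function name pvclock_sample_ns and the rdtsc_ordered()
helper are illustrative only, and the 64x32 multiply is simplified; real
code would use a widening multiply to avoid overflow.

#include <linux/types.h>
#include <asm/barrier.h>	/* rmb() */
#include <asm/msr.h>		/* rdtsc_ordered(), assumed to exist */
#include <asm/pvclock.h>	/* struct pvclock_vcpu_time_info */

static u64 pvclock_sample_ns(const struct pvclock_vcpu_time_info *pvti)
{
	u32 version;
	u64 tsc, delta, ns;

	do {
		/* version is odd while the hypervisor updates the area */
		version = pvti->version;
		rmb();
		tsc = rdtsc_ordered();	/* any ordered TSC read works */
		delta = tsc - pvti->tsc_timestamp;
		/* scale the TSC delta to nanoseconds */
		if (pvti->tsc_shift >= 0)
			delta <<= pvti->tsc_shift;
		else
			delta >>= -pvti->tsc_shift;
		ns = pvti->system_time +
		     ((delta * pvti->tsc_to_system_mul) >> 32);
		rmb();
	} while ((version & 1) || version != pvti->version);

	return ns;
}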




Patch

Index: vsyscall/arch/x86/kernel/kvmclock.c
===================================================================
--- vsyscall.orig/arch/x86/kernel/kvmclock.c
+++ vsyscall/arch/x86/kernel/kvmclock.c
@@ -31,6 +31,9 @@  static int kvmclock = 1;
 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
 static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
 
+/* set when the generic vsyscall pvclock elements are set up */
+bool vsyscall_clock_initializable = false;
+
 static int parse_no_kvmclock(char *arg)
 {
 	kvmclock = 0;
@@ -151,6 +154,28 @@  int kvm_register_clock(char *txt)
 	return ret;
 }
 
+static int kvm_register_vsyscall_clock(char *txt)
+{
+#ifdef CONFIG_PARAVIRT_CLOCK_VSYSCALL
+	int cpu = smp_processor_id();
+	int low, high, ret;
+	struct pvclock_vcpu_time_info *info;
+
+	info = pvclock_get_vsyscall_time_info(cpu);
+
+	low = (int)__pa(info) | 1;
+	high = ((u64)__pa(info) >> 32);
+	ret = native_write_msr_safe(MSR_KVM_USERSPACE_TIME, low, high);
+	printk(KERN_INFO "kvm-clock: cpu %d, msr %x:%x, %s\n",
+	       cpu, high, low, txt);
+
+	return ret;
+#else
+	return 0;
+#endif
+}
+
+
 static void kvm_save_sched_clock_state(void)
 {
 }
@@ -158,6 +183,8 @@  static void kvm_save_sched_clock_state(v
 static void kvm_restore_sched_clock_state(void)
 {
 	kvm_register_clock("primary cpu clock, resume");
+	if (vsyscall_clock_initializable)
+		kvm_register_vsyscall_clock("primary cpu vsyscall clock, resume");
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -168,6 +195,8 @@  static void __cpuinit kvm_setup_secondar
 	 * we shouldn't fail.
 	 */
 	WARN_ON(kvm_register_clock("secondary cpu clock"));
+	if (vsyscall_clock_initializable)
+		kvm_register_vsyscall_clock("secondary cpu vsyscall clock");
 }
 #endif
 
@@ -182,6 +211,8 @@  static void __cpuinit kvm_setup_secondar
 #ifdef CONFIG_KEXEC
 static void kvm_crash_shutdown(struct pt_regs *regs)
 {
+	if (vsyscall_clock_initializable)
+		native_write_msr(MSR_KVM_USERSPACE_TIME, 0, 0);
 	native_write_msr(msr_kvm_system_time, 0, 0);
 	kvm_disable_steal_time();
 	native_machine_crash_shutdown(regs);
@@ -190,6 +221,8 @@  static void kvm_crash_shutdown(struct pt
 
 static void kvm_shutdown(void)
 {
+	if (vsyscall_clock_initializable)
+		native_write_msr(MSR_KVM_USERSPACE_TIME, 0, 0);
 	native_write_msr(msr_kvm_system_time, 0, 0);
 	kvm_disable_steal_time();
 	native_machine_shutdown();
@@ -233,3 +266,27 @@  void __init kvmclock_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
 		pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 }
+
+int kvm_setup_vsyscall_timeinfo(void)
+{
+#ifdef CONFIG_PARAVIRT_CLOCK_VSYSCALL
+	int ret;
+	struct pvclock_vcpu_time_info *vcpu_time;
+	u8 flags;
+
+	vcpu_time = &get_cpu_var(hv_clock);
+	flags = pvclock_read_flags(vcpu_time);
+	put_cpu_var(hv_clock);
+
+	if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+		return 1;
+
+	if ((ret = pvclock_init_vsyscall()))
+		return ret;
+
+	kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
+	vsyscall_clock_initializable = true;
+#endif /* CONFIG_PARAVIRT_CLOCK_VSYSCALL */
+	return 0;
+}
+
Index: vsyscall/arch/x86/kernel/kvm.c
===================================================================
--- vsyscall.orig/arch/x86/kernel/kvm.c
+++ vsyscall/arch/x86/kernel/kvm.c
@@ -42,6 +42,7 @@ 
 #include <asm/apic.h>
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
+#include <asm/kvm_guest.h>
 
 static int kvmapf = 1;
 
@@ -62,6 +63,15 @@  static int parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
+static int kvmclock_vsyscall = 1;
+static int parse_no_kvmclock_vsyscall(char *arg)
+{
+	kvmclock_vsyscall = 0;
+	return 0;
+}
+
+early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
 static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
@@ -468,6 +478,10 @@  void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
+	if (kvm_para_has_feature(KVM_FEATURE_USERSPACE_CLOCKSOURCE)
+	    && kvmclock_vsyscall)
+		kvm_setup_vsyscall_timeinfo();
+
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 	register_cpu_notifier(&kvm_cpu_notifier);
Index: vsyscall/arch/x86/include/asm/kvm_guest.h
===================================================================
--- /dev/null
+++ vsyscall/arch/x86/include/asm/kvm_guest.h
@@ -0,0 +1,8 @@ 
+#ifndef _ASM_X86_KVM_GUEST_H
+#define _ASM_X86_KVM_GUEST_H
+
+extern bool vsyscall_clock_initializable;
+
+int kvm_setup_vsyscall_timeinfo(void);
+
+#endif /* _ASM_X86_KVM_GUEST_H */