@@ -23,6 +23,7 @@
#define KVM_FEATURE_ASYNC_PF 4
#define KVM_FEATURE_STEAL_TIME 5
#define KVM_FEATURE_PV_EOI 6
+#define KVM_FEATURE_VCPU_STATE 7
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
@@ -39,6 +40,7 @@
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
#define MSR_KVM_STEAL_TIME 0x4b564d03
#define MSR_KVM_PV_EOI_EN 0x4b564d04
+#define MSR_KVM_VCPU_STATE 0x4b564d05
struct kvm_steal_time {
__u64 steal;
@@ -51,6 +53,17 @@ struct kvm_steal_time {
#define KVM_STEAL_VALID_BITS ((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
#define KVM_STEAL_RESERVED_MASK (((1 << KVM_STEAL_ALIGNMENT_BITS) - 1 ) << 1)
+struct kvm_vcpu_state {
+ __u64 state;
+ __u32 pad[14];
+};
+/* bits in vcpu_state->state */
+#define KVM_VCPU_STATE_IN_GUEST_MODE 0
+#define KVM_VCPU_STATE_SHOULD_FLUSH 1
+
+#define KVM_VCPU_STATE_ALIGN_BITS 5
+#define KVM_VCPU_STATE_VALID_BITS ((-1ULL << (KVM_VCPU_STATE_ALIGN_BITS + 1)))
+
#define KVM_MAX_MMU_OP_BATCH 32
#define KVM_ASYNC_PF_ENABLED (1 << 0)
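
A note on how these two bits are meant to cooperate may help: KVM_VCPU_STATE_IN_GUEST_MODE is maintained by the host and tells a flush initiator whether the target vCPU is currently running, while KVM_VCPU_STATE_SHOULD_FLUSH lets the initiator queue a TLB flush that the host performs on the next guest entry. Below is a minimal sketch of the initiator side under that assumption, using the per-cpu vcpu_state area added further down; the helper name kvm_try_defer_flush() and the exact ordering are illustrative, not part of this patch.

	/* Illustrative only -- not in this patch. */
	static bool kvm_try_defer_flush(int cpu)
	{
		struct kvm_vcpu_state *v_state = &per_cpu(vcpu_state, cpu);

		/*
		 * Publish the flush request first.  The casts assume the
		 * flag bits live in the low word of ->state (true on x86).
		 */
		set_bit(KVM_VCPU_STATE_SHOULD_FLUSH,
			(unsigned long *)&v_state->state);

		/*
		 * ... then check whether the target is in guest mode; a
		 * running vCPU may never look at the bit, so the caller
		 * must fall back to a conventional flush IPI in that case.
		 */
		return !test_bit(KVM_VCPU_STATE_IN_GUEST_MODE,
				 (unsigned long *)&v_state->state);
	}
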
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -66,6 +66,9 @@ static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
+DEFINE_PER_CPU(struct kvm_vcpu_state, vcpu_state) __aligned(64);
+static int has_vcpu_state;
+
/*
* No need for any "IO delay" on KVM
*/
@@ -302,6 +305,22 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
apic_write(APIC_EOI, APIC_EOI_ACK);
}
+static void kvm_register_vcpu_state(void)
+{
+ int cpu = smp_processor_id();
+ struct kvm_vcpu_state *v_state;
+
+ if (!has_vcpu_state)
+ return;
+
+ v_state = &per_cpu(vcpu_state, cpu);
+ memset(v_state, 0, sizeof(*v_state));
+
+ wrmsrl(MSR_KVM_VCPU_STATE, (__pa(v_state) | KVM_MSR_ENABLED));
+ printk(KERN_INFO "kvm-vcpustate: cpu %d, msr %llx\n",
+ cpu, (unsigned long long) __pa(v_state));
+}
+
void __cpuinit kvm_guest_cpu_init(void)
{
if (!kvm_para_available())
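
The host side of MSR_KVM_VCPU_STATE is not shown in this diff. As a sketch only, the value written by wrmsrl() above would be taken apart roughly as follows on the other end; the helper name is hypothetical.

	/* Hypothetical host-side decode -- not in this patch. */
	static u64 vcpu_state_area(u64 msr_val, bool *enabled)
	{
		*enabled = msr_val & KVM_MSR_ENABLED;
		/*
		 * Clear the low enable/reserved bits to recover the guest
		 * physical address of the 64-byte-aligned per-cpu area.
		 */
		return msr_val & KVM_VCPU_STATE_VALID_BITS;
	}
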
@@ -330,6 +349,9 @@ void __cpuinit kvm_guest_cpu_init(void)
if (has_steal_clock)
kvm_register_steal_time();
+
+ if (has_vcpu_state)
+ kvm_register_vcpu_state();
}
static void kvm_pv_disable_apf(void)
@@ -393,6 +415,14 @@ void kvm_disable_steal_time(void)
wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
+void kvm_disable_vcpu_state(void)
+{
+ if (!has_vcpu_state)
+ return;
+
+ wrmsr(MSR_KVM_VCPU_STATE, 0, 0);
+}
+
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
@@ -410,6 +440,7 @@ static void __cpuinit kvm_guest_cpu_online(void *dummy)
static void kvm_guest_cpu_offline(void *dummy)
{
+ kvm_disable_vcpu_state();
kvm_disable_steal_time();
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
wrmsrl(MSR_KVM_PV_EOI_EN, 0);
@@ -469,6 +500,11 @@ void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
apic_set_eoi_write(kvm_guest_apic_eoi_write);
+#ifdef CONFIG_PARAVIRT_TLB_FLUSH
+ if (kvm_para_has_feature(KVM_FEATURE_VCPU_STATE))
+ has_vcpu_state = 1;
+#endif
+
#ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
register_cpu_notifier(&kvm_cpu_notifier);
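
For reference, kvm_para_has_feature() reduces to a CPUID query against the KVM paravirt feature leaf, so the new bit is advertised to guests the same way as the existing features. A simplified restatement of that existing check (the wrapper name is made up):

	/* Simplified restatement of the existing <asm/kvm_para.h> check. */
	static inline bool guest_has_vcpu_state(void)
	{
		/*
		 * EAX of CPUID leaf KVM_CPUID_FEATURES (0x40000001) holds
		 * the KVM paravirt feature bits.
		 */
		return cpuid_eax(KVM_CPUID_FEATURES) &
			(1UL << KVM_FEATURE_VCPU_STATE);
	}
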