@@ -367,6 +367,7 @@ struct kvm_vcpu_arch {
cpumask_var_t wbinvd_dirty_mask;
u32 __user *apf_data;
+ bool apf_send_user_only; /* deliver "page not present" only while guest is in user mode */
u32 apf_memslot_ver;
u64 apf_msr_val;
u32 async_pf_id;
@@ -38,6 +38,7 @@
#define KVM_MAX_MMU_OP_BATCH 32
#define KVM_ASYNC_PF_ENABLED (1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
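For reference, a guest that wants notifications while running in kernel mode as well would OR the new flag into the enable value before writing the MSR. A minimal sketch (wrmsrl() is used here for brevity; the actual enable path in kvm_guest_cpu_init() below uses native_write_msr_safe()):

	u64 pa = __pa(&__get_cpu_var(apf_reason));	/* per-cpu reason word */

	/* bit 0 enables the mechanism, bit 1 requests CPL0 delivery too */
	wrmsrl(MSR_KVM_ASYNC_PF_EN,
	       pa | KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_SEND_ALWAYS);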
@@ -429,6 +429,9 @@ void __cpuinit kvm_guest_cpu_init(void)
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) {
u64 pa = __pa(&__get_cpu_var(apf_reason));
+#ifdef CONFIG_PREEMPT
+ pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
if (native_write_msr_safe(MSR_KVM_ASYNC_PF_EN,
pa | KVM_ASYNC_PF_ENABLED, pa >> 32))
return;
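The CONFIG_PREEMPT guard matters because a "page not present" notification puts the faulting task to sleep until the page arrives: a preemptible kernel can reschedule even when the fault interrupted kernel code, so only then is it safe to request delivery at CPL0 as well. A sketch of the guest-side consumer (handler and helper names follow the rest of this series and are illustrative, not verbatim):

	do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
	{
		switch (kvm_read_and_reset_pf_reason()) {
		default:
			do_page_fault(regs, error_code);	/* ordinary #PF */
			break;
		case KVM_PV_REASON_PAGE_NOT_PRESENT:
			/* may sleep: safe at CPL0 only on a preemptible kernel */
			kvm_async_pf_task_wait((u32)read_cr2());
			break;
		case KVM_PV_REASON_PAGE_READY:
			kvm_async_pf_task_wake((u32)read_cr2());
			break;
		}
	}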
@@ -1222,8 +1222,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
int offset = offset_in_page(gpa);
unsigned long addr;
- /* Bits 1:5 are resrved, Should be zero */
- if (data & 0x3e)
+ /* Bits 2:5 are reserved, should be zero */
+ if (data & 0x3c)
return 1;
vcpu->arch.apf_msr_val = data;
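With bit 1 now carrying KVM_ASYNC_PF_SEND_ALWAYS, the reserved mask shrinks from 0x3e (bits 5:1) to 0x3c (bits 5:2). The resulting MSR layout, as enforced by the check above:

	/*
	 * MSR_KVM_ASYNC_PF_EN:
	 *   bits 63:6  gpa of the per-cpu apf_reason word (64-byte aligned)
	 *   bits  5:2  reserved, must be zero
	 *   bit     1  KVM_ASYNC_PF_SEND_ALWAYS - notify at CPL0 too
	 *   bit     0  KVM_ASYNC_PF_ENABLED
	 */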
@@ -1246,6 +1246,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
}
vcpu->arch.apf_memslot_ver = vcpu->kvm->memslot_version;
+ vcpu->arch.apf_send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
kvm_async_pf_wakeup_all(vcpu);
return 0;
}
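Elsewhere the host is expected to consult the new flag before injecting a "page not present" event into the guest. A minimal sketch of that check (the helper name is hypothetical; kvm_x86_ops->get_cpl() is the existing vendor callback):

	/* Skip the PV notification when the guest asked for user-mode-only
	 * delivery and the vcpu currently runs at CPL0. */
	static bool apf_can_notify(struct kvm_vcpu *vcpu)
	{
		if (vcpu->arch.apf_send_user_only &&
		    kvm_x86_ops->get_cpl(vcpu) == 0)
			return false;
		return true;
	}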