@@ -153,9 +153,10 @@ MSR_KVM_SYSTEM_TIME: 0x12
MSR_KVM_ASYNC_PF_EN: 0x4b564d02
data: Bits 63-6 hold 64-byte aligned physical address of a 32bit memory
- area which must be in guest RAM. Bits 5-1 are reserved and should be
+ area which must be in guest RAM. Bits 5-2 are reserved and should be
zero. Bit 0 is 1 when asynchronous page faults are enabled on the vcpu
- 0 when disabled.
+ 0 when disabled. Bit 1 is 1 if asynchronous page faults can be injected
+ when vcpu is in kernel mode.
Physical address points to 32 bit memory location that will be written
to by the hypervisor at the time of asynchronous page fault injection to
@@ -420,6 +420,7 @@ struct kvm_vcpu_arch {
struct gfn_to_hva_cache data;
u64 msr_val;
u32 id;
+ bool send_user_only;
} apf;
};
@@ -38,6 +38,7 @@
#define KVM_MAX_MMU_OP_BATCH 32
#define KVM_ASYNC_PF_ENABLED (1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
@@ -449,6 +449,9 @@ void __cpuinit kvm_guest_cpu_init(void)
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
u64 pa = __pa(&__get_cpu_var(apf_reason));
+#ifdef CONFIG_PREEMPT
+ pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
if (native_write_msr_safe(MSR_KVM_ASYNC_PF_EN,
pa | KVM_ASYNC_PF_ENABLED, pa >> 32))
return;
@@ -1429,8 +1429,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
- /* Bits 1:5 are resrved, Should be zero */
- if (data & 0x3e)
+ /* Bits 2:5 are reserved, should be zero */
+ if (data & 0x3c)
return 1;
vcpu->arch.apf.msr_val = data;
@@ -1444,6 +1444,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
return 1;
+ vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
kvm_async_pf_wakeup_all(vcpu);
return 0;
}