@@ -1004,6 +1004,8 @@ struct kvm_x86_ops {
bool (*has_emulated_msr)(int index);
void (*cpuid_update)(struct kvm_vcpu *vcpu);

+ bool (*nested_pagefault)(struct kvm_vcpu *vcpu);
+
struct kvm *(*vm_alloc)(void);
void (*vm_free)(struct kvm *);
int (*vm_init)(struct kvm *kvm);
@@ -1593,4 +1595,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
#define put_smstate(type, buf, offset, val) \
*(type *)((buf) + (offset) - 0x7e00) = val

+bool kvm_mmu_nested_pagefault(struct kvm_vcpu *vcpu);
+
#endif /* _ASM_X86_KVM_HOST_H */
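
Taken together, the two kvm_host.h hunks above add a vendor callback plus a common-code wrapper: the new kvm_x86_ops->nested_pagefault() hook appears intended to let architecture-neutral x86 code ask the vendor module (SVM or VMX) whether the page fault being handled was taken while the hardware walked the guest's own page tables, while kvm_mmu_nested_pagefault() is the wrapper that code outside the vendor modules would call. A hypothetical caller sketch follows the mmu.c hunk below.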
@@ -6169,3 +6169,8 @@ void kvm_mmu_module_exit(void)
unregister_shrinker(&mmu_shrinker);
mmu_audit_disable();
}
+
+bool kvm_mmu_nested_pagefault(struct kvm_vcpu *vcpu)
+{
+ return kvm_x86_ops->nested_pagefault(vcpu);
+}
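
For illustration only, here is a minimal, hypothetical caller of the new wrapper; the function name pf_needs_processing() and the surrounding logic are assumptions, not part of this patch. It shows the obvious use of such a predicate: telling a fault taken on one of the guest's own paging structures apart from a fault on the final guest-physical access.

/*
 * Hypothetical sketch, not part of this patch: a caller that only cares
 * about faults on the final guest-physical access could filter out
 * faults taken on the guest's own paging structures like this.
 */
static bool pf_needs_processing(struct kvm_vcpu *vcpu)
{
	/* true means the CPU faulted while walking the guest's page tables */
	if (kvm_mmu_nested_pagefault(vcpu))
		return false;

	return true;
}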
@@ -7098,6 +7098,11 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
return -ENODEV;
}

+static bool svm_nested_pagefault(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -7109,6 +7114,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
.has_emulated_msr = svm_has_emulated_msr,

+ .nested_pagefault = svm_nested_pagefault,
+
.vcpu_create = svm_create_vcpu,
.vcpu_free = svm_free_vcpu,
.vcpu_reset = svm_vcpu_reset,
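
On the SVM side the callback is wired up as an unconditional stub: svm_nested_pagefault() always returns false, so AMD nested page faults are never classified as faults on the guest's page-table walk. Purely as an illustration of what a non-stub variant might look like (an assumption, not part of this patch), the #NPF error code delivered in EXITINFO1 carries PFERR_GUEST_PAGE_MASK when the fault occurred during the guest page-table walk:

/*
 * Hypothetical sketch, not part of this patch: classify an AMD #NPF
 * using the error code from EXITINFO1.  Assumes it is only called
 * while a nested page fault exit is being handled.
 */
static bool svm_nested_pagefault_sketch(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vmcb->control.exit_info_1 & PFERR_GUEST_PAGE_MASK);
}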
@@ -7682,6 +7682,13 @@ static __exit void hardware_unsetup(void)
free_kvm_area();
}

+static bool vmx_nested_pagefault(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.exit_qualification & EPT_VIOLATION_GVA_TRANSLATED)
+ return false;
+ return true;
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
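
In the VMX implementation above, the decision is made from the EPT-violation exit qualification that handle_ept_violation() caches in vcpu->arch.exit_qualification: when EPT_VIOLATION_GVA_TRANSLATED is set, the violating access was to the translation of the guest linear address, so this is an ordinary fault and the function returns false; when the bit is clear, the access was to one of the guest's paging-structure entries, and the fault is reported as a nested page fault.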
@@ -7693,6 +7700,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_accelerated_tpr = report_flexpriority,
.has_emulated_msr = vmx_has_emulated_msr,

+ .nested_pagefault = vmx_nested_pagefault,
+
.vm_init = vmx_vm_init,
.vm_alloc = vmx_vm_alloc,
.vm_free = vmx_vm_free,