@@ -1290,6 +1290,9 @@ struct kvm_x86_ops {
int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
u64 (*fault_gla)(struct kvm_vcpu *vcpu);
+ /* True if the last VM-exit was a fault on the second-level page
+  * tables (SVM nested page fault / VMX EPT violation). */
+ bool (*spt_fault)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
@@ -7529,6 +7529,20 @@ static u64 svm_fault_gla(struct kvm_vcpu *vcpu)
return svm->vcpu.arch.cr2 ? svm->vcpu.arch.cr2 : ~0ull;
}
+/*
+ * svm_spt_fault() - report whether the current VM-exit is a nested page fault.
+ *
+ * Returns true when the exit code recorded in the VMCB (as selected by
+ * get_host_vmcb()) is SVM_EXIT_NPF, i.e. the guest faulted on the nested
+ * (second-level) page tables rather than on its own page tables.
+ */
+static bool svm_spt_fault(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb *vmcb = get_host_vmcb(svm);
+
+ return (vmcb->control.exit_code == SVM_EXIT_NPF);
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -7679,6 +7693,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
.fault_gla = svm_fault_gla,
+ .spt_fault = svm_spt_fault,
};
static int __init svm_init(void)
@@ -7914,6 +7914,19 @@ static u64 vmx_fault_gla(struct kvm_vcpu *vcpu)
return ~0ull;
}
+/*
+ * vmx_spt_fault() - report whether the current VM-exit is an EPT violation.
+ *
+ * Returns true when the cached exit reason is EXIT_REASON_EPT_VIOLATION,
+ * i.e. the guest faulted on the extended (second-level) page tables.
+ */
+static bool vmx_spt_fault(struct kvm_vcpu *vcpu)
+{
+ const struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ return (vmx->exit_reason == EXIT_REASON_EPT_VIOLATION);
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -8076,6 +8089,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
.fault_gla = vmx_fault_gla,
+ .spt_fault = vmx_spt_fault,
};
static void vmx_cleanup_l1d_flush(void)