@@ -1256,6 +1256,7 @@ struct kvm_x86_ops {
u64 (*fault_gla)(struct kvm_vcpu *vcpu);
bool (*spt_fault)(struct kvm_vcpu *vcpu);
+ bool (*gpt_translation_fault)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
@@ -7366,6 +7366,17 @@ static bool svm_spt_fault(struct kvm_vcpu *vcpu)
return (vmcb->control.exit_code == SVM_EXIT_NPF);
}
+static bool svm_gpt_translation_fault(struct kvm_vcpu *vcpu)
+{
+	struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
+
+	/*
+	 * PFERR_GUEST_PAGE in exit_info_1 indicates the #NPF was
+	 * raised while walking the guest's own page tables, i.e. a
+	 * guest page-table translation fault.
+	 */
+	return !!(vmcb->control.exit_info_1 & PFERR_GUEST_PAGE_MASK);
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -7513,6 +7524,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.fault_gla = svm_fault_gla,
.spt_fault = svm_spt_fault,
+ .gpt_translation_fault = svm_gpt_translation_fault,
};
static int __init svm_init(void)
@@ -7830,6 +7830,13 @@ static bool vmx_spt_fault(struct kvm_vcpu *vcpu)
return (vmx->exit_reason == EXIT_REASON_EPT_VIOLATION);
}
+static bool vmx_gpt_translation_fault(struct kvm_vcpu *vcpu)
+{
+	/* GVA_TRANSLATED set => fault on the final GPA access, not the walk. */
+	return !(vcpu->arch.exit_qualification &
+		 EPT_VIOLATION_GVA_TRANSLATED);
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
@@ -7990,6 +7997,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.fault_gla = vmx_fault_gla,
.spt_fault = vmx_spt_fault,
+ .gpt_translation_fault = vmx_gpt_translation_fault,
};
static void vmx_cleanup_l1d_flush(void)