--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -134,6 +134,7 @@ KVM_X86_OP(msr_filter_changed)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
+KVM_X86_OP(fault_gva)
#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
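For context, and not part of the diff: kvm-x86-ops.h is included several times under different definitions of the KVM_X86_OP() macros, so the single line added above is what generates both the declaration and the definition of the static call for the new hook. The consuming side in kvm_host.h looks roughly like this in the kernels this patch targets:

/* kvm_host.h: every KVM_X86_OP(func) becomes a static-call declaration. */
#define KVM_X86_OP(func) \
	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>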
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1750,6 +1750,12 @@ struct kvm_x86_ops {
* Returns vCPU specific APICv inhibit reasons
*/
unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu);
+
+	/*
+	 * Returns the guest virtual address for the fault being handled,
+	 * or ~0ull when no GVA is available.
+	 */
+	u64 (*fault_gva)(struct kvm_vcpu *vcpu);
};
struct kvm_x86_nested_ops {
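With the member and the static call in place, generic x86 code can reach the vendor implementation without an indirect call. A minimal sketch of a caller, assuming a hypothetical wrapper named kvm_fault_gva() (this patch defines no such helper):

/* Hypothetical wrapper: ask the vendor module for the faulting GVA. */
static u64 kvm_fault_gva(struct kvm_vcpu *vcpu)
{
	u64 gva = static_call(kvm_x86_fault_gva)(vcpu);

	/* Both implementations return ~0ull when no GVA is available. */
	if (gva == ~0ull)
		pr_debug("fault has no guest linear address\n");

	return gva;
}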
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4906,6 +4906,12 @@ static int svm_vm_init(struct kvm *kvm)
return 0;
}
+static u64 svm_fault_gva(struct kvm_vcpu *vcpu)
+{
+	/* CR2 holds the faulting GVA on #PF; 0 means no GVA was recorded. */
+	return vcpu->arch.cr2 ? vcpu->arch.cr2 : ~0ull;
+}
+
static struct kvm_x86_ops svm_x86_ops __initdata = {
.name = KBUILD_MODNAME,
@@ -5037,6 +5043,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
+
+ .fault_gva = svm_fault_gva,
};
/*
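A note on the SVM implementation: the VMCB does not record a guest linear address for nested page faults, so svm_fault_gva() falls back to CR2, which is meaningful only for intercepted #PF, and maps a zero CR2 to the ~0ull sentinel. For reference, vcpu->arch.cr2 is refreshed from the hardware-saved value after each VMRUN; mainline's svm_vcpu_run() does roughly:

	/* Propagate the CR2 the guest faulted on out of the VMCB. */
	vcpu->arch.cr2 = svm->vmcb->save.cr2;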
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8233,6 +8233,15 @@ static void vmx_vm_destroy(struct kvm *kvm)
free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm));
}
+static u64 vmx_fault_gva(struct kvm_vcpu *vcpu)
+{
+	/* Bit 7 of the exit qualification says whether the GVA is valid. */
+	if (vcpu->arch.exit_qualification & EPT_VIOLATION_GVA_IS_VALID)
+		return vmcs_readl(GUEST_LINEAR_ADDRESS);
+
+	return ~0ull;
+}
+
static struct kvm_x86_ops vmx_x86_ops __initdata = {
.name = KBUILD_MODNAME,
@@ -8373,6 +8382,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.complete_emulated_msr = kvm_complete_insn_gp,
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
+
+ .fault_gva = vmx_fault_gva,
};
static unsigned int vmx_handle_intel_pt_intr(void)
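Finally, vmx_fault_gva() assumes vcpu->arch.exit_qualification is current for the fault being reported. Mainline's EPT-violation handler already caches it on every such exit; paraphrased (not part of this patch):

static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
	unsigned long exit_qualification = vmx_get_exit_qual(vcpu);

	/*
	 * Cache the qualification so later code (the nested EPT walker,
	 * and now vmx_fault_gva()) can inspect its bits after the fact.
	 */
	vcpu->arch.exit_qualification = exit_qualification;

	/* ... the real handler goes on to convert the qualification into
	 * a page-fault error code and hand it to kvm_mmu_page_fault() ...
	 */
	return 1;
}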