@@ -1564,6 +1564,9 @@ static void skip_emulated_instruction(st
vmx_set_interrupt_shadow(vcpu, 0);
}
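+/*
+ * Forward declaration: nested_vmx_check_exception() is defined further
+ * down with the rest of the nested vmexit code, but it is needed here
+ * by vmx_queue_exception().
+ */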
+static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr,
+ bool has_error_code, u32 error_code);
+
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
@@ -1571,6 +1574,9 @@ static void vmx_queue_exception(struct k
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info = nr | INTR_INFO_VALID_MASK;
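+	/*
+	 * If L2 is running and L1 intercepts this exception, turn the
+	 * injection into a vmexit from L2 to L1 instead.
+	 */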
+ if (nested_vmx_check_exception(vcpu, nr, has_error_code, error_code))
+ return;
+
if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
@@ -3670,6 +3676,9 @@ static void vmx_inject_nmi(struct kvm_vc
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
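+	/* NMIs are not injected directly while the nested guest is running. */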
+ if (vmx->nested.nested_mode)
+ return;
+
if (!cpu_has_virtual_nmis()) {
/*
* Tracking the NMI-blocked state in software is built upon
@@ -6513,6 +6522,26 @@ static int nested_vmx_vmexit(struct kvm_
return 0;
}
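+/*
+ * Called when an exception is about to be injected.  If L2 is running and
+ * L1 intercepts the exception, emulate a vmexit from L2 to L1 and record
+ * the exception in the shadow vmcs for L1 to see.  Returns 1 if the exit
+ * was emulated (the caller must not inject into L2), 0 otherwise.
+ */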
+static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr,
+ bool has_error_code, u32 error_code)
+{
+ if (!to_vmx(vcpu)->nested.nested_mode)
+ return 0;
+ if (!nested_vmx_exit_handled(vcpu, false))
+ return 0;
+ nested_vmx_vmexit(vcpu, false);
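+	/* The shadow vmcs must be mapped before its exit fields are written. */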
+ if (!nested_map_current(vcpu))
+ return 1;
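+	/* Report the exception to L1 as an exception vmexit. */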
+ get_shadow_vmcs(vcpu)->vm_exit_reason = EXIT_REASON_EXCEPTION_NMI;
+	get_shadow_vmcs(vcpu)->vm_exit_intr_info = nr |
+		INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK |
+		(has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0);
+ if (has_error_code)
+ get_shadow_vmcs(vcpu)->vm_exit_intr_error_code = error_code;
+ nested_unmap_current(vcpu);
+ return 1;
+}
+
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,