@@ -256,5 +256,12 @@ struct kvm_reinject_control {
#define KVM_X86_VCPU_MSRS 1000
#define KVM_X86_VCPU_CPUID 1001
#define KVM_X86_VCPU_LAPIC 1002
+#define KVM_X86_VCPU_NMI 1003
+
+struct kvm_nmi_state {
+	__u8 pending;	/* NMI queued for injection */
+	__u8 masked;	/* NMI delivery blocked, e.g. handler in progress */
+	__u8 pad1[2];	/* reserved, must be zero */
+};

#endif /* _ASM_X86_KVM_H */
@@ -513,6 +513,9 @@ struct kvm_x86_ops {
unsigned char *hypercall_addr);
void (*set_irq)(struct kvm_vcpu *vcpu);
void (*set_nmi)(struct kvm_vcpu *vcpu);
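+	/* accessors for the vcpu's NMI-blocked flag, used for save/restore */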
+	int (*get_nmi_mask)(struct kvm_vcpu *vcpu);
+	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, int masked);
void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code);
int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
@@ -2456,6 +2456,27 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

+static int svm_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
+}
+
+static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, int masked)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (masked) {
+		svm->vcpu.arch.hflags |= HF_NMI_MASK;
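+		/* IRET ends the NMI handler; intercept it to see unmasking */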
+		svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+	} else {
+		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
+		svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+	}
+}
+
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -2897,6 +2918,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.queue_exception = svm_queue_exception,
.interrupt_allowed = svm_interrupt_allowed,
.nmi_allowed = svm_nmi_allowed,
+	.get_nmi_mask = svm_get_nmi_mask,
+	.set_nmi_mask = svm_set_nmi_mask,
.enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept,
@@ -2619,6 +2619,35 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
GUEST_INTR_STATE_NMI));
}

+static int vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+	if (!cpu_has_virtual_nmis())
+		return to_vmx(vcpu)->soft_vnmi_blocked;
+	else
+		return !!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+			  GUEST_INTR_STATE_NMI);
+}
+
+static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, int masked)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (!cpu_has_virtual_nmis()) {
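+		/* CPU lacks virtual NMIs: NMI blocking is tracked in software */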
+		if (vmx->soft_vnmi_blocked != masked) {
+			vmx->soft_vnmi_blocked = masked;
+			vmx->vnmi_blocked_time = 0;
+		}
+	} else {
+		if (masked)
+			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+				      GUEST_INTR_STATE_NMI);
+		else
+			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+					GUEST_INTR_STATE_NMI);
+	}
+}
+
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
@@ -3957,6 +3986,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.queue_exception = vmx_queue_exception,
.interrupt_allowed = vmx_interrupt_allowed,
.nmi_allowed = vmx_nmi_allowed,
+	.get_nmi_mask = vmx_get_nmi_mask,
+	.set_nmi_mask = vmx_set_nmi_mask,
.enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept,
@@ -4674,6 +4674,20 @@ out_free_lapic:
kfree(lapic);
break;
}
+	case KVM_X86_VCPU_NMI: {
+		struct kvm_nmi_state nmi;
+
+		memset(&nmi, 0, sizeof(nmi));	/* don't leak pad1 to userspace */
+		vcpu_load(vcpu);
+		nmi.pending = vcpu->arch.nmi_pending;
+		nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+		vcpu_put(vcpu);
+		r = -EFAULT;
+		if (copy_to_user(argp, &nmi, sizeof(struct kvm_nmi_state)))
+			break;
+		r = 0;
+		break;
+	}
default:
r = -EINVAL;
}
@@ -4721,6 +4735,22 @@ out_free_lapic:
kfree(lapic);
break;
}
+	case KVM_X86_VCPU_NMI: {
+		struct kvm_nmi_state nmi;
+
+		r = -EFAULT;
+		if (copy_from_user(&nmi, argp, sizeof(struct kvm_nmi_state)))
+			break;
+		r = -EINVAL;
+		if (nmi.pad1[0] || nmi.pad1[1])	/* reserved, must be zero */
+			break;
+		vcpu_load(vcpu);
+		vcpu->arch.nmi_pending = nmi.pending;
+		kvm_x86_ops->set_nmi_mask(vcpu, nmi.masked);
+		vcpu_put(vcpu);
+		r = 0;
+		break;
+	}
default:
r = -EINVAL;
}
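
For illustration, a rough sketch of how userspace might save and restore this state follows. Only struct kvm_nmi_state and KVM_X86_VCPU_NMI come from the patch above; the ioctl that carries the KVM_X86_VCPU_* substates is not part of this diff, so the transport below is stubbed out under assumed names (vcpu_get_substate/vcpu_set_substate), not a real KVM API.

/* Sketch under stated assumptions; build with: cc -o nmi_state nmi_state.c */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_X86_VCPU_NMI 1003		/* from the patch */

struct kvm_nmi_state {			/* from the patch */
	uint8_t pending;
	uint8_t masked;
	uint8_t pad1[2];
};

/*
 * Hypothetical transport stubs: wire these to whatever ioctl carries
 * KVM_X86_VCPU_* substates on your kernel; that ioctl is outside the
 * scope of the diff above.
 */
static int vcpu_get_substate(int vcpu_fd, unsigned int type, void *buf)
{
	(void)vcpu_fd; (void)type; (void)buf;
	return -ENOSYS;			/* replace with the real ioctl call */
}

static int vcpu_set_substate(int vcpu_fd, unsigned int type, const void *buf)
{
	(void)vcpu_fd; (void)type; (void)buf;
	return -ENOSYS;			/* replace with the real ioctl call */
}

int main(void)
{
	struct kvm_nmi_state nmi = { 0 };	/* pad1 must stay zero */
	int vcpu_fd = -1;			/* placeholder vcpu fd */

	/* source side of a migration: read pending/masked NMI state */
	if (vcpu_get_substate(vcpu_fd, KVM_X86_VCPU_NMI, &nmi))
		fprintf(stderr, "get NMI state failed\n");

	/* destination side: write it back before resuming the guest */
	if (vcpu_set_substate(vcpu_fd, KVM_X86_VCPU_NMI, &nmi))
		fprintf(stderr, "set NMI state failed\n");

	return 0;
}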