@@ -3621,13 +3621,28 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
-        return !!(vcpu->arch.hflags & HF_NMI_MASK);
+        struct vcpu_svm *svm = to_svm(vcpu);
+
+        if (is_vnmi_enabled(svm))
+                return is_vnmi_mask_set(svm);
+        else
+                return !!(vcpu->arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
        struct vcpu_svm *svm = to_svm(vcpu);

+        if (is_vnmi_enabled(svm)) {
+                if (is_smm(vcpu)) {
+                        if (masked)
+                                set_vnmi_mask(svm);
+                        else
+                                clear_vnmi_mask(svm);
+                }
+                return;
+        }
+
        if (masked) {
                vcpu->arch.hflags |= HF_NMI_MASK;
                if (!sev_es_guest(vcpu->kvm))
                        svm_set_intercept(svm, INTERCEPT_IRET);
@@ -35,6 +35,7 @@ extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;
+extern bool vnmi;

enum avic_modes {
        AVIC_MODE_NONE = 0,
@@ -532,6 +533,57 @@ static inline bool is_x2apic_msrpm_offset(u32 offset)
        return (msr >= APIC_BASE_MSR) &&
               (msr < (APIC_BASE_MSR + 0x100));
}

+static inline struct vmcb *get_vnmi_vmcb(struct vcpu_svm *svm)
+{
+        if (!vnmi)
+                return NULL;
+
+        if (is_guest_mode(&svm->vcpu))
+                return svm->nested.vmcb02.ptr;
+        else
+                return svm->vmcb01.ptr;
+}
+
+static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
+{
+        struct vmcb *vmcb = get_vnmi_vmcb(svm);
+
+        if (vmcb)
+                return !!(vmcb->control.int_ctl & V_NMI_ENABLE);
+        else
+                return false;
+}
+
+static inline bool is_vnmi_mask_set(struct vcpu_svm *svm)
+{
+        struct vmcb *vmcb = get_vnmi_vmcb(svm);
+
+        if (vmcb)
+                return !!(vmcb->control.int_ctl & V_NMI_MASK);
+        else
+                return false;
+}
+
+static inline void set_vnmi_mask(struct vcpu_svm *svm)
+{
+        struct vmcb *vmcb = get_vnmi_vmcb(svm);
+
+        if (vmcb)
+                vmcb->control.int_ctl |= V_NMI_MASK;
+        else
+                svm->vcpu.arch.hflags |= HF_NMI_MASK;
+}
+
+static inline void clear_vnmi_mask(struct vcpu_svm *svm)
+{
+        struct vmcb *vmcb = get_vnmi_vmcb(svm);
+
+        if (vmcb)
+                vmcb->control.int_ctl &= ~V_NMI_MASK;
+        else
+                svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
+}
+
/* svm.c */
#define MSR_INVALID 0xffffffffU
VMCB int_ctl bit 12 (V_NMI_MASK) is set by the processor when it takes
an NMI in the guest and is cleared once the NMI has been handled. Treat
V_NMI_MASK as read-only in the hypervisor, except for the SMM case: the
hypervisor must set V_NMI_MASK before entering SMM and clear it after
leaving SMM.

Add get_vnmi_vmcb() to return the correct vmcb for L1 or L2, and
set_vnmi_mask()/clear_vnmi_mask() to set and clear the mask.

Signed-off-by: Santosh Shukla <santosh.shukla@amd.com>
---
v3:
* Handle the SMM case.
* Added set/clear_vnmi_mask() API.

v2:
- Added get_vnmi_vmcb API to return the vmcb for L1 and L2.
- Use get_vnmi_vmcb to get the correct vmcb in
  is_vnmi_enabled()/is_vnmi_mask_set().
- Removed the vnmi check from is_vnmi_enabled().

 arch/x86/kvm/svm/svm.c | 17 +++++++++++++-
 arch/x86/kvm/svm/svm.h | 52 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 68 insertions(+), 1 deletion(-)
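For reviewers who want to poke at the int_ctl semantics outside the
kernel, below is a minimal user-space sketch of what the new helpers
do. It is illustrative only: the V_NMI_MASK position (bit 12) comes
from the commit message, while the V_NMI_ENABLE position used here is
an assumed placeholder, not taken from this series' headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit 12 per the commit message; bit 26 is assumed for illustration. */
#define V_NMI_MASK      (1u << 12)
#define V_NMI_ENABLE    (1u << 26)

struct vmcb_control {
        uint32_t int_ctl;
};

static bool is_vnmi_mask_set(const struct vmcb_control *c)
{
        return !!(c->int_ctl & V_NMI_MASK);
}

static void set_vnmi_mask(struct vmcb_control *c)
{
        c->int_ctl |= V_NMI_MASK;
}

static void clear_vnmi_mask(struct vmcb_control *c)
{
        c->int_ctl &= ~V_NMI_MASK;
}

int main(void)
{
        struct vmcb_control c = { .int_ctl = V_NMI_ENABLE };

        /* Hypervisor masks NMIs before entering SMM ... */
        set_vnmi_mask(&c);
        printf("masked: %d\n", is_vnmi_mask_set(&c));

        /* ... and clears the mask again after leaving SMM. */
        clear_vnmi_mask(&c);
        printf("masked: %d\n", is_vnmi_mask_set(&c));

        return 0;
}

Output is "masked: 1" then "masked: 0", mirroring the set/clear pair
the SMM entry and exit paths are expected to invoke through
svm_set_nmi_mask().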