[PULL,09/15] target/i386: Added consistency checks for EFER

Message ID 20210722153612.955537-10-pbonzini@redhat.com (mailing list archive)
State New, archived
Series [PULL,01/15] qemu-config: never call the callback after an error, fix leak

Commit Message

Paolo Bonzini July 22, 2021, 3:36 p.m. UTC
From: Lara Lazier <laramglazier@gmail.com>

EFER.SVME has to be set, and EFER reserved bits must
be zero.
In addition, the combinations
 * EFER.LMA or EFER.LME is non-zero and the processor does not support LM
 * non-zero EFER.LME and CR0.PG and zero CR4.PAE
 * non-zero EFER.LME and CR0.PG and zero CR0.PE
 * non-zero EFER.LME, CR0.PG, CR4.PAE, CS.L and CS.D
are all invalid.
(AMD64 Architecture Programmer's Manual, V2, 15.5)
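
For illustration, here is a minimal standalone sketch of the same consistency checks, run against two sample register states. The EFER/CR0/CR4 bit positions follow the AMD64 manual, but the struct, field names and helpers (vcpu_state, efer_state_is_invalid, ...) are invented for this example and are not the QEMU definitions used in the patch below.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* EFER bits (AMD64 APM vol. 2); names are local to this sketch */
    #define EFER_SCE   (1ULL << 0)
    #define EFER_LME   (1ULL << 8)
    #define EFER_LMA   (1ULL << 10)
    #define EFER_NXE   (1ULL << 11)
    #define EFER_SVME  (1ULL << 12)
    #define EFER_FFXSR (1ULL << 14)
    #define EFER_RESERVED \
        (~(EFER_SCE | EFER_LME | EFER_LMA | EFER_NXE | EFER_SVME | EFER_FFXSR))

    #define CR0_PE  (1ULL << 0)
    #define CR0_PG  (1ULL << 31)
    #define CR4_PAE (1ULL << 5)

    /* Hypothetical container for the guest state loaded from the VMCB */
    struct vcpu_state {
        uint64_t efer, cr0, cr4;
        bool cpu_has_lm;   /* CPUID 8000_0001 EDX.LM */
        bool cs_l, cs_d;   /* CS.L and CS.D/B attributes */
    };

    /* Same checks as the patch's is_efer_invalid_state(), on the sketch's types */
    static bool efer_state_is_invalid(const struct vcpu_state *s)
    {
        if (!(s->efer & EFER_SVME)) {
            return true;                 /* SVME must be set for VMRUN */
        }
        if (s->efer & EFER_RESERVED) {
            return true;                 /* reserved bits must be zero */
        }
        if ((s->efer & (EFER_LMA | EFER_LME)) && !s->cpu_has_lm) {
            return true;                 /* long-mode bits without LM support */
        }
        if ((s->efer & EFER_LME) && (s->cr0 & CR0_PG) && !(s->cr4 & CR4_PAE)) {
            return true;                 /* LME + paging requires PAE */
        }
        if ((s->efer & EFER_LME) && (s->cr0 & CR0_PG) && !(s->cr0 & CR0_PE)) {
            return true;                 /* LME + paging requires protected mode */
        }
        if ((s->efer & EFER_LME) && (s->cr0 & CR0_PG) && (s->cr4 & CR4_PAE) &&
            s->cs_l && s->cs_d) {
            return true;                 /* CS.L and CS.D must not both be set */
        }
        return false;
    }

    int main(void)
    {
        /* valid 64-bit guest: SVME, LME/LMA, paging, PAE, CS.L=1, CS.D=0 */
        struct vcpu_state ok = {
            .efer = EFER_SVME | EFER_LME | EFER_LMA,
            .cr0 = CR0_PE | CR0_PG, .cr4 = CR4_PAE,
            .cpu_has_lm = true, .cs_l = true, .cs_d = false,
        };
        /* invalid: LME and paging enabled but CR4.PAE clear */
        struct vcpu_state bad = ok;
        bad.cr4 = 0;

        printf("ok  -> %s\n", efer_state_is_invalid(&ok)  ? "rejected" : "accepted");
        printf("bad -> %s\n", efer_state_is_invalid(&bad) ? "rejected" : "accepted");
        return 0;
    }

In the patch itself, when is_efer_invalid_state() returns true, helper_vmrun() exits immediately with SVM_EXIT_ERR, which corresponds to the VMEXIT_INVALID error exit the manual prescribes for failed consistency checks.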

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
Message-Id: <20210721152651.14683-3-laramglazier@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 target/i386/cpu.h                   |  5 ++++
 target/i386/tcg/sysemu/svm_helper.c | 39 +++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)

Patch

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 1f7e8d7f0a..6c50d3ab4f 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -475,6 +475,11 @@  typedef enum X86Seg {
 #define MSR_EFER_SVME  (1 << 12)
 #define MSR_EFER_FFXSR (1 << 14)
 
+#define MSR_EFER_RESERVED\
+        (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME\
+            | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME\
+            | MSR_EFER_FFXSR))
+
 #define MSR_STAR                        0xc0000081
 #define MSR_LSTAR                       0xc0000082
 #define MSR_CSTAR                       0xc0000083
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index d7d7a86aa9..4d64ec378e 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -75,6 +75,41 @@  static inline bool ctl_has_irq(uint32_t int_ctl)
     return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
 }
 
+static inline bool is_efer_invalid_state(CPUX86State *env)
+{
+    if (!(env->efer & MSR_EFER_SVME)) {
+        return true;
+    }
+
+    if (env->efer & MSR_EFER_RESERVED) {
+        return true;
+    }
+
+    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
+            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+                                && !(env->cr[4] & CR4_PAE_MASK)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+                                && !(env->cr[0] & CR0_PE_MASK)) {
+        return true;
+    }
+
+    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+                                && (env->cr[4] & CR4_PAE_MASK)
+                                && (env->segs[R_CS].flags & DESC_L_MASK)
+                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
+        return true;
+    }
+
+    return false;
+}
+
 void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
 {
     CPUState *cs = env_cpu(env);
@@ -291,6 +326,10 @@  void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     }
 #endif
 
+    if (is_efer_invalid_state(env)) {
+        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+    }
+
     switch (x86_ldub_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
     case TLB_CONTROL_DO_NOTHING: