@@ -224,6 +224,8 @@ typedef enum X86Seg {
#define CR0_NE_MASK (1U << 5)
#define CR0_WP_MASK (1U << 16)
#define CR0_AM_MASK (1U << 18)
+#define CR0_NW_MASK (1U << 29)
+#define CR0_CD_MASK (1U << 30)
#define CR0_PG_MASK (1U << 31)
#define CR4_VME_MASK (1U << 0)
@@ -136,6 +136,7 @@
#define SVM_NPTEXIT_GPT (1ULL << 33)
#define SVM_VMRUN_INTERCEPT (1ULL << 32)
+#define SVM_CR0_RESERVED_MASK 0xffffffff00000000U
struct QEMU_PACKED vmcb_control_area {
uint16_t intercept_cr_read;
@@ -73,6 +73,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
uint32_t event_inj;
uint32_t int_ctl;
uint32_t asid;
+ uint64_t new_cr0;
cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());
@@ -192,13 +193,18 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
save.idtr.limit));
+ new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
+ if (new_cr0 & SVM_CR0_RESERVED_MASK) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+ if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
/* clear exit_info_2 so we behave like the real hardware */
x86_stq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
- cpu_x86_update_cr0(env, x86_ldq_phys(cs,
- env->vm_vmcb + offsetof(struct vmcb,
- save.cr0)));
+ cpu_x86_update_cr0(env, new_cr0);
cpu_x86_update_cr4(env, x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb,
save.cr4)));
Setting the NW bit while the CD bit is clear in CR0 is an illegal combination. CR0[63:32] are also reserved and must be zero. (AMD64 Architecture Programmer's Manual, Vol. 2, 15.5)

Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
 target/i386/cpu.h                   |  2 ++
 target/i386/svm.h                   |  1 +
 target/i386/tcg/sysemu/svm_helper.c | 12 +++++++++---
 3 files changed, 12 insertions(+), 3 deletions(-)
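For reference, a minimal standalone sketch (not part of the patch) of the two CR0 consistency rules enforced above, written as a plain predicate. The function name svm_cr0_is_valid and the self-contained mask definitions are illustrative only; the patch itself performs these checks inline in helper_vmrun() and raises SVM_EXIT_ERR on failure.

#include <stdbool.h>
#include <stdint.h>

#define CR0_NW_MASK           (1U << 29)
#define CR0_CD_MASK           (1U << 30)
#define SVM_CR0_RESERVED_MASK 0xffffffff00000000ULL

/* Returns true when the CR0 value loaded from the guest VMCB is acceptable. */
bool svm_cr0_is_valid(uint64_t cr0)
{
    /* CR0[63:32] are reserved and must be zero (APM Vol. 2, 15.5). */
    if (cr0 & SVM_CR0_RESERVED_MASK) {
        return false;
    }
    /* NW = 1 with CD = 0 is an illegal combination. */
    if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
        return false;
    }
    return true;
}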