@@ -1519,6 +1519,8 @@ static const struct vex {
{ { 0xdf }, 3, T, R, pfx_66, WIG, Ln }, /* vaeskeygenassist */
{ { 0xf0 }, 3, T, R, pfx_f2, Wn, L0 }, /* rorx */
}, vex_map7[] = {
+ { { 0xf6, 0xc0 }, 6, F, N, pfx_f3, W0, L0 }, /* wrmsrns */
+ { { 0xf6, 0xc0 }, 6, F, N, pfx_f2, W0, L0 }, /* rdmsr */
{ { 0xf8, 0xc0 }, 6, F, N, pfx_f3, W0, L0 }, /* uwrmsr */
{ { 0xf8, 0xc0 }, 6, F, N, pfx_f2, W0, L0 }, /* urdmsr */
};
@@ -1569,6 +1569,30 @@ int main(int argc, char **argv)
(regs.rip != (unsigned long)&instr[0]) ||
(uintr_timer != 0x8877665544332211UL) )
goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing rdmsr $MSR_GS_BASE,%rdx...");
+ instr[0] = 0xc4; instr[1] = 0xe7; instr[2] = 0x7b; instr[3] = 0xf6; instr[4] = 0xc2;
+ *(uint32_t *)&instr[5] = MSR_GS_BASE;
+ regs.rip = (unsigned long)&instr[0];
+ regs.rdx = ~gs_base;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.rip != (unsigned long)&instr[9]) ||
+ (regs.rdx != gs_base) )
+ goto fail;
+ printf("okay\n");
+
+ printf("%-40s", "Testing wrmsrns %rsi,$MSR_SHADOW_GS_BASE...");
+ instr[0] = 0xc4; instr[1] = 0xe7; instr[2] = 0x7a; instr[3] = 0xf6; instr[4] = 0xc6;
+ *(uint32_t *)&instr[5] = MSR_SHADOW_GS_BASE;
+ regs.rip = (unsigned long)&instr[0];
+ regs.rsi = 0x665544332211UL;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.rip != (unsigned long)&instr[9]) ||
+ (gs_base_shadow != 0x665544332211UL) )
+ goto fail;
emulops.write_msr = NULL;
#endif
@@ -88,6 +88,7 @@ bool emul_test_init(void)
cpu_policy.feat.lkgs = true;
cpu_policy.feat.wrmsrns = true;
cpu_policy.feat.msrlist = true;
+ cpu_policy.feat.msr_imm = true;
cpu_policy.feat.user_msr = true;
cpu_policy.extd.clzero = true;
@@ -1262,8 +1262,9 @@ int x86emul_decode(struct x86_emulate_st
case vex_map7:
opcode |= MASK_INSR(7, X86EMUL_OPC_EXT_MASK);
/*
- * No table lookup here for now, as there's only a single
- * opcode point (0xf8) populated in map 7.
+ * No table lookup here for now, as there are only two
+ * (very similar) opcode points (0xf6, 0xf8) populated
+ * in map 7.
*/
d = DstMem | SrcImm | ModRM | Mov;
s->op_bytes = 8;
@@ -600,6 +600,7 @@ amd_like(const struct x86_emulate_ctxt *
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
#define vcpu_has_avx_ifma() (ctxt->cpuid->feat.avx_ifma)
#define vcpu_has_msrlist() (ctxt->cpuid->feat.msrlist)
+#define vcpu_has_msr_imm() (ctxt->cpuid->feat.msr_imm)
#define vcpu_has_avx_vnni_int8() (ctxt->cpuid->feat.avx_vnni_int8)
#define vcpu_has_avx_ne_convert() (ctxt->cpuid->feat.avx_ne_convert)
#define vcpu_has_avx_vnni_int16() (ctxt->cpuid->feat.avx_vnni_int16)
@@ -7037,6 +7037,34 @@ x86_emulate(
state->simd_size = simd_none;
break;
+ case X86EMUL_OPC_VEX_F3(7, 0xf6): /* wrmsrns r64,imm32 */
+ case X86EMUL_OPC_VEX_F2(7, 0xf6): /* rdmsr imm32,r64 */
+ generate_exception_if(!mode_64bit() || ea.type != OP_REG, X86_EXC_UD);
+ generate_exception_if(vex.l || vex.w, X86_EXC_UD);
+ generate_exception_if(vex.opcx && ((modrm_reg & 7) || vex.reg != 0xf),
+ X86_EXC_UD);
+ vcpu_must_have(msr_imm);
+ generate_exception_if(!mode_ring0(), X86_EXC_GP, 0);
+ if ( vex.pfx == vex_f2 )
+ {
+ /* urdmsr */
+ fail_if(!ops->read_msr);
+ if ( (rc = ops->read_msr(imm1, &msr_val, ctxt)) != X86EMUL_OKAY )
+ goto done;
+ dst.val = msr_val;
+ ASSERT(dst.type == OP_REG);
+ dst.bytes = 8;
+ }
+ else
+ {
+ /* wrmsrns */
+ fail_if(!ops->write_msr);
+ if ( (rc = ops->write_msr(imm1, dst.val, ctxt)) != X86EMUL_OKAY )
+ goto done;
+ dst.type = OP_NONE;
+ }
+ break;
+
case X86EMUL_OPC_F3(0x0f38, 0xf8): /* enqcmds r,m512 / uwrmsr r64,r32 */
case X86EMUL_OPC_F2(0x0f38, 0xf8): /* enqcmd r,m512 / urdmsr r32,r64 */
if ( ea.type == OP_MEM )
@@ -343,6 +343,7 @@ XEN_CPUFEATURE(BHI_CTRL, 13*32
XEN_CPUFEATURE(MCDT_NO, 13*32+ 5) /*A MCDT_NO */
/* Intel-defined CPU features, CPUID level 0x00000007:1.ecx, word 14 */
+XEN_CPUFEATURE(MSR_IMM, 14*32+ 5) /*s RDMSR/WRMSRNS with immediate operand */
/* Intel-defined CPU features, CPUID level 0x00000007:1.edx, word 15 */
XEN_CPUFEATURE(AVX_VNNI_INT8, 15*32+ 4) /*A AVX-VNNI-INT8 Instructions */
@@ -275,7 +275,7 @@ def crunch_numbers(state):
# NO_LMSL indicates the absense of Long Mode Segment Limits, which
# have been dropped in hardware.
LM: [CX16, PCID, LAHF_LM, PAGE1GB, PKU, NO_LMSL, AMX_TILE, CMPCCXADD,
- LKGS, MSRLIST, USER_MSR],
+ LKGS, MSRLIST, USER_MSR, MSR_IMM],
# AMD K6-2+ and K6-III processors shipped with 3DNow+, beyond the
# standard 3DNow in the earlier K6 processors.
Encoding-wise these are very similar to URDMSR/UWRMSR, so existing logic
is easy to extend.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
RFC only for now, as the VMX part is missing: The existing intercepts
can't be re-used unmodified, as those require the MSR index to be
fetched from guest ECX.
---
v7: New.