@@ -843,6 +843,9 @@ static const struct {
{ { 0x80 }, { 2, 2 }, T, R, pfx_66 }, /* invept */
{ { 0x81 }, { 2, 2 }, T, R, pfx_66 }, /* invvpid */
{ { 0x82 }, { 2, 2 }, T, R, pfx_66 }, /* invpcid */
+ { { 0x8a }, { 2, 2 }, T, R, pfx_no }, /* movrsb */
+ { { 0x8b }, { 2, 2 }, T, R, pfx_no }, /* movrs{d,q} */
+ { { 0x8b }, { 2, 2 }, T, R, pfx_66 }, /* movrsw */
{ { 0xc8 }, { 2, 2 }, T, R, pfx_no }, /* sha1nexte */
{ { 0xc9 }, { 2, 2 }, T, R, pfx_no }, /* sha1msg1 */
{ { 0xca }, { 2, 2 }, T, R, pfx_no }, /* sha1msg2 */
@@ -1864,6 +1864,29 @@ int main(int argc, char **argv)
}
else
printf("skipped\n");
+
+ {
+ /* For the non-SIMD forms the emulator doesn't itself use MOVRS. */
+ bool movrs = cpu_policy.feat.movrs;
+
+ cpu_policy.feat.movrs = true;
+
+ printf("%-40s", "Testing movrs 6(%rdi),%si...");
+ instr[0] = 0x66; instr[1] = 0x0f; instr[2] = 0x38;
+ instr[3] = 0x8b; instr[4] = 0x77; instr[5] = 0x06;
+ regs.rip = (unsigned long)&instr[0];
+ regs.rsi = 0x8888777766665555UL;
+ regs.rdi = (unsigned long)res;
+ res[1] = 0x88777788U;
+ rc = x86_emulate(&ctxt, &emulops);
+ if ( (rc != X86EMUL_OKAY) ||
+ (regs.eip != (unsigned long)&instr[6]) ||
+ (regs.rsi != 0x8888777766668877UL) )
+ goto fail;
+ printf("okay\n");
+
+ cpu_policy.feat.movrs = movrs;
+ }
#endif /* x86-64 */
 
printf("%-40s", "Testing shld $1,%ecx,(%edx)...");
@@ -901,7 +901,8 @@ decode_0f38(struct x86_emulate_state *s,
{
switch ( ctxt->opcode & X86EMUL_OPC_MASK )
{
- case 0x00 ... 0xef:
+ case 0x00 ... 0x89:
+ case 0x8c ... 0xef:
case 0xf2 ... 0xf5:
case 0xf7:
case 0xfa ... 0xff:
@@ -912,6 +913,13 @@ decode_0f38(struct x86_emulate_state *s,
ctxt->opcode |= MASK_INSR(s->vex.pfx, X86EMUL_OPC_PFX_MASK);
break;
 
+ case 0x8a ... 0x8b: /* movrs */
+ s->desc = DstReg | SrcMem | Mov;
+ if ( !(ctxt->opcode & 1) )
+ s->desc |= ByteOp;
+ s->simd_size = simd_none;
+ break;
+
case X86EMUL_OPC_VEX_66(0, 0x2d): /* vmaskmovpd */
s->simd_size = simd_packed_fp;
break;
@@ -600,6 +600,7 @@ amd_like(const struct x86_emulate_ctxt *
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
#define vcpu_has_avx_ifma() (ctxt->cpuid->feat.avx_ifma)
#define vcpu_has_msrlist() (ctxt->cpuid->feat.msrlist)
+#define vcpu_has_movrs() (ctxt->cpuid->feat.movrs)
#define vcpu_has_msr_imm() (ctxt->cpuid->feat.msr_imm)
#define vcpu_has_avx_vnni_int8() (ctxt->cpuid->feat.avx_vnni_int8)
#define vcpu_has_avx_ne_convert() (ctxt->cpuid->feat.avx_ne_convert)
@@ -6336,6 +6336,16 @@ x86_emulate(
fault_suppression = false;
goto avx512f_no_sae;
 
+#endif /* !X86EMUL_NO_SIMD */
+
+ case X86EMUL_OPC(0x0f38, 0x8a)
+ ... X86EMUL_OPC(0x0f38, 0x8b): /* movrs */
+ vcpu_must_have(movrs);
+ dst.val = src.val;
+ break;
+
+#ifndef X86EMUL_NO_SIMD
+
case X86EMUL_OPC_VEX_66(0x0f38, 0x8c): /* vpmaskmov{d,q} mem,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0x8e): /* vpmaskmov{d,q} {x,y}mm,{x,y}mm,mem */
generate_exception_if(ea.type != OP_MEM, X86_EXC_UD);
@@ -314,6 +314,7 @@ XEN_CPUFEATURE(NMI_SRC, 10*32+20) /
XEN_CPUFEATURE(AMX_FP16, 10*32+21) /* AMX FP16 instruction */
XEN_CPUFEATURE(AVX_IFMA, 10*32+23) /*A AVX-IFMA Instructions */
XEN_CPUFEATURE(MSRLIST, 10*32+27) /*s MSR list instructions */
+XEN_CPUFEATURE(MOVRS, 10*32+31) /*a MOV-read-shared instructions */
 
/* AMD-defined CPU features, CPUID level 0x80000021.eax, word 11 */
XEN_CPUFEATURE(NO_NEST_BP, 11*32+ 0) /*A No Nested Data Breakpoints */
@@ -275,7 +275,7 @@ def crunch_numbers(state):
# NO_LMSL indicates the absence of Long Mode Segment Limits, which
# have been dropped in hardware.
LM: [CX16, PCID, LAHF_LM, PAGE1GB, PKU, NO_LMSL, AMX_TILE, CMPCCXADD,
- LKGS, MSRLIST, USER_MSR, MSR_IMM],
+ LKGS, MSRLIST, USER_MSR, MSR_IMM, MOVRS],
 
# AMD K6-2+ and K6-III processors shipped with 3DNow+, beyond the
# standard 3DNow in the earlier K6 processors.
As we ignore cachability aspects of insns, they're treated like simple MOVs.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v7: New.
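
For illustration only (not part of the patch): a self-contained C sketch of
the semantics the scalar forms are given here, i.e. opcode bit 0 selecting
the byte form, the usual legacy/REX prefixes selecting the width otherwise,
and the load itself behaving like a plain MOV. The helper name movrs_width
is made up for this sketch; main() merely re-computes the value checked by
the "movrs 6(%rdi),%si" harness test above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned int movrs_width(uint8_t opcode, bool pfx_66, bool rex_w)
{
    if ( !(opcode & 1) ) /* 0x8a: movrsb */
        return 1;
    if ( rex_w )         /* REX.W 0x8b: movrsq */
        return 8;
    if ( pfx_66 )        /* 66 0x8b: movrsw */
        return 2;
    return 4;            /* 0x8b: movrsd */
}

int main(void)
{
    /* Same operands as the harness test: 66 0f 38 8b 77 06. */
    uint32_t res[2] = { 0, 0x88777788u };
    uint64_t rsi = 0x8888777766665555ull;
    unsigned int w = movrs_width(0x8b, true, false);
    uint64_t val = 0, mask = (w == 8) ? ~0ull : (1ull << (w * 8)) - 1;

    memcpy(&val, (const uint8_t *)res + 6, w);
    if ( w == 4 )        /* 32-bit destinations zero-extend */
        rsi = val;
    else                 /* 8- and 16-bit ones merge into the low bytes */
        rsi = (rsi & ~mask) | (val & mask);

    printf("rsi = %#llx\n", (unsigned long long)rsi);
    return rsi != 0x8888777766668877ull;
}

Building and running this should print rsi = 0x8888777766668877 and exit 0,
matching the expected %rsi value in the test harness hunk.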