@@ -646,6 +646,8 @@ static const struct test avx512_fp16_all
INSN(fnmsub231sh, 66, map6, bf, el, fp16, el),
INSN(fpclassph, , 0f3a, 66, vl, fp16, vl),
INSN(fpclasssh, , 0f3a, 67, el, fp16, el),
+ INSN(getexpph, 66, map6, 42, vl, fp16, vl),
+ INSN(getexpsh, 66, map6, 43, el, fp16, el),
INSN(getmantph, , 0f3a, 26, vl, fp16, vl),
INSN(getmantsh, , 0f3a, 27, el, fp16, el),
INSN(maxph, , map5, 5f, vl, fp16, vl),
@@ -656,10 +658,16 @@ static const struct test avx512_fp16_all
INSN(movsh, f3, map5, 11, el, fp16, el),
INSN(mulph, , map5, 59, vl, fp16, vl),
INSN(mulsh, f3, map5, 59, el, fp16, el),
+ INSN(rcpph, 66, map6, 4c, vl, fp16, vl),
+ INSN(rcpsh, 66, map6, 4d, el, fp16, el),
INSN(reduceph, , 0f3a, 56, vl, fp16, vl),
INSN(reducesh, , 0f3a, 57, el, fp16, el),
INSN(rndscaleph, , 0f3a, 08, vl, fp16, vl),
INSN(rndscalesh, , 0f3a, 0a, el, fp16, el),
+ INSN(rsqrtph, 66, map6, 4e, vl, fp16, vl),
+ INSN(rsqrtsh, 66, map6, 4f, el, fp16, el),
+ INSN(scalefph, 66, map6, 2c, vl, fp16, vl),
+ INSN(scalefsh, 66, map6, 2d, el, fp16, el),
INSN(sqrtph, , map5, 51, vl, fp16, vl),
INSN(sqrtsh, f3, map5, 51, el, fp16, el),
INSN(subph, , map5, 5c, vl, fp16, vl),
@@ -2050,6 +2050,14 @@ static const struct evex {
{ { 0x6e }, 2, T, R, pfx_66, WIG, L0 }, /* vmovw */
{ { 0x7e }, 2, T, W, pfx_66, WIG, L0 }, /* vmovw */
}, evex_map6[] = {
+ { { 0x2c }, 2, T, R, pfx_66, W0, Ln }, /* vscalefph */
+ { { 0x2d }, 2, T, R, pfx_66, W0, LIG }, /* vscalefsh */
+ { { 0x42 }, 2, T, R, pfx_66, W0, Ln }, /* vgetexpph */
+ { { 0x43 }, 2, T, R, pfx_66, W0, LIG }, /* vgetexpsh */
+ { { 0x4c }, 2, T, R, pfx_66, W0, Ln }, /* vrcpph */
+ { { 0x4d }, 2, T, R, pfx_66, W0, LIG }, /* vrcpsh */
+ { { 0x4e }, 2, T, R, pfx_66, W0, Ln }, /* vrsqrtph */
+ { { 0x4f }, 2, T, R, pfx_66, W0, LIG }, /* vrsqrtsh */
{ { 0x96 }, 2, T, R, pfx_66, W0, Ln }, /* vfmaddsub132ph */
{ { 0x97 }, 2, T, R, pfx_66, W0, Ln }, /* vfmsubadd132ph */
{ { 0x98 }, 2, T, R, pfx_66, W0, Ln }, /* vfmadd132ph */
@@ -358,7 +358,7 @@ static const struct ext0f38_table {
[0x2a] = { .simd_size = simd_packed_int, .two_op = 1, .d8s = d8s_vl },
[0x2b] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
[0x2c] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
- [0x2d] = { .simd_size = simd_packed_fp, .d8s = d8s_dq },
+ [0x2d] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
[0x2e ... 0x2f] = { .simd_size = simd_packed_fp, .to_mem = 1 },
[0x30] = { .simd_size = simd_other, .two_op = 1, .d8s = d8s_vl_by_2 },
[0x31] = { .simd_size = simd_other, .two_op = 1, .d8s = d8s_vl_by_4 },
@@ -909,8 +909,8 @@ decode_0f38(struct x86_emulate_state *s,
ctxt->opcode |= MASK_INSR(s->vex.pfx, X86EMUL_OPC_PFX_MASK);
break;

- case X86EMUL_OPC_EVEX_66(0, 0x2d): /* vscalefs{s,d} */
- s->simd_size = simd_scalar_vexw;
+ case X86EMUL_OPC_VEX_66(0, 0x2d): /* vmaskmovpd */
+ s->simd_size = simd_packed_fp;
break;

case X86EMUL_OPC_EVEX_66(0, 0x7a): /* vpbroadcastb */
@@ -7780,6 +7780,8 @@ x86_emulate(
generate_exception_if(evex.w, EXC_UD);
goto avx512f_all_fp;

+ case X86EMUL_OPC_EVEX_66(6, 0x2c): /* vscalefph [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+ case X86EMUL_OPC_EVEX_66(6, 0x42): /* vgetexpph [xyz]mm/mem,[xyz]mm{k} */
case X86EMUL_OPC_EVEX_66(6, 0x96): /* vfmaddsub132ph [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
case X86EMUL_OPC_EVEX_66(6, 0x97): /* vfmsubadd132ph [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
case X86EMUL_OPC_EVEX_66(6, 0x98): /* vfmadd132ph [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
@@ -7804,6 +7806,8 @@ x86_emulate(
avx512_vlen_check(false);
goto simd_zmm;

+ case X86EMUL_OPC_EVEX_66(6, 0x2d): /* vscalefsh xmm/m16,xmm,xmm{k} */
+ case X86EMUL_OPC_EVEX_66(6, 0x43): /* vgetexpsh xmm/m16,xmm,xmm{k} */
case X86EMUL_OPC_EVEX_66(6, 0x99): /* vfmadd132sh xmm/m16,xmm,xmm{k} */
case X86EMUL_OPC_EVEX_66(6, 0x9b): /* vfmsub132sh xmm/m16,xmm,xmm{k} */
case X86EMUL_OPC_EVEX_66(6, 0x9d): /* vfnmadd132sh xmm/m16,xmm,xmm{k} */
@@ -7823,6 +7827,19 @@ x86_emulate(
avx512_vlen_check(true);
goto simd_zmm;

+ case X86EMUL_OPC_EVEX_66(6, 0x4c): /* vrcpph [xyz]mm/mem,[xyz]mm{k} */
+ case X86EMUL_OPC_EVEX_66(6, 0x4e): /* vrsqrtph [xyz]mm/mem,[xyz]mm{k} */
+ host_and_vcpu_must_have(avx512_fp16);
+ generate_exception_if(evex.w, EXC_UD);
+ goto avx512f_no_sae;
+
+ case X86EMUL_OPC_EVEX_66(6, 0x4d): /* vrcpsh xmm/m16,xmm,xmm{k} */
+ case X86EMUL_OPC_EVEX_66(6, 0x4f): /* vrsqrtsh xmm/m16,xmm,xmm{k} */
+ host_and_vcpu_must_have(avx512_fp16);
+ generate_exception_if(evex.w || evex.brs, EXC_UD);
+ avx512_vlen_check(true);
+ goto simd_zmm;
+
case X86EMUL_OPC_XOP(08, 0x85): /* vpmacssww xmm,xmm/m128,xmm,xmm */
case X86EMUL_OPC_XOP(08, 0x86): /* vpmacsswd xmm,xmm/m128,xmm,xmm */
case X86EMUL_OPC_XOP(08, 0x87): /* vpmacssdql xmm,xmm/m128,xmm,xmm */
While, as before, this leverages the fact that the Map6 encoding space is a very sparse clone of the "0f38" one, switch around the simd_size override for opcode 2D: with the table entry now defaulting to the scalar form, only the VEX-encoded vmaskmovpd needs a separate override, i.e. fewer overrides are needed overall.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
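To make the reasoning behind the 0x2d swap concrete, here is a minimal standalone sketch (not the emulator's actual decode path; the enum values and helper name are made up for illustration). Once Map6's vscalefsh joins legacy-map vscalefs{s,d} as an EVEX user of simd_scalar_vexw, letting the table default to the scalar size leaves just the single VEX form needing special-casing:

/* Illustrative only: simplified stand-ins for the emulator's real types. */
enum simd_size { simd_packed_fp, simd_scalar_vexw };
enum enc { ENC_VEX, ENC_EVEX };

static enum simd_size simd_size_for_2d(enum enc e)
{
    /* Table default now matches both EVEX users (vscalefs{s,d}, vscalefsh). */
    enum simd_size size = simd_scalar_vexw;

    if ( e == ENC_VEX )          /* only vmaskmovpd still needs an override */
        size = simd_packed_fp;

    return size;
}

Had the default stayed simd_packed_fp, both EVEX-encoded 0x2d forms would each have required their own override in decode paths that otherwise share the "0f38" table.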