@@ -558,6 +558,14 @@ static const struct test avx512_vbmi_all
 static const struct test avx512_vbmi2_all[] = {
     INSN(pcompress, 66, 0f38, 63, vl, bw, el),
     INSN(pexpand,   66, 0f38, 62, vl, bw, el),
+    INSN(pshld,     66, 0f3a, 71, vl, dq, vl),
+    INSN(pshldv,    66, 0f38, 71, vl, dq, vl),
+    INSN(pshldvw,   66, 0f38, 70, vl,  w, vl),
+    INSN(pshldw,    66, 0f3a, 70, vl,  w, vl),
+    INSN(pshrd,     66, 0f3a, 73, vl, dq, vl),
+    INSN(pshrdv,    66, 0f38, 73, vl, dq, vl),
+    INSN(pshrdvw,   66, 0f38, 72, vl,  w, vl),
+    INSN(pshrdw,    66, 0f3a, 72, vl,  w, vl),
 };
 
 static const struct test avx512_vpopcntdq_all[] = {
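For context, these eight table entries cover both the immediate and the
variable-count forms of the new concatenate-and-shift insns. As an aside (not
part of the patch), their per-element behaviour can be modelled in plain C
following the SDM's description; the sketch below shows the dword immediate
forms with made-up helper names, plus a few hand-checked spot values. The word
and qword forms differ only in element width and in masking the count with 15
resp. 63.

#include <assert.h>
#include <stdint.h>

/* VPSHLDD: left-shift the 64-bit concatenation hi:lo, keep the upper dword. */
static uint32_t pshldd(uint32_t hi, uint32_t lo, uint8_t imm)
{
    unsigned int c = imm & 31; /* count is taken modulo the element width */

    return ((((uint64_t)hi << 32) | lo) << c) >> 32;
}

/* VPSHRDD: right-shift the 64-bit concatenation hi:lo, keep the lower dword. */
static uint32_t pshrdd(uint32_t hi, uint32_t lo, uint8_t imm)
{
    unsigned int c = imm & 31;

    return (uint32_t)((((uint64_t)hi << 32) | lo) >> c);
}

int main(void)
{
    assert(pshldd(0x00000001, 0x80000000, 1) == 0x00000003);
    assert(pshldd(0xdeadbeef, 0x12345678, 0) == 0xdeadbeef); /* count 0 */
    assert(pshrdd(0x00000001, 0x00000002, 1) == 0x80000001);
    return 0;
}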
@@ -487,6 +487,7 @@ static const struct ext0f38_table {
     [0x62] = { .simd_size = simd_packed_int, .two_op = 1, .d8s = d8s_bw },
     [0x63] = { .simd_size = simd_packed_int, .to_mem = 1, .two_op = 1, .d8s = d8s_bw },
     [0x64 ... 0x66] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
+    [0x70 ... 0x73] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
     [0x75 ... 0x76] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
     [0x77] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
     [0x78] = { .simd_size = simd_other, .two_op = 1 },
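The new [0x70 ... 0x73] row gives these opcodes the usual packed-integer
handling with full-vector (d8s_vl) Disp8 scaling. As a reminder of what
d8s_vl means for memory operands, here is a minimal sketch (illustrative
only, not the emulator's internals) of how an EVEX compressed 8-bit
displacement expands, assuming no broadcast:

#include <stdint.h>

/*
 * Under full-vector Disp8 scaling the 8-bit displacement is multiplied
 * by the vector length in bytes: 16 << EVEX.L'L, i.e. 16 (xmm),
 * 32 (ymm), or 64 (zmm). Broadcast forms scale by the element size
 * instead; that case is omitted here.
 */
static int32_t disp8_scaled_vl(int8_t disp8, unsigned int evex_lL)
{
    return (int32_t)disp8 * (16 << evex_lL);
}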
@@ -611,6 +612,7 @@ static const struct ext0f3a_table {
     [0x6a ... 0x6b] = { .simd_size = simd_scalar_opc, .four_op = 1 },
     [0x6c ... 0x6d] = { .simd_size = simd_packed_fp, .four_op = 1 },
     [0x6e ... 0x6f] = { .simd_size = simd_scalar_opc, .four_op = 1 },
+    [0x70 ... 0x73] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
     [0x78 ... 0x79] = { .simd_size = simd_packed_fp, .four_op = 1 },
     [0x7a ... 0x7b] = { .simd_size = simd_scalar_opc, .four_op = 1 },
     [0x7c ... 0x7d] = { .simd_size = simd_packed_fp, .four_op = 1 },
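The ext0f3a_table gains the identical attributes for the immediate forms.
Concretely, the disp8_scaled_vl() sketch above implies that a stored
displacement of 1 addresses byte offset 16, 32, or 64 for the xmm, ymm, and
zmm variants respectively; this is precisely the property the harness's
Disp8 test verifies for each of the new insns. Building on that sketch:

#include <assert.h>

/* Spot checks for the disp8_scaled_vl() sketch above. */
int main(void)
{
    assert(disp8_scaled_vl(1, 0) == 16);    /* xmm */
    assert(disp8_scaled_vl(1, 1) == 32);    /* ymm */
    assert(disp8_scaled_vl(-2, 2) == -128); /* zmm, negative displacement */
    return 0;
}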
@@ -8993,6 +8995,16 @@ x86_emulate(
         }
         goto simd_zmm;
 
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x70): /* vpshldvw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x72): /* vpshrdvw [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+        generate_exception_if(!evex.w, EXC_UD);
+        elem_bytes = 2;
+        /* fall through */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x71): /* vpshldv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x73): /* vpshrdv{d,q} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+        host_and_vcpu_must_have(avx512_vbmi2);
+        goto avx512f_no_sae;
+
     case X86EMUL_OPC_EVEX_66(0x0f38, 0x75): /* vpermi2{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f38, 0x7d): /* vpermt2{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f38, 0x8d): /* vperm{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
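The word forms (opcodes 0x70/0x72) require EVEX.W=1 and force 2-byte elements
before falling through to the shared AVX512_VBMI2 feature check. Semantically,
these 'v' forms take per-element shift counts from the r/m source rather than
an immediate; below is a scalar sketch of the dword variants (illustrative
names, following the SDM pseudocode, where the destination supplies the high
half for the left shift and the low half for the right shift):

#include <stdint.h>

/* VPSHLDVD: dst:src shifted left by cnt (mod 32), upper dword kept. */
static uint32_t pshldvd(uint32_t dst, uint32_t src, uint32_t cnt)
{
    return ((((uint64_t)dst << 32) | src) << (cnt & 31)) >> 32;
}

/* VPSHRDVD: src:dst shifted right by cnt (mod 32), lower dword kept. */
static uint32_t pshrdvd(uint32_t dst, uint32_t src, uint32_t cnt)
{
    return (uint32_t)((((uint64_t)src << 32) | dst) >> (cnt & 31));
}

Since the destination is also an input here, these are genuine three-operand
insns, consistent with none of the table entries above being marked .two_op.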
@@ -10293,6 +10305,16 @@ x86_emulate(
         avx512_vlen_check(true);
         goto simd_imm8_zmm;
 
+    case X86EMUL_OPC_EVEX_66(0x0f3a, 0x70): /* vpshldw $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f3a, 0x72): /* vpshrdw $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+        generate_exception_if(!evex.w, EXC_UD);
+        elem_bytes = 2;
+        /* fall through */
+    case X86EMUL_OPC_EVEX_66(0x0f3a, 0x71): /* vpshld{d,q} $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f3a, 0x73): /* vpshrd{d,q} $imm8,[xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
+        host_and_vcpu_must_have(avx512_vbmi2);
+        goto avx512f_imm8_no_sae;
+
     case X86EMUL_OPC(0x0f3a, 0xcc): /* sha1rnds4 $imm8,xmm/m128,xmm */
         host_and_vcpu_must_have(sha);
         op_bytes = 16;
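The 0f3a cases mirror the 0f38 ones, just routed through the imm8 path. The
element-size decode that both case blocks implement can be summarised as
below; this is a hypothetical helper purely for illustration, not code from
the emulator:

#include <stdbool.h>
#include <stdint.h>

/*
 * Element size in bytes implied by the opcode and EVEX.W for the
 * VPSH{L,R}D(V){W,D,Q} group; 0 means the encoding raises #UD.
 */
static unsigned int vpshxd_elem_bytes(uint8_t opc, bool evex_w)
{
    switch ( opc & 3 )
    {
    case 0: case 2: /* 0x70/0x72: word forms, EVEX.W must be set */
        return evex_w ? 2 : 0;
    case 1: case 3: /* 0x71/0x73: EVEX.W selects dword vs qword */
        return evex_w ? 8 : 4;
    }
    return 0; /* not reached */
}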
As in a few cases before, since the insns here and in particular their
memory access patterns follow the usual scheme, I didn't think it was
necessary to add a contrived test specifically for them, beyond the
Disp8 scaling one.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v7: Re-base over change earlier in the series.
v6: New.