From patchwork Fri Mar 15 10:56:56 2019
X-Patchwork-Id: 10854513
Message-Id: <5C8B84F8020000780021F248@prv1-mh.provo.novell.com>
In-Reply-To: <5C8B7EC0020000780021F10B@prv1-mh.provo.novell.com>
References: <5B6BF83602000078001DC548@prv1-mh.provo.novell.com>
 <5C8B7EC0020000780021F10B@prv1-mh.provo.novell.com>
Date: Fri, 15 Mar 2019 04:56:56 -0600
From: "Jan Beulich"
To: "xen-devel"
Cc: George Dunlap, Andrew Cooper, Wei Liu, Roger Pau Monne
Subject: [Xen-devel] [PATCH v8 30/50] x86emul: support AVX512{F,_VBMI2} compress/expand insns

Signed-off-by: Jan Beulich
---
v7: Re-base.
v6: Re-base. Add tests for the byte/word forms.
v5: New.
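
[Editorial note, not part of the patch: as a quick reference while
reviewing the tests below - compress stores just the mask-selected
elements contiguously to the destination, while expand does the
inverse, loading contiguous elements into the mask-selected lanes
(with {z} masking zeroing the remaining ones). A minimal scalar model
of the dword forms, under those assumed semantics; compress_d(),
expand_d(), and their arguments are made up for this sketch:

#include <stdint.h>

/* Reference model of vpcompressd to memory: store selected elements only. */
static void compress_d(uint32_t *dst, const uint32_t *src,
                       uint16_t mask, unsigned int n)
{
    unsigned int i, j = 0;

    for ( i = 0; i < n; ++i )
        if ( mask & (1u << i) )
            dst[j++] = src[i]; /* exactly popcount(mask) stores occur */
}

/* Reference model of vpexpandd from memory, with zeroing-masking. */
static void expand_d(uint32_t *dst, const uint32_t *src,
                     uint16_t mask, unsigned int n)
{
    unsigned int i, j = 0;

    for ( i = 0; i < n; ++i )
        dst[i] = (mask & (1u << i)) ? src[j++] : 0;
}

E.g. with mask 0x55aa and source elements 0..15, compress_d() stores
1, 3, 5, 7, 8, 10, 12, 14 - exactly the values the vpcompressd test
checks at ptr[24]..ptr[31].]
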
--- a/tools/tests/x86_emulator/evex-disp8.c
+++ b/tools/tests/x86_emulator/evex-disp8.c
@@ -109,6 +109,7 @@ static const struct test avx512f_all[] =
     INSN_FP(cmp, 0f, c2),
     INSN(comisd, 66, 0f, 2f, el, q, el),
     INSN(comiss, , 0f, 2f, el, d, el),
+    INSN(compress, 66, 0f38, 8a, vl, sd, el),
     INSN(cvtdq2pd, f3, 0f, e6, vl_2, d, vl),
     INSN(cvtdq2ps, , 0f, 5b, vl, d, vl),
     INSN(cvtpd2dq, f2, 0f, e6, vl, q, vl),
@@ -140,6 +141,7 @@ static const struct test avx512f_all[] =
     INSN(cvtusi2sd, f2, 0f, 7b, el, dq64, el),
     INSN(cvtusi2ss, f3, 0f, 7b, el, dq64, el),
     INSN_FP(div, 0f, 5e),
+    INSN(expand, 66, 0f38, 88, vl, sd, el),
     INSN(fixupimm, 66, 0f3a, 54, vl, sd, vl),
     INSN(fixupimm, 66, 0f3a, 55, el, sd, el),
     INSN(fmadd132, 66, 0f38, 98, vl, sd, vl),
@@ -214,6 +216,7 @@ static const struct test avx512f_all[] =
     INSN(pcmpgtd, 66, 0f, 66, vl, d, vl),
     INSN(pcmpgtq, 66, 0f38, 37, vl, q, vl),
     INSN(pcmpu, 66, 0f3a, 1e, vl, dq, vl),
+    INSN(pcompress, 66, 0f38, 8b, vl, dq, el),
     INSN(permi2, 66, 0f38, 76, vl, dq, vl),
     INSN(permi2, 66, 0f38, 77, vl, sd, vl),
     INSN(permilpd, 66, 0f38, 0d, vl, q, vl),
@@ -222,6 +225,7 @@ static const struct test avx512f_all[] =
     INSN(permilps, 66, 0f3a, 04, vl, d, vl),
     INSN(permt2, 66, 0f38, 7e, vl, dq, vl),
     INSN(permt2, 66, 0f38, 7f, vl, sd, vl),
+    INSN(pexpand, 66, 0f38, 89, vl, dq, el),
     INSN(pmaxs, 66, 0f38, 3d, vl, dq, vl),
     INSN(pmaxu, 66, 0f38, 3f, vl, dq, vl),
     INSN(pmins, 66, 0f38, 39, vl, dq, vl),
@@ -509,6 +513,11 @@ static const struct test avx512_vbmi_all
     INSN(permt2b, 66, 0f38, 7d, vl, b, vl),
 };
 
+static const struct test avx512_vbmi2_all[] = {
+    INSN(pcompress, 66, 0f38, 63, vl, bw, el),
+    INSN(pexpand, 66, 0f38, 62, vl, bw, el),
+};
+
 static const unsigned char vl_all[] = { VL_512, VL_128, VL_256 };
 static const unsigned char vl_128[] = { VL_128 };
 static const unsigned char vl_no128[] = { VL_512, VL_256 };
@@ -865,4 +874,5 @@ void evex_disp8_test(void *instr, struct
     RUN(avx512dq, 512);
     RUN(avx512er, 512);
     RUN(avx512_vbmi, all);
+    RUN(avx512_vbmi2, all);
 }
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -3995,6 +3995,227 @@ int main(int argc, char **argv)
     else
         printf("skipped\n");
 
+    /*
+     * The following compress/expand tests are not only making sure the
+     * accessed data is correct, but they also verify (by placing operands
+     * on the mapping boundaries) that elements controlled by clear mask
+     * bits don't get accessed.
+     */
+    if ( stack_exec && cpu_has_avx512f )
+    {
+        decl_insn(vpcompressd);
+        decl_insn(vpcompressq);
+        decl_insn(vpexpandd);
+        decl_insn(vpexpandq);
+        static const struct {
+            unsigned int d[16];
+        } dsrc = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 } };
+        static const struct {
+            unsigned long long q[8];
+        } qsrc = { { 0, 1, 2, 3, 4, 5, 6, 7 } };
+        unsigned int *ptr = res + MMAP_SZ / sizeof(*res) - 32;
+
+        printf("%-40s", "Testing vpcompressd %zmm1,24*4(%ecx){%k2}...");
+        asm volatile ( "kmovw %1, %%k2\n\t"
+                       "vmovdqu32 %2, %%zmm1\n"
+                       put_insn(vpcompressd,
+                                "vpcompressd %%zmm1, 24*4(%0)%{%%k2%}")
+                       :: "c" (NULL), "r" (0x55aa), "m" (dsrc) );
+
+        memset(ptr, 0xdb, 32 * 4);
+        set_insn(vpcompressd);
+        regs.ecx = (unsigned long)ptr;
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vpcompressd) ||
+             memcmp(ptr, ptr + 8, 16 * 4) )
+            goto fail;
+        for ( i = 0; i < 4; ++i )
+            if ( ptr[24 + i] != 2 * i + 1 )
+                goto fail;
+        for ( ; i < 8; ++i )
+            if ( ptr[24 + i] != 2 * i )
+                goto fail;
+        printf("okay\n");
+
+        printf("%-40s", "Testing vpexpandd 8*4(%edx),%zmm3{%k2}{z}...");
+        asm volatile ( "vpternlogd $0x81, %%zmm3, %%zmm3, %%zmm3\n"
+                       put_insn(vpexpandd,
+                                "vpexpandd 8*4(%0), %%zmm3%{%%k2%}%{z%}")
+                       :: "d" (NULL) );
+        set_insn(vpexpandd);
+        regs.edx = (unsigned long)(ptr + 16);
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vpexpandd) )
+            goto fail;
+        asm ( "vmovdqa32 %%zmm1, %%zmm2%{%%k2%}%{z%}\n\t"
+              "vpcmpeqd %%zmm2, %%zmm3, %%k0\n\t"
+              "kmovw %%k0, %0"
+              : "=r" (rc) );
+        if ( rc != 0xffff )
+            goto fail;
+        printf("okay\n");
+
+        printf("%-40s", "Testing vpcompressq %zmm4,12*8(%edx){%k3}...");
+        asm volatile ( "kmovw %1, %%k3\n\t"
+                       "vmovdqu64 %2, %%zmm4\n"
+                       put_insn(vpcompressq,
+                                "vpcompressq %%zmm4, 12*8(%0)%{%%k3%}")
+                       :: "d" (NULL), "r" (0x5a), "m" (qsrc) );
+
+        memset(ptr, 0xdb, 16 * 8);
+        set_insn(vpcompressq);
+        regs.edx = (unsigned long)ptr;
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vpcompressq) ||
+             memcmp(ptr, ptr + 8, 8 * 8) )
+            goto fail;
+        for ( i = 0; i < 2; ++i )
+        {
+            if ( ptr[(12 + i) * 2] != 2 * i + 1 ||
+                 ptr[(12 + i) * 2 + 1] )
+                goto fail;
+        }
+        for ( ; i < 4; ++i )
+        {
+            if ( ptr[(12 + i) * 2] != 2 * i ||
+                 ptr[(12 + i) * 2 + 1] )
+                goto fail;
+        }
+        printf("okay\n");
+
+        printf("%-40s", "Testing vpexpandq 4*8(%ecx),%zmm5{%k3}{z}...");
+        asm volatile ( "vpternlogq $0x81, %%zmm5, %%zmm5, %%zmm5\n"
+                       put_insn(vpexpandq,
+                                "vpexpandq 4*8(%0), %%zmm5%{%%k3%}%{z%}")
+                       :: "c" (NULL) );
+        set_insn(vpexpandq);
+        regs.ecx = (unsigned long)(ptr + 16);
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vpexpandq) )
+            goto fail;
+        asm ( "vmovdqa64 %%zmm4, %%zmm6%{%%k3%}%{z%}\n\t"
+              "vpcmpeqq %%zmm5, %%zmm6, %%k0\n\t"
+              "kmovw %%k0, %0"
+              : "=r" (rc) );
+        if ( rc != 0xff )
+            goto fail;
+        printf("okay\n");
+    }
+
+#if __GNUC__ > 7 /* can't check for __AVX512VBMI2__ here */
+    if ( stack_exec && cpu_has_avx512_vbmi2 )
+    {
+        decl_insn(vpcompressb);
+        decl_insn(vpcompressw);
+        decl_insn(vpexpandb);
+        decl_insn(vpexpandw);
+        static const struct {
+            unsigned char b[64];
+        } bsrc = { { 0, 1, 2, 3, 4, 5, 6, 7,
+                     8, 9, 10, 11, 12, 13, 14, 15,
+                     16, 17, 18, 19, 20, 21, 22, 23,
+                     24, 25, 26, 27, 28, 29, 30, 31,
+                     32, 33, 34, 35, 36, 37, 38, 39,
+                     40, 41, 42, 43, 44, 45, 46, 47,
+                     48, 49, 50, 51, 52, 53, 54, 55,
+                     56, 57, 58, 59, 60, 61, 62, 63 } };
+        static const struct {
+            unsigned short w[32];
+        } wsrc = { { 0, 1, 2, 3, 4, 5, 6, 7,
+                     8, 9, 10, 11, 12, 13, 14, 15,
+                     16, 17, 18, 19, 20, 21, 22, 23,
+                     24, 25, 26, 27, 28, 29, 30, 31 } };
+        unsigned char *ptr = (void *)res + MMAP_SZ - 128;
+        unsigned long long w = 0x55555555aaaaaaaaULL;
+
+        printf("%-40s", "Testing vpcompressb %zmm1,96*1(%ecx){%k2}...");
+        asm volatile ( "kmovq %1, %%k2\n\t"
+                       "vmovdqu8 %2, %%zmm1\n"
+                       put_insn(vpcompressb,
+                                "vpcompressb %%zmm1, 96*1(%0)%{%%k2%}")
+                       :: "c" (NULL), "m" (w), "m" (bsrc) );
+
+        memset(ptr, 0xdb, 128 * 1);
+        set_insn(vpcompressb);
+        regs.ecx = (unsigned long)ptr;
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vpcompressb) ||
+             memcmp(ptr, ptr + 32, 64 * 1) )
+            goto fail;
+        for ( i = 0; i < 16; ++i )
+            if ( ptr[96 + i] != 2 * i + 1 )
+                goto fail;
+        for ( ; i < 32; ++i )
+            if ( ptr[96 + i] != 2 * i )
+                goto fail;
+        printf("okay\n");
+
+        printf("%-40s", "Testing vpexpandb 32*1(%edx),%zmm3{%k2}{z}...");
+        asm volatile ( "vpternlogd $0x81, %%zmm3, %%zmm3, %%zmm3\n"
+                       put_insn(vpexpandb,
+                                "vpexpandb 32*1(%0), %%zmm3%{%%k2%}%{z%}")
+                       :: "d" (NULL) );
+        set_insn(vpexpandb);
+        regs.edx = (unsigned long)(ptr + 64);
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vpexpandb) )
+            goto fail;
+        asm ( "vmovdqu8 %%zmm1, %%zmm2%{%%k2%}%{z%}\n\t"
+              "vpcmpeqb %%zmm2, %%zmm3, %%k0\n\t"
+              "kmovq %%k0, %0"
+              : "=m" (w) );
+        if ( w != 0xffffffffffffffffULL )
+            goto fail;
+        printf("okay\n");
+
+        printf("%-40s", "Testing vpcompressw %zmm4,48*2(%edx){%k3}...");
+        asm volatile ( "kmovd %1, %%k3\n\t"
+                       "vmovdqu16 %2, %%zmm4\n"
+                       put_insn(vpcompressw,
+                                "vpcompressw %%zmm4, 48*2(%0)%{%%k3%}")
+                       :: "d" (NULL), "r" (0x5555aaaa), "m" (wsrc) );
+
+        memset(ptr, 0xdb, 64 * 2);
+        set_insn(vpcompressw);
+        regs.edx = (unsigned long)ptr;
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vpcompressw) ||
+             memcmp(ptr, ptr + 32, 32 * 2) )
+            goto fail;
+        for ( i = 0; i < 8; ++i )
+        {
+            if ( ptr[(48 + i) * 2] != 2 * i + 1 ||
+                 ptr[(48 + i) * 2 + 1] )
+                goto fail;
+        }
+        for ( ; i < 16; ++i )
+        {
+            if ( ptr[(48 + i) * 2] != 2 * i ||
+                 ptr[(48 + i) * 2 + 1] )
+                goto fail;
+        }
+        printf("okay\n");
+
+        printf("%-40s", "Testing vpexpandw 16*2(%ecx),%zmm5{%k3}{z}...");
+        asm volatile ( "vpternlogd $0x81, %%zmm5, %%zmm5, %%zmm5\n"
+                       put_insn(vpexpandw,
+                                "vpexpandw 16*2(%0), %%zmm5%{%%k3%}%{z%}")
+                       :: "c" (NULL) );
+        set_insn(vpexpandw);
+        regs.ecx = (unsigned long)(ptr + 64);
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vpexpandw) )
+            goto fail;
+        asm ( "vmovdqu16 %%zmm4, %%zmm6%{%%k3%}%{z%}\n\t"
+              "vpcmpeqw %%zmm5, %%zmm6, %%k0\n\t"
+              "kmovq %%k0, %0"
+              : "=m" (w) );
+        if ( w != 0xffffffff )
+            goto fail;
+        printf("okay\n");
+    }
+#endif
+
 #undef decl_insn
 #undef put_insn
 #undef set_insn
--- a/tools/tests/x86_emulator/x86-emulate.h
+++ b/tools/tests/x86_emulator/x86-emulate.h
@@ -59,6 +59,9 @@
     (type *)((char *)mptr__ - offsetof(type, member)); \
 })
 
+#define hweight32 __builtin_popcount
+#define hweight64 __builtin_popcountll
+
 #define is_canonical_address(x) (((int64_t)(x) >> 47) == ((int64_t)(x) >> 63))
 
 extern uint32_t mxcsr_mask;
@@ -138,6 +141,7 @@ static inline bool xcr0_mask(uint64_t ma
 #define cpu_has_avx512bw (cp.feat.avx512bw && xcr0_mask(0xe6))
 #define cpu_has_avx512vl (cp.feat.avx512vl && xcr0_mask(0xe6))
 #define cpu_has_avx512_vbmi (cp.feat.avx512_vbmi && xcr0_mask(0xe6))
+#define cpu_has_avx512_vbmi2 (cp.feat.avx512_vbmi2 && xcr0_mask(0xe6))
 
 #define cpu_has_xgetbv1 (cpu_has_xsave && cp.xstate.xgetbv1)
 
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -482,6 +482,8 @@ static const struct ext0f38_table {
     [0x59] = { .simd_size = simd_other, .two_op = 1, .d8s = 3 },
     [0x5a] = { .simd_size = simd_128, .two_op = 1, .d8s = 4 },
     [0x5b] = { .simd_size = simd_256, .two_op = 1, .d8s = d8s_vl_by_2 },
+    [0x62] = { .simd_size = simd_packed_int, .two_op = 1, .d8s = d8s_bw },
+    [0x63] = { .simd_size = simd_packed_int, .to_mem = 1, .two_op = 1, .d8s = d8s_bw },
     [0x75 ... 0x76] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
     [0x77] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
     [0x78] = { .simd_size = simd_other, .two_op = 1 },
@@ -489,6 +491,10 @@ static const struct ext0f38_table {
     [0x7a ... 0x7c] = { .simd_size = simd_none, .two_op = 1 },
     [0x7d ... 0x7e] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
     [0x7f] = { .simd_size = simd_packed_fp, .d8s = d8s_vl },
+    [0x88] = { .simd_size = simd_packed_fp, .two_op = 1, .d8s = d8s_dq },
+    [0x89] = { .simd_size = simd_packed_int, .two_op = 1, .d8s = d8s_dq },
+    [0x8a] = { .simd_size = simd_packed_fp, .to_mem = 1, .two_op = 1, .d8s = d8s_dq },
+    [0x8b] = { .simd_size = simd_packed_int, .to_mem = 1, .two_op = 1, .d8s = d8s_dq },
     [0x8c] = { .simd_size = simd_packed_int },
     [0x8d] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
     [0x8e] = { .simd_size = simd_packed_int, .to_mem = 1 },
@@ -1900,6 +1906,7 @@ static bool vcpu_has(
 #define vcpu_has_avx512bw()    vcpu_has(         7, EBX, 30, ctxt, ops)
 #define vcpu_has_avx512vl()    vcpu_has(         7, EBX, 31, ctxt, ops)
 #define vcpu_has_avx512_vbmi() vcpu_has(         7, ECX,  1, ctxt, ops)
+#define vcpu_has_avx512_vbmi2() vcpu_has(        7, ECX,  6, ctxt, ops)
 #define vcpu_has_rdpid()       vcpu_has(         7, ECX, 22, ctxt, ops)
 
 #define vcpu_has_clzero()      vcpu_has(0x80000008, EBX,  0, ctxt, ops)
@@ -8905,6 +8912,36 @@ x86_emulate(
         generate_exception_if(ea.type != OP_MEM || !vex.l || vex.w, EXC_UD);
         goto simd_0f_avx2;
 
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x62): /* vpexpand{b,w} [xyz]mm/mem,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x63): /* vpcompress{b,w} [xyz]mm,[xyz]mm/mem{k} */
+        host_and_vcpu_must_have(avx512_vbmi2);
+        elem_bytes = 1 << evex.w;
+        /* fall through */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x88): /* vexpandp{s,d} [xyz]mm/mem,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x89): /* vpexpand{d,q} [xyz]mm/mem,[xyz]mm{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x8a): /* vcompressp{s,d} [xyz]mm,[xyz]mm/mem{k} */
+    case X86EMUL_OPC_EVEX_66(0x0f38, 0x8b): /* vpcompress{d,q} [xyz]mm,[xyz]mm/mem{k} */
+        host_and_vcpu_must_have(avx512f);
+        generate_exception_if(evex.brs, EXC_UD);
+        avx512_vlen_check(false);
+        /*
+         * For the respective code below the main switch() to work we need to
+         * compact op_mask here: Memory accesses are non-sparse even if the
+         * mask register has sparsely set bits.
+         */
+        if ( likely(fault_suppression) )
+        {
+            n = 1 << ((b & 8 ? 2 : 4) + evex.lr - evex.w);
+            EXPECT(elem_bytes > 0);
+            ASSERT(op_bytes == n * elem_bytes);
+            op_mask &= ~0ULL >> (64 - n);
+            n = hweight64(op_mask);
+            op_bytes = n * elem_bytes;
+            if ( n )
+                op_mask = ~0ULL >> (64 - n);
+        }
+        goto simd_zmm;
+
     case X86EMUL_OPC_EVEX_66(0x0f38, 0x75): /* vpermi2{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f38, 0x7d): /* vpermt2{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
     case X86EMUL_OPC_EVEX_66(0x0f38, 0x8d): /* vperm{b,w} [xyz]mm/mem,[xyz]mm,[xyz]mm{k} */
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -109,6 +109,7 @@
 
 /* CPUID level 0x00000007:0.ecx */
 #define cpu_has_avx512_vbmi boot_cpu_has(X86_FEATURE_AVX512_VBMI)
+#define cpu_has_avx512_vbmi2 boot_cpu_has(X86_FEATURE_AVX512_VBMI2)
 #define cpu_has_rdpid boot_cpu_has(X86_FEATURE_RDPID)
 
 /* CPUID level 0x80000007.edx */
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -228,6 +228,7 @@ XEN_CPUFEATURE(AVX512_VBMI, 6*32+ 1) /
 XEN_CPUFEATURE(UMIP, 6*32+ 2) /*S User Mode Instruction Prevention */
 XEN_CPUFEATURE(PKU, 6*32+ 3) /*H Protection Keys for Userspace */
 XEN_CPUFEATURE(OSPKE, 6*32+ 4) /*! OS Protection Keys Enable */
+XEN_CPUFEATURE(AVX512_VBMI2, 6*32+ 6) /*A Additional AVX-512 Vector Byte Manipulation Instrs */
 XEN_CPUFEATURE(AVX512_VPOPCNTDQ, 6*32+14) /*A POPCNT for vectors of DW/QW */
 XEN_CPUFEATURE(RDPID, 6*32+22) /*A RDPID instruction */
 
--- a/xen/tools/gen-cpuid.py
+++ b/xen/tools/gen-cpuid.py
@@ -266,10 +266,10 @@ def crunch_numbers(state):
                   AVX512BW, AVX512VL, AVX512_4VNNIW, AVX512_4FMAPS,
                   AVX512_VPOPCNTDQ],
 
-        # AVX512 extensions acting solely on vectors of bytes/words are made
+        # AVX512 extensions acting (solely) on vectors of bytes/words are made
         # dependents of AVX512BW (as to requiring wider than 16-bit mask
         # registers), despite the SDM not formally making this connection.
-        AVX512BW: [AVX512_VBMI],
+        AVX512BW: [AVX512_VBMI, AVX512_VBMI2],
 
         # The features:
         #   * Single Thread Indirect Branch Predictors
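
[Editorial note, not part of the patch: the key step in the
x86_emulate.c hunk above is the op_mask compaction - compress/expand
access memory densely even when the mask register is sparse, so ahead
of the generic simd_zmm path the mask is clamped to the element count,
its set bits are counted, and it is replaced by a contiguous low-order
mask covering just the bytes that actually move. A standalone rendering
of that logic; compact_mask() and its signature are hypothetical, not
from the patch:

#include <stdint.h>

static uint64_t compact_mask(uint64_t op_mask, unsigned int n,
                             unsigned int elem_bytes,
                             unsigned int *op_bytes)
{
    op_mask &= ~0ULL >> (64 - n);      /* clamp to the n vector elements */
    n = __builtin_popcountll(op_mask); /* hweight64() in the patch */
    *op_bytes = n * elem_bytes;        /* bytes the insn actually moves */

    return n ? ~0ULL >> (64 - n) : 0;  /* dense low-order replacement mask */
}

With e.g. a sparse 0x55aa dword mask (8 of 16 bits set), op_bytes
shrinks from 64 to 32 and the mask becomes 0x00ff, matching the eight
contiguous dwords the instruction reads or writes.]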