static const char *const str_7a1[32] =
{
[ 0] = "sha512", [ 1] = "sm3",
+ [ 2] = "sm4",

[ 4] = "avx-vnni", [ 5] = "avx512-bf16",
[10] = "fzrm", [11] = "fsrs",
{ { 0xd3 }, 2, T, R, pfx_f3, W0, Ln }, /* vpdpwsuds */
{ { 0xda }, 2, T, R, pfx_no, W0, L0 }, /* vsm3msg1 */
{ { 0xda }, 2, T, R, pfx_66, W0, L0 }, /* vsm3msg2 */
+ { { 0xda }, 2, T, R, pfx_f3, W0, Ln }, /* vsm4key4 */
+ { { 0xda }, 2, T, R, pfx_f2, W0, Ln }, /* vsm4rnds4 */
{ { 0xdb }, 2, T, R, pfx_66, WIG, L0 }, /* vaesimc */
{ { 0xdc }, 2, T, R, pfx_66, WIG, Ln }, /* vaesenc */
{ { 0xdd }, 2, T, R, pfx_66, WIG, Ln }, /* vaesenclast */
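
All four rows for opcode 0f38 0xda above share the same opcode byte; only the mandatory prefix selects among the SM3 and SM4 forms. A minimal standalone sketch of that disambiguation (the enum and function are illustrative, not the harness's real pfx_* definitions):

#include <stdio.h>

/* Illustrative only - not the test harness's actual pfx_* values. */
enum pfx { pfx_no, pfx_66, pfx_f3, pfx_f2 };

static const char *name_0f38_da(enum pfx p)
{
    switch ( p )
    {
    case pfx_no: return "vsm3msg1";  /* NP 0F38 DA */
    case pfx_66: return "vsm3msg2";  /* 66 0F38 DA */
    case pfx_f3: return "vsm4key4";  /* F3 0F38 DA */
    case pfx_f2: return "vsm4rnds4"; /* F2 0F38 DA */
    }
    return "?";
}

int main(void)
{
    for ( int p = pfx_no; p <= pfx_f2; ++p )
        printf("0f38 da, pfx %d -> %s\n", p, name_0f38_da(p));
    return 0;
}
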
#define cpu_has_avx512_fp16 (cp.feat.avx512_fp16 && xcr0_mask(0xe6))
#define cpu_has_sha512 (cp.feat.sha512 && xcr0_mask(6))
#define cpu_has_sm3 (cp.feat.sm3 && xcr0_mask(6))
+#define cpu_has_sm4 (cp.feat.sm4 && xcr0_mask(6))
#define cpu_has_avx_vnni (cp.feat.avx_vnni && xcr0_mask(6))
#define cpu_has_avx512_bf16 (cp.feat.avx512_bf16 && xcr0_mask(0xe6))
#define cpu_has_avx_ifma (cp.feat.avx_ifma && xcr0_mask(6))
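
The xcr0_mask() arguments above express which XSAVE components must be enabled: 6 covers the SSE/AVX (XMM+YMM) state bits, while 0xe6 additionally covers the AVX-512 opmask/ZMM bits. A rough, self-contained stand-in for such a predicate, assuming these helper names rather than the harness's actual ones:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only - reads XCR0 via XGETBV (requires CR4.OSXSAVE). */
static uint64_t get_xcr0(void)
{
    uint32_t lo, hi;

    asm volatile ( "xgetbv" : "=a" (lo), "=d" (hi) : "c" (0) );
    return ((uint64_t)hi << 32) | lo;
}

/*
 * xcr0_mask(6)    - bits 1,2: XMM + YMM state (SSE/AVX).
 * xcr0_mask(0xe6) - additionally bits 5-7: opmask + ZMM state (AVX-512).
 */
static bool xcr0_mask(uint64_t mask)
{
    return (get_xcr0() & mask) == mask;
}

int main(void)
{
    printf("AVX state enabled: %d\n", xcr0_mask(6));
    printf("AVX-512 state enabled: %d\n", xcr0_mask(0xe6));
    return 0;
}
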
/* CPUID level 0x00000007:1.eax */
#define cpu_has_sha512 boot_cpu_has(X86_FEATURE_SHA512)
#define cpu_has_sm3 boot_cpu_has(X86_FEATURE_SM3)
+#define cpu_has_sm4 boot_cpu_has(X86_FEATURE_SM4)
#define cpu_has_avx_vnni boot_cpu_has(X86_FEATURE_AVX_VNNI)
#define cpu_has_avx512_bf16 boot_cpu_has(X86_FEATURE_AVX512_BF16)
#define cpu_has_avx_ifma boot_cpu_has(X86_FEATURE_AVX_IFMA)
#define vcpu_has_avx512_fp16() (ctxt->cpuid->feat.avx512_fp16)
#define vcpu_has_sha512() (ctxt->cpuid->feat.sha512)
#define vcpu_has_sm3() (ctxt->cpuid->feat.sm3)
+#define vcpu_has_sm4() (ctxt->cpuid->feat.sm4)
#define vcpu_has_avx_vnni() (ctxt->cpuid->feat.avx_vnni)
#define vcpu_has_avx512_bf16() (ctxt->cpuid->feat.avx512_bf16)
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
host_and_vcpu_must_have(sm3);
goto simd_0f_ymm;
+ case X86EMUL_OPC_VEX_F3(0x0f38, 0xda): /* vsm4key4 [xy]mm/mem,[xy]mm,[xy]mm */
+ case X86EMUL_OPC_VEX_F2(0x0f38, 0xda): /* vsm4rnds4 [xy]mm/mem,[xy]mm,[xy]mm */
+ host_and_vcpu_must_have(sm4);
+ generate_exception_if(vex.w, X86_EXC_UD);
+ op_bytes = 16 << vex.l;
+ goto simd_0f_ymm;
+
case X86EMUL_OPC_VEX_66(0x0f38, 0xdc): /* vaesenc {x,y}mm/mem,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0xdd): /* vaesenclast {x,y}mm/mem,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0xde): /* vaesdec {x,y}mm/mem,{x,y}mm,{x,y}mm */
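
In the vsm4* cases added above, generate_exception_if(vex.w, X86_EXC_UD) rejects VEX.W=1 (the instructions are defined W0-only) and op_bytes = 16 << vex.l selects 16 or 32 bytes depending on VEX.L. As a standalone illustration only (not emulator code, register numbers picked arbitrarily), the 3-byte VEX form of vsm4key4 xmm2,xmm1,xmm0 under those rules:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned int dst = 2, src1 = 1, src2 = 0, l = 0;   /* L=0 -> 16-byte op */
    uint8_t vex[3], modrm;

    vex[0] = 0xc4;                                     /* 3-byte VEX escape */
    vex[1] = 0xe0 | 0x02;                              /* ~R/~X/~B = 1, mmmmm = 2 (0f38) */
    vex[2] = ((~src1 & 0xf) << 3) | (l << 2) | 0x02;   /* W=0, vvvv = ~src1, pp = 2 (F3) */
    modrm  = 0xc0 | (dst << 3) | src2;                 /* register-direct operands */

    /* Prints "c4 e2 72 da d0": vsm4key4 xmm2,xmm1,xmm0 (pp = 3 would give vsm4rnds4). */
    printf("%02x %02x %02x da %02x\n", vex[0], vex[1], vex[2], modrm);
    return 0;
}
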
/* Intel-defined CPU features, CPUID level 0x00000007:1.eax, word 10 */
XEN_CPUFEATURE(SHA512, 10*32+ 0) /*A SHA512 Instructions */
XEN_CPUFEATURE(SM3, 10*32+ 1) /*A SM3 Instructions */
+XEN_CPUFEATURE(SM4, 10*32+ 2) /*A SM4 Instructions */
XEN_CPUFEATURE(AVX_VNNI, 10*32+ 4) /*A AVX-VNNI Instructions */
XEN_CPUFEATURE(AVX512_BF16, 10*32+ 5) /*A AVX512 BFloat16 Instructions */
XEN_CPUFEATURE(FZRM, 10*32+10) /*A Fast Zero-length REP MOVSB */
# enabled. Certain later extensions, acting on 256-bit vectors of
# integers, better depend on AVX2 than AVX.
AVX2: [AVX512F, VAES, VPCLMULQDQ, AVX_VNNI, AVX_IFMA, AVX_VNNI_INT8,
- AVX_VNNI_INT16, SHA512],
+ AVX_VNNI_INT16, SHA512, SM4],
# AVX512F is taken to mean hardware support for 512bit registers
# (which in practice depends on the EVEX prefix to encode) as well