/*
 * NOTE(review): diff hunk (truncated).  Feature-name strings for CPUID
 * leaf 0x7, sub-leaf 1, %eax ("7a1"): bit 1 gains the "sm3" name next
 * to the existing bit 0 "sha512" entry.  The '-'/'+' markers are the
 * patch's removed/added lines.
 */
static const char *const str_7a1[32] =
{
- [ 0] = "sha512",
+ [ 0] = "sha512", [ 1] = "sm3",
[ 4] = "avx-vnni", [ 5] = "avx512-bf16",
/*
 * NOTE(review): diff hunk (table head not in view).  VEX opcode test
 * entries; fields are presumably {opcode bytes}, map length, access
 * flags, mandatory prefix, VEX.W constraint, VEX.L constraint — confirm
 * against the struct definition (not visible here).  New SM3 entries
 * are W0/L0 only, matching the #UD checks added in the emulator.
 */
{ { 0xd3 }, 2, T, R, pfx_no, W0, Ln }, /* vpdpwuuds */
{ { 0xd3 }, 2, T, R, pfx_66, W0, Ln }, /* vpdpwusds */
{ { 0xd3 }, 2, T, R, pfx_f3, W0, Ln }, /* vpdpwsuds */
+ { { 0xda }, 2, T, R, pfx_no, W0, L0 }, /* vsm3msg1 */
+ { { 0xda }, 2, T, R, pfx_66, W0, L0 }, /* vsm3msg2 */
{ { 0xdb }, 2, T, R, pfx_66, WIG, L0 }, /* vaesimc */
{ { 0xdc }, 2, T, R, pfx_66, WIG, Ln }, /* vaesenc */
{ { 0xdd }, 2, T, R, pfx_66, WIG, Ln }, /* vaesenclast */
{ { 0x7f }, 3, T, R, pfx_66, Wn, LIG }, /* vfnmsubsd */
{ { 0xce }, 3, T, R, pfx_66, W1, Ln }, /* vgf2p8affineqb */
{ { 0xcf }, 3, T, R, pfx_66, W1, Ln }, /* vgf2p8affineinvqb */
+ { { 0xde }, 3, T, R, pfx_66, W0, L0 }, /* vsm3rnds2 */
{ { 0xdf }, 3, T, R, pfx_66, WIG, Ln }, /* vaeskeygenassist */
{ { 0xf0 }, 3, T, R, pfx_f2, Wn, L0 }, /* rorx */
};
/*
 * Test-harness feature checks: each tests the feature bit in the raw
 * CPUID policy ("cp") and, for vector insns, that the required XSAVE
 * state components are enabled via xcr0_mask() — presumably 6 covers
 * the XMM+YMM components and 0xe6 adds the AVX-512 ones; confirm
 * against xcr0_mask()'s definition (not in view).  SM3 follows the
 * same pattern as its sibling SHA512 (VEX-encoded, XMM/YMM state).
 */
#define cpu_has_serialize cp.feat.serialize
#define cpu_has_avx512_fp16 (cp.feat.avx512_fp16 && xcr0_mask(0xe6))
#define cpu_has_sha512 (cp.feat.sha512 && xcr0_mask(6))
+#define cpu_has_sm3 (cp.feat.sm3 && xcr0_mask(6))
#define cpu_has_avx_vnni (cp.feat.avx_vnni && xcr0_mask(6))
#define cpu_has_avx512_bf16 (cp.feat.avx512_bf16 && xcr0_mask(0xe6))
#define cpu_has_avx_ifma (cp.feat.avx_ifma && xcr0_mask(6))
/*
 * Host (boot CPU) feature checks for CPUID leaf 7, sub-leaf 1, %eax;
 * SM3 is added alongside the existing entries, keyed off the
 * X86_FEATURE_SM3 bit defined in the cpufeatureset.
 */
/* CPUID level 0x00000007:1.eax */
#define cpu_has_sha512 boot_cpu_has(X86_FEATURE_SHA512)
+#define cpu_has_sm3 boot_cpu_has(X86_FEATURE_SM3)
#define cpu_has_avx_vnni boot_cpu_has(X86_FEATURE_AVX_VNNI)
#define cpu_has_avx512_bf16 boot_cpu_has(X86_FEATURE_AVX512_BF16)
#define cpu_has_avx_ifma boot_cpu_has(X86_FEATURE_AVX_IFMA)
/*
 * NOTE(review): two separate diff hunks spliced together (the repeated
 * [0xdf]/[0xf0] indices show these come from two different decode
 * attribute tables — presumably the 0f38 and 0f3a maps; confirm in the
 * full file).  The new 0f38 0xda (vsm3msg1/2) and 0f3a 0xde
 * (vsm3rnds2) entries use simd_size = simd_other with no disp8 scaling,
 * consistent with fixed 128-bit operands.
 */
[0xd3] = { .simd_size = simd_other },
[0xd6] = { .simd_size = simd_other, .d8s = d8s_vl },
[0xd7] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
+ [0xda] = { .simd_size = simd_other },
[0xdb] = { .simd_size = simd_packed_int, .two_op = 1 },
[0xdc ... 0xdf] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
[0xf0] = { .two_op = 1 },
[0xc2] = { .simd_size = simd_any_fp, .d8s = d8s_vl },
[0xcc] = { .simd_size = simd_other },
[0xce ... 0xcf] = { .simd_size = simd_packed_int, .d8s = d8s_vl },
+ [0xde] = { .simd_size = simd_other },
[0xdf] = { .simd_size = simd_packed_int, .two_op = 1 },
[0xf0] = {},
};
/*
 * Guest-policy feature checks used by the instruction emulator: each
 * reads the feature bit from the vCPU's CPUID policy (ctxt->cpuid).
 * SM3 is inserted in feature-word order next to SHA512.
 */
#define vcpu_has_tsxldtrk() (ctxt->cpuid->feat.tsxldtrk)
#define vcpu_has_avx512_fp16() (ctxt->cpuid->feat.avx512_fp16)
#define vcpu_has_sha512() (ctxt->cpuid->feat.sha512)
+#define vcpu_has_sm3() (ctxt->cpuid->feat.sm3)
#define vcpu_has_avx_vnni() (ctxt->cpuid->feat.avx_vnni)
#define vcpu_has_avx512_bf16() (ctxt->cpuid->feat.avx512_bf16)
#define vcpu_has_wrmsrns() (ctxt->cpuid->feat.wrmsrns)
/*
 * NOTE(review): diff hunk from inside the emulator's giant decode
 * switch (enclosing function not in view).  The new vsm3msg1/vsm3msg2
 * (VEX 0f38 /da) and vsm3rnds2 (VEX.66 0f3a /de) cases #UD when
 * VEX.W or VEX.L is set — SM3 insns are defined only for W0/L0,
 * 128-bit operands — and require the feature on both host and guest
 * before falling through to the shared SIMD handling paths.
 */
op_bytes = 16 << vex.l;
goto simd_0f_ymm;
+ case X86EMUL_OPC_VEX (0x0f38, 0xda): /* vsm3msg1 xmm/mem,xmm,xmm */
+ case X86EMUL_OPC_VEX_66(0x0f38, 0xda): /* vsm3msg2 xmm/mem,xmm,xmm */
+ generate_exception_if(vex.w || vex.l, X86_EXC_UD);
+ host_and_vcpu_must_have(sm3);
+ goto simd_0f_ymm;
+
case X86EMUL_OPC_VEX_66(0x0f38, 0xdc): /* vaesenc {x,y}mm/mem,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0xdd): /* vaesenclast {x,y}mm/mem,{x,y}mm,{x,y}mm */
case X86EMUL_OPC_VEX_66(0x0f38, 0xde): /* vaesdec {x,y}mm/mem,{x,y}mm,{x,y}mm */
fault_suppression = false;
goto avx512f_imm8_no_sae;
+ case X86EMUL_OPC_VEX_66(0x0f3a, 0xde): /* vsm3rnds2 $imm8,xmm/mem,xmm,xmm */
+ host_and_vcpu_must_have(sm3);
+ generate_exception_if(vex.w || vex.l, X86_EXC_UD);
+ op_bytes = 16;
+ goto simd_0f_imm8_ymm;
+
case X86EMUL_OPC_66(0x0f3a, 0xdf): /* aeskeygenassist $imm8,xmm/m128,xmm */
case X86EMUL_OPC_VEX_66(0x0f3a, 0xdf): /* vaeskeygenassist $imm8,xmm/m128,xmm */
host_and_vcpu_must_have(aesni);
/*
 * Public cpufeatureset entries, feature word 10 (CPUID 7:1.eax):
 * SM3 lands at bit 1, between SHA512 (bit 0) and AVX-VNNI (bit 4).
 * The "A" annotation presumably marks the feature as exposable to
 * guests by default — confirm against the legend at the top of
 * cpufeatureset.h (not in view).
 */
/* Intel-defined CPU features, CPUID level 0x00000007:1.eax, word 10 */
XEN_CPUFEATURE(SHA512, 10*32+ 0) /*A SHA512 Instructions */
+XEN_CPUFEATURE(SM3, 10*32+ 1) /*A SM3 Instructions */
XEN_CPUFEATURE(AVX_VNNI, 10*32+ 4) /*A AVX-VNNI Instructions */
XEN_CPUFEATURE(AVX512_BF16, 10*32+ 5) /*A AVX512 BFloat16 Instructions */
XEN_CPUFEATURE(FZRM, 10*32+10) /*A Fast Zero-length REP MOVSB */
# NOTE(review): diff hunk from the feature-dependency table (dict not
# fully in view).  SM3 is added as a dependent of AVX: the vsm3*
# instructions are VEX-encoded and operate on XMM registers, so they
# are unusable without AVX/XSAVE state — consistent with the
# xcr0_mask(6) check in the harness hunk above.
# for the XOP prefix). VEX/XOP-encoded GPR instructions, such as
# those from the BMI{1,2}, TBM and LWP sets function fine in the
# absence of any enabled xstate.
- AVX: [FMA, FMA4, F16C, AVX2, XOP, AVX_NE_CONVERT],
+ AVX: [FMA, FMA4, F16C, AVX2, XOP, AVX_NE_CONVERT, SM3],
# This dependency exists solely for the shadow pagetable code. If the
# host doesn't have NX support, the shadow pagetable code can't handle