[0xc1] = { DstMem|SrcReg|ModRM },
[0xc2] = { DstImplicit|SrcImmByte|ModRM, simd_any_fp, d8s_vl },
[0xc3] = { DstMem|SrcReg|ModRM|Mov },
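/* 0xc4 (pinsrw): the memory source is just a 16-bit word, so the packed-int operand sizing doesn't fit; the 0xc4 case handlers establish the size instead. */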
- [0xc4] = { DstReg|SrcImmByte|ModRM, simd_packed_int, 1 },
+ [0xc4] = { DstImplicit|SrcImmByte|ModRM, simd_none, 1 },
[0xc5] = { DstReg|SrcImmByte|ModRM|Mov },
[0xc6] = { DstImplicit|SrcImmByte|ModRM, simd_packed_fp, d8s_vl },
[0xc7] = { ImplicitOps|ModRM },
/* fall through */
case X86EMUL_OPC_VEX_66(0, 0xc4): /* vpinsrw */
case X86EMUL_OPC_EVEX_66(0, 0xc4): /* vpinsrw */
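/* {v,}pinsrw always reads 16 bits from memory; the SIMD register destination is left to the common code, hence the implicit destination. */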
- state->desc = DstReg | SrcMem16;
+ state->desc = DstImplicit | SrcMem16;
break;
case X86EMUL_OPC_VEX_66(0x0f, 0xc4): /* vpinsrw $imm8,r32/m16,xmm,xmm */
generate_exception_if(vex.l, EXC_UD);
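/* Stash the 16-bit source in the scratch buffer, for the generic SIMD path to pick up as a memory operand. */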
memcpy(mmvalp, &src.val, 2);
ea.type = OP_MEM;
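/* The table entry now says simd_none, so the operand size for the common code below has to be established here. */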
+ state->simd_size = simd_other;
goto simd_0f_int_imm8;
#ifndef X86EMUL_NO_SIMD
host_and_vcpu_must_have(avx512bw);
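/* The quadword element form is encodable only in 64-bit mode; outside of it, force the doubleword form. */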
if ( !mode_64bit() )
evex.w = 0;
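/* Copy no more than the actual source element (src.bytes) into the scratch buffer. */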
- memcpy(mmvalp, &src.val, op_bytes);
+ memcpy(mmvalp, &src.val, src.bytes);
ea.type = OP_MEM;
- op_bytes = src.bytes;
d = SrcMem16; /* Fake for the common SIMD code below. */
state->simd_size = simd_other;
goto avx512f_imm8_no_sae;
case X86EMUL_OPC_66(0x0f3a, 0x20): /* pinsrb $imm8,r32/m8,xmm */
case X86EMUL_OPC_66(0x0f3a, 0x22): /* pinsr{d,q} $imm8,r/m,xmm */
host_and_vcpu_must_have(sse4_1);
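/* An explicit get_fpu() looks to be redundant here: the common code reached via simd_0f3a_common acquires the FPU itself. */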
- get_fpu(X86EMUL_FPU_xmm);
- memcpy(mmvalp, &src.val, op_bytes);
+ memcpy(mmvalp, &src.val, src.bytes);
ea.type = OP_MEM;
- op_bytes = src.bytes;
d = SrcMem16; /* Fake for the common SIMD code below. */
state->simd_size = simd_other;
goto simd_0f3a_common;
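/* The VEX-encoded forms are 128-bit only: VEX.L set raises #UD. */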
generate_exception_if(vex.l, EXC_UD);
if ( !mode_64bit() )
vex.w = 0;
- memcpy(mmvalp, &src.val, op_bytes);
+ memcpy(mmvalp, &src.val, src.bytes);
ea.type = OP_MEM;
- op_bytes = src.bytes;
d = SrcMem16; /* Fake for the common SIMD code below. */
state->simd_size = simd_other;
goto simd_0f_int_imm8;