static void gen_CRC32(DisasContext *s, X86DecodedInsn *decode)
{
MemOp ot = decode->op[2].ot;
+ TCGv_i32 tmp = tcg_temp_new_i32();

- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot));
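+ /* Fold the low 8 << ot bits of s->T1 into the CRC accumulated in tmp. */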
+ tcg_gen_trunc_tl_i32(tmp, s->T0);
+ gen_helper_crc32(s->T0, tmp, s->T1, tcg_constant_i32(8 << ot));
}
static void gen_CVTPI2Px(DisasContext *s, X86DecodedInsn *decode)
static void gen_LDMXCSR(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
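+ /* MXCSR is a 32-bit register, hence the truncation from T0. */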
+ tcg_gen_trunc_tl_i32(tmp, s->T0);
+ gen_helper_ldmxcsr(tcg_env, tmp);
}
static void gen_lxx_seg(DisasContext *s, X86DecodedInsn *decode, int seg)
static void gen_MOVMSK(DisasContext *s, X86DecodedInsn *decode)
{
typeof(gen_helper_movmskps_ymm) *ps, *pd, *fn;
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
ps = s->vex_l ? gen_helper_movmskps_ymm : gen_helper_movmskps_xmm;
pd = s->vex_l ? gen_helper_movmskpd_ymm : gen_helper_movmskpd_xmm;
fn = s->prefix & PREFIX_DATA ? pd : ps;
- fn(s->tmp2_i32, tcg_env, OP_PTR2);
- tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
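+ /* The helper gathers one sign bit per vector lane into a 32-bit mask,
+ which is then zero-extended into T0. */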
+ fn(tmp, tcg_env, OP_PTR2);
+ tcg_gen_extu_i32_tl(s->T0, tmp);
}
static void gen_MOVQ(DisasContext *s, X86DecodedInsn *decode)
switch (ot) {
case MO_32:
#ifdef TARGET_X86_64
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
- tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
- s->tmp2_i32, s->tmp3_i32);
- tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32);
- tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32);
- break;
+ {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
+
+ tcg_gen_trunc_tl_i32(t0, s->T0);
+ tcg_gen_trunc_tl_i32(t1, s->T1);
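+ /* tcg_gen_mulu2_i32 leaves the low half of the product in t0
+ and the high half in t1. */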
+ tcg_gen_mulu2_i32(t0, t1, t0, t1);
+ tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], t0);
+ tcg_gen_extu_i32_tl(s->T0, t1);
+ break;
+ }
case MO_64:
#endif
switch (ot) {
case MO_32:
#ifdef TARGET_X86_64
- tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
- tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b);
- tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
- break;
+ {
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
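+ /* Rotate at 32 bits so the bits wrap at bit 31 instead of
+ through the upper half of the 64-bit register. */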
+ tcg_gen_trunc_tl_i32(tmp, s->T0);
+ tcg_gen_rotri_i32(tmp, tmp, b);
+ tcg_gen_extu_i32_tl(s->T0, tmp);
+ break;
+ }
case MO_64:
#endif
}
return;
}
- in = s->tmp2_i32;
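+ /* On 64-bit targets, narrow into a fresh i32 temp; the #else
+ branch can use the 32-bit TCGv directly. */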
+ in = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(in, s->T1);
#else
in = s->T1;
return;
}
- out = s->tmp2_i32;
+ out = tcg_temp_new_i32();
#else
out = s->T0;
#endif
gen_pextr(s, decode, MO_32);
}
-static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode)
+static void gen_vinsertps(DisasContext *s, X86DecodedInsn *decode, TCGv_i32 tmp)
{
int val = decode->immediate;
int dest_word = (val >> 4) & 3;
}
if (new_mask != (val & 15)) {
- tcg_gen_st_i32(s->tmp2_i32, tcg_env,
+ tcg_gen_st_i32(tmp, tcg_env,
vector_elem_offset(&decode->op[0], MO_32, dest_word));
}
static void gen_VINSERTPS_r(DisasContext *s, X86DecodedInsn *decode)
{
int val = decode->immediate;
- tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
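+ /* Immediate bits 7:6 select the source dword in the register form. */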
+ tcg_gen_ld_i32(tmp, tcg_env,
vector_elem_offset(&decode->op[2], MO_32, (val >> 6) & 3));
- gen_vinsertps(s, decode);
+ gen_vinsertps(s, decode, tmp);
}
static void gen_VINSERTPS_m(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
- gen_vinsertps(s, decode);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
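+ /* The memory form always loads a single 32-bit element. */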
+ tcg_gen_qemu_ld_i32(tmp, s->A0, s->mem_index, MO_LEUL);
+ gen_vinsertps(s, decode, tmp);
}
static void gen_VINSERTx128(DisasContext *s, X86DecodedInsn *decode)
static void gen_VMOVSS(DisasContext *s, X86DecodedInsn *decode)
{
int vec_len = vector_len(s, decode);
+ TCGv_i32 tmp = tcg_temp_new_i32();

- tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
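+ /* Register-form MOVSS merges: copy the first source, then overwrite
+ its low dword with the second source's. */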
+ tcg_gen_ld_i32(tmp, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
tcg_gen_gvec_mov(MO_64, decode->op[0].offset, decode->op[1].offset, vec_len, vec_len);
- tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_st_i32(tmp, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
}
static void gen_VMOVSS_ld(DisasContext *s, X86DecodedInsn *decode)
{
int vec_len = vector_len(s, decode);
+ TCGv_i32 tmp = tcg_temp_new_i32();

- tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
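+ /* The load form zeroes the whole destination before writing the low dword. */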
+ tcg_gen_qemu_ld_i32(tmp, s->A0, s->mem_index, MO_LEUL);
tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
- tcg_gen_st_i32(s->tmp2_i32, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_st_i32(tmp, OP_PTR0, offsetof(ZMMReg, ZMM_L(0)));
}
static void gen_VMOVSS_st(DisasContext *s, X86DecodedInsn *decode)
{
- tcg_gen_ld_i32(s->tmp2_i32, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
- tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_ld_i32(tmp, OP_PTR2, offsetof(ZMMReg, ZMM_L(0)));
+ tcg_gen_qemu_st_i32(tmp, s->A0, s->mem_index, MO_LEUL);
}
static void gen_VPMASKMOV_st(DisasContext *s, X86DecodedInsn *decode)
/* TCG local register indexes (only used inside old micro ops) */
TCGv_i32 tmp2_i32;
- TCGv_i32 tmp3_i32;
TCGv_i64 tmp1_i64;
sigjmp_buf jmpbuf;
static void gen_ins(DisasContext *s, MemOp ot, TCGv dshift)
{
+ TCGv_i32 port = tcg_temp_new_i32();
+
gen_string_movl_A0_EDI(s);
/* Note: we must do this dummy write first to be restartable in
case of page fault. */
tcg_gen_movi_tl(s->T0, 0);
gen_op_st_v(s, ot, s->T0, s->A0);
- tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
- tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
- gen_helper_in_func(ot, s->T0, s->tmp2_i32);
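+ /* Only the low 16 bits of EDX form the port number. */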
+ tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]);
+ tcg_gen_andi_i32(port, port, 0xffff);
+ gen_helper_in_func(ot, s->T0, port);
gen_op_st_v(s, ot, s->T0, s->A0);
gen_op_add_reg(s, s->aflag, R_EDI, dshift);
- gen_bpt_io(s, s->tmp2_i32, ot);
+ gen_bpt_io(s, port, ot);
}
static void gen_outs(DisasContext *s, MemOp ot, TCGv dshift)
{
+ TCGv_i32 port = tcg_temp_new_i32();
+ TCGv_i32 value = tcg_temp_new_i32();
+
gen_string_movl_A0_ESI(s);
gen_op_ld_v(s, ot, s->T0, s->A0);
- tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
- tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
- tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
- gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
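+ /* As in gen_ins, the port is the low 16 bits of EDX;
+ the output value is truncated from T0. */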
+ tcg_gen_trunc_tl_i32(port, cpu_regs[R_EDX]);
+ tcg_gen_andi_i32(port, port, 0xffff);
+ tcg_gen_trunc_tl_i32(value, s->T0);
+ gen_helper_out_func(ot, port, value);
gen_op_add_reg(s, s->aflag, R_ESI, dshift);
- gen_bpt_io(s, s->tmp2_i32, ot);
+ gen_bpt_io(s, port, ot);
}
#define REP_MAX 65535
TCGCond cond, TCGv_i64 bndv)
{
TCGv ea = gen_lea_modrm_1(s, decode->mem, false);
+ TCGv_i32 t32 = tcg_temp_new_i32();
+ TCGv_i64 t64 = tcg_temp_new_i64();

- tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
+ tcg_gen_extu_tl_i64(t64, ea);
if (!CODE64(s)) {
- tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
+ tcg_gen_ext32u_i64(t64, t64);
}
- tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
- tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
- gen_helper_bndck(tcg_env, s->tmp2_i32);
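+ /* A nonzero comparison result makes helper_bndck raise a
+ bound-range exception (#BR). */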
+ tcg_gen_setcond_i64(cond, t64, t64, bndv);
+ tcg_gen_extrl_i64_i32(t32, t64);
+ gen_helper_bndck(tcg_env, t32);
}
/* generate modrm load of memory or register. */
static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
{
if (PE(s) && !VM86(s)) {
- tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
- gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
+ TCGv_i32 sel = tcg_temp_new_i32();
+
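+ /* In protected mode the helper validates the selector and loads
+ the descriptor; it may raise a fault. */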
+ tcg_gen_trunc_tl_i32(sel, src);
+ gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), sel);
/* abort translation because the addseg value may change or
because ss32 may change. For R_SS, translation must always
stop as a special handling must be done to disable hardware
dc->tmp1_i64 = tcg_temp_new_i64();
dc->tmp2_i32 = tcg_temp_new_i32();
- dc->tmp3_i32 = tcg_temp_new_i32();
dc->cc_srcT = tcg_temp_new();
}