/*
 * Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
 * Copyright (C) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"
#include "asi.h"
#include "fpu/softfloat.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#ifdef TARGET_SPARC64
# define gen_helper_rdpsr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdasr17(D, E)               qemu_build_not_reached()
# define gen_helper_rett(E)                     qemu_build_not_reached()
# define gen_helper_power_down(E)               qemu_build_not_reached()
# define gen_helper_wrpsr(E, S)                 qemu_build_not_reached()
#else
# define gen_helper_clear_softint(E, S)         qemu_build_not_reached()
# define gen_helper_done(E)                     qemu_build_not_reached()
# define gen_helper_flushw(E)                   qemu_build_not_reached()
# define gen_helper_fmul8x16a(D, S1, S2)        qemu_build_not_reached()
# define gen_helper_rdccr(D, E)                 qemu_build_not_reached()
# define gen_helper_rdcwp(D, E)                 qemu_build_not_reached()
# define gen_helper_restored(E)                 qemu_build_not_reached()
# define gen_helper_retry(E)                    qemu_build_not_reached()
# define gen_helper_saved(E)                    qemu_build_not_reached()
# define gen_helper_set_softint(E, S)           qemu_build_not_reached()
# define gen_helper_tick_get_count(D, E, T, C)  qemu_build_not_reached()
# define gen_helper_tick_set_count(P, S)        qemu_build_not_reached()
# define gen_helper_tick_set_limit(P, S)        qemu_build_not_reached()
# define gen_helper_wrccr(E, S)                 qemu_build_not_reached()
# define gen_helper_wrcwp(E, S)                 qemu_build_not_reached()
# define gen_helper_wrgl(E, S)                  qemu_build_not_reached()
# define gen_helper_write_softint(E, S)         qemu_build_not_reached()
# define gen_helper_wrpil(E, S)                 qemu_build_not_reached()
# define gen_helper_wrpstate(E, S)              qemu_build_not_reached()
# define gen_helper_cmask8              ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_cmask32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpeq32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpgt32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmple32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne8             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpne32            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule8            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpule32           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt8            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt16           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fcmpugt32           ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fdtox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fexpand             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8sux16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8ulx16          ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fmul8x16            ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fpmerge             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fqtox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas16             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fslas32             ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fstox               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtod               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtoq               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_fxtos               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_pdist               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulx               ({ qemu_build_not_reached(); NULL; })
# define gen_helper_xmulxhi             ({ qemu_build_not_reached(); NULL; })
# define MAXTL_MASK                             0
#endif
/* Dynamic PC, must exit to main loop. */
#define DYNAMIC_PC         1
/* Dynamic PC, one of two values according to jump_pc[T2]. */
#define JUMP_PC            2
/* Dynamic PC, may lookup next TB. */
#define DYNAMIC_PC_LOOKUP  3

#define DISAS_EXIT  DISAS_TARGET_0
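
/*
 * Editorial note (illustrative, not from the original source): SPARC has
 * delayed branches, so the architecture exposes both %pc and %npc.  The
 * translator tracks them in dc->pc/dc->npc; whenever a value is not known
 * statically it is replaced by one of the DYNAMIC_PC/JUMP_PC markers above
 * (all of which have the low two bits set, hence the "npc & 3" tests below),
 * and the TCG globals cpu_pc/cpu_npc carry the runtime value instead.
 */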
/* global register indexes */
static TCGv_ptr cpu_regwptr;
static TCGv cpu_pc, cpu_npc;
static TCGv cpu_regs[32];
static TCGv cpu_y;
static TCGv cpu_tbr;
static TCGv cpu_cond;
static TCGv cpu_cc_N;
static TCGv cpu_cc_V;
static TCGv cpu_icc_Z;
static TCGv cpu_icc_C;
#ifdef TARGET_SPARC64
static TCGv cpu_xcc_Z;
static TCGv cpu_xcc_C;
static TCGv_i32 cpu_fprs;
static TCGv cpu_gsr;
#else
# define cpu_fprs ({ qemu_build_not_reached(); (TCGv)NULL; })
# define cpu_gsr ({ qemu_build_not_reached(); (TCGv)NULL; })
#endif

#ifdef TARGET_SPARC64
#define cpu_cc_Z  cpu_xcc_Z
#define cpu_cc_C  cpu_xcc_C
#else
#define cpu_cc_Z  cpu_icc_Z
#define cpu_cc_C  cpu_icc_C
#define cpu_xcc_Z ({ qemu_build_not_reached(); NULL; })
#define cpu_xcc_C ({ qemu_build_not_reached(); NULL; })
#endif

/* Floating point comparison registers */
static TCGv_i32 cpu_fcc[TARGET_FCCREGS];

#define env_field_offsetof(X)     offsetof(CPUSPARCState, X)
#ifdef TARGET_SPARC64
# define env32_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
# define env64_field_offsetof(X)  env_field_offsetof(X)
#else
# define env32_field_offsetof(X)  env_field_offsetof(X)
# define env64_field_offsetof(X)  ({ qemu_build_not_reached(); 0; })
#endif
typedef struct DisasCompare {
    TCGCond cond;
    TCGv c1;
    int c2;
} DisasCompare;

typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    TCGv_i32 excp;
    /* Saved state at parent insn. */
    target_ulong pc;
    target_ulong npc;
} DisasDelayException;

typedef struct DisasContext {
    DisasContextBase base;
    target_ulong pc;    /* current Program Counter: integer or DYNAMIC_PC */
    target_ulong npc;   /* next PC: integer or DYNAMIC_PC or JUMP_PC */

    /* Used when JUMP_PC value is used. */
    DisasCompare jump;
    target_ulong jump_pc[2];

    int mem_idx;
    bool cpu_cond_live;
    bool fpu_enabled;
    bool address_mask_32bit;
#ifndef CONFIG_USER_ONLY
    bool supervisor;
#ifdef TARGET_SPARC64
    bool hypervisor;
#endif
#endif

    sparc_def_t *def;
#ifdef TARGET_SPARC64
    int fprs_dirty;
    int asi;
#endif
    DisasDelayException *delay_excp_list;
} DisasContext;
// This function uses non-native bit order
#define GET_FIELD(X, FROM, TO)                                  \
    ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))

// This function uses the order in the manuals, i.e. bit 0 is 2^0
#define GET_FIELD_SP(X, FROM, TO)               \
    GET_FIELD(X, 31 - (TO), 31 - (FROM))

#define GET_FIELDs(x, a, b)     sign_extend(GET_FIELD(x, a, b), (b) - (a) + 1)
#define GET_FIELD_SPs(x, a, b)  sign_extend(GET_FIELD_SP(x, a, b), ((b) - (a) + 1))

#define UA2005_HTRAP_MASK 0xff
#define V8_TRAP_MASK 0x7f

#define IS_IMM (insn & (1 << 13))
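
/*
 * Worked example (editorial note, not from the original source):
 * GET_FIELD(X, 0, 1) is (X >> 30) & 3, the top two bits of the word
 * (the SPARC "op" field), while GET_FIELD_SP(X, 13, 13) is
 * (X >> 13) & 1, the same immediate bit that IS_IMM tests directly.
 */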
static void gen_update_fprs_dirty(DisasContext *dc, int rd)
{
#if defined(TARGET_SPARC64)
    int bit = (rd < 32) ? 1 : 2;

    /* If we know we've already set this bit within the TB,
       we can avoid setting it again.  */
    if (!(dc->fprs_dirty & bit)) {
        dc->fprs_dirty |= bit;
        tcg_gen_ori_i32(cpu_fprs, cpu_fprs, bit);
    }
#endif
}

/* floating point registers moves */
static int gen_offset_fpr_F(unsigned int reg)
{
    int ret;

    tcg_debug_assert(reg < 32);
    ret = offsetof(CPUSPARCState, fpr[reg / 2]);
    if (reg & 1) {
        ret += offsetof(CPU_DoubleU, l.lower);
    } else {
        ret += offsetof(CPU_DoubleU, l.upper);
    }
    return ret;
}

static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_ld_i32(ret, tcg_env, gen_offset_fpr_F(src));
    return ret;
}

static void gen_store_fpr_F(DisasContext *dc, unsigned int dst, TCGv_i32 v)
{
    tcg_gen_st_i32(v, tcg_env, gen_offset_fpr_F(dst));
    gen_update_fprs_dirty(dc, dst);
}
static int gen_offset_fpr_D(unsigned int reg)
{
    tcg_debug_assert(reg < 64);
    tcg_debug_assert(reg % 2 == 0);
    return offsetof(CPUSPARCState, fpr[reg / 2]);
}

static TCGv_i64 gen_load_fpr_D(DisasContext *dc, unsigned int src)
{
    TCGv_i64 ret = tcg_temp_new_i64();

    tcg_gen_ld_i64(ret, tcg_env, gen_offset_fpr_D(src));
    return ret;
}

static void gen_store_fpr_D(DisasContext *dc, unsigned int dst, TCGv_i64 v)
{
    tcg_gen_st_i64(v, tcg_env, gen_offset_fpr_D(dst));
    gen_update_fprs_dirty(dc, dst);
}

static TCGv_i128 gen_load_fpr_Q(DisasContext *dc, unsigned int src)
{
    TCGv_i128 ret = tcg_temp_new_i128();
    TCGv_i64 h = gen_load_fpr_D(dc, src);
    TCGv_i64 l = gen_load_fpr_D(dc, src + 2);

    tcg_gen_concat_i64_i128(ret, l, h);
    return ret;
}

static void gen_store_fpr_Q(DisasContext *dc, unsigned int dst, TCGv_i128 v)
{
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, v);
    gen_store_fpr_D(dc, dst, h);
    gen_store_fpr_D(dc, dst + 2, l);
}
#ifdef CONFIG_USER_ONLY
#define supervisor(dc) 0
#define hypervisor(dc) 0
#else
#ifdef TARGET_SPARC64
#define hypervisor(dc) (dc->hypervisor)
#define supervisor(dc) (dc->supervisor | dc->hypervisor)
#else
#define supervisor(dc) (dc->supervisor)
#define hypervisor(dc) 0
#endif
#endif

#if !defined(TARGET_SPARC64)
# define AM_CHECK(dc)  false
#elif defined(TARGET_ABI32)
# define AM_CHECK(dc)  true
#elif defined(CONFIG_USER_ONLY)
# define AM_CHECK(dc)  false
#else
# define AM_CHECK(dc)  ((dc)->address_mask_32bit)
#endif
static void gen_address_mask(DisasContext *dc, TCGv addr)
{
    if (AM_CHECK(dc)) {
        tcg_gen_andi_tl(addr, addr, 0xffffffffULL);
    }
}

static target_ulong address_mask_i(DisasContext *dc, target_ulong addr)
{
    return AM_CHECK(dc) ? (uint32_t)addr : addr;
}

static TCGv gen_load_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        TCGv t = tcg_temp_new();
        tcg_gen_movi_tl(t, 0);
        return t;
    }
}

static void gen_store_gpr(DisasContext *dc, int reg, TCGv v)
{
    if (reg > 0) {
        assert(reg < 32);
        tcg_gen_mov_tl(cpu_regs[reg], v);
    }
}

static TCGv gen_dest_gpr(DisasContext *dc, int reg)
{
    if (reg > 0) {
        assert(reg < 32);
        return cpu_regs[reg];
    } else {
        return tcg_temp_new();
    }
}
static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc)
{
    return translator_use_goto_tb(&s->base, pc) &&
           translator_use_goto_tb(&s->base, npc);
}

static void gen_goto_tb(DisasContext *s, int tb_num,
                        target_ulong pc, target_ulong npc)
{
    if (use_goto_tb(s, pc, npc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_exit_tb(s->base.tb, tb_num);
    } else {
        /* jump to another page: we can use an indirect jump */
        tcg_gen_movi_tl(cpu_pc, pc);
        tcg_gen_movi_tl(cpu_npc, npc);
        tcg_gen_lookup_and_goto_ptr();
    }
}
static TCGv gen_carry32(void)
{
    if (TARGET_LONG_BITS == 64) {
        TCGv t = tcg_temp_new();
        tcg_gen_extract_tl(t, cpu_icc_C, 32, 1);
        return t;
    }
    return cpu_icc_C;
}
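
/*
 * Editorial note (illustrative): on a 64-bit target the 32-bit carry is
 * not stored as 0/1.  gen_op_addcc_int() and gen_op_subcc_int() below
 * leave res ^ src1 ^ src2 in cpu_icc_C, and bit 32 of that expression is
 * the carry out of bit 31, hence the extract above.  On a 32-bit target
 * cpu_icc_C already holds the carry directly.
 */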
static void gen_op_addcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_add2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src2);
    tcg_gen_andc_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
    if (TARGET_LONG_BITS == 64) {
        /*
         * Carry-in to bit 32 is result ^ src1 ^ src2.
         * We already have the src xor term in Z, from computation of V.
         */
        tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
        tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    }
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}
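
/*
 * Verification note for the flags above (editorial, not from the original
 * source): cpu_cc_Z holds src1 ^ src2 only transiently.
 *   V = (N ^ src2) & ~(src1 ^ src2)
 * is the standard "operands share a sign, result does not" overflow test,
 * and res ^ src1 ^ src2 reproduces the per-bit carry-in, so its bit 32
 * equals the carry out of bit 31 used for icc.C.
 */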
static void gen_op_addcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, NULL);
}

static void gen_op_taddcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_addcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_addc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, gen_carry32());
}

static void gen_op_addccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_addxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_add_tl(dst, src1, src2);
    tcg_gen_add_tl(dst, dst, cpu_cc_C);
}

static void gen_op_addxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_addcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_subcc_int(TCGv dst, TCGv src1, TCGv src2, TCGv cin)
{
    TCGv z = tcg_constant_tl(0);

    if (cin) {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, cin, z);
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, cpu_cc_N, cpu_cc_C, src2, z);
    } else {
        tcg_gen_sub2_tl(cpu_cc_N, cpu_cc_C, src1, z, src2, z);
    }
    tcg_gen_neg_tl(cpu_cc_C, cpu_cc_C);
    tcg_gen_xor_tl(cpu_cc_Z, src1, src2);
    tcg_gen_xor_tl(cpu_cc_V, cpu_cc_N, src1);
    tcg_gen_and_tl(cpu_cc_V, cpu_cc_V, cpu_cc_Z);
#ifdef TARGET_SPARC64
    tcg_gen_xor_tl(cpu_icc_C, cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_subcc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, NULL);
}

static void gen_op_tsubcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv t = tcg_temp_new();

    /* Save the tag bits around modification of dst. */
    tcg_gen_or_tl(t, src1, src2);

    gen_op_subcc(dst, src1, src2);

    /* Incorporate tag bits into icc.V */
    tcg_gen_andi_tl(t, t, 3);
    tcg_gen_neg_tl(t, t);
    tcg_gen_ext32u_tl(t, t);
    tcg_gen_or_tl(cpu_cc_V, cpu_cc_V, t);
}

static void gen_op_subc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, gen_carry32());
}

static void gen_op_subccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, gen_carry32());
}

static void gen_op_subxc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_sub_tl(dst, src1, src2);
    tcg_gen_sub_tl(dst, dst, cpu_cc_C);
}

static void gen_op_subxccc(TCGv dst, TCGv src1, TCGv src2)
{
    gen_op_subcc_int(dst, src1, src2, cpu_cc_C);
}

static void gen_op_mulscc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv zero = tcg_constant_tl(0);
    TCGv one = tcg_constant_tl(1);
    TCGv t_src1 = tcg_temp_new();
    TCGv t_src2 = tcg_temp_new();
    TCGv t0 = tcg_temp_new();

    tcg_gen_ext32u_tl(t_src1, src1);
    tcg_gen_ext32u_tl(t_src2, src2);

    /*
     * if (!(env->y & 1))
     *   src2 = 0;
     */
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, t_src2, cpu_y, one, zero, t_src2);

    /*
     * b2 = src1 & 1;
     * y = (b2 << 31) | (y >> 1);
     */
    tcg_gen_extract_tl(t0, cpu_y, 1, 31);
    tcg_gen_deposit_tl(cpu_y, t0, src1, 31, 1);

    /* b1 = N ^ V */
    tcg_gen_xor_tl(t0, cpu_cc_N, cpu_cc_V);

    /*
     * src1 = (b1 << 31) | (src1 >> 1)
     */
    tcg_gen_andi_tl(t0, t0, 1u << 31);
    tcg_gen_shri_tl(t_src1, t_src1, 1);
    tcg_gen_or_tl(t_src1, t_src1, t0);

    gen_op_addcc(dst, t_src1, t_src2);
}
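
/*
 * Editorial summary (illustrative): MULScc performs a single step of the
 * classic shift-and-add multiply.  The multiplier lives in %y: each step
 * conditionally adds the multiplicand when the low bit of %y is set,
 * shifts %y right by one (inserting the low bit of src1), and shifts the
 * partial sum right via src1 with icc.N ^ icc.V entering at bit 31.
 * A sequence of such steps is how pre-v8 software implemented multiply.
 */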
static void gen_op_multiply(TCGv dst, TCGv src1, TCGv src2, int sign_ext)
{
#if TARGET_LONG_BITS == 32
    if (sign_ext) {
        tcg_gen_muls2_tl(dst, cpu_y, src1, src2);
    } else {
        tcg_gen_mulu2_tl(dst, cpu_y, src1, src2);
    }
#else
    TCGv t0 = tcg_temp_new_i64();
    TCGv t1 = tcg_temp_new_i64();

    if (sign_ext) {
        tcg_gen_ext32s_i64(t0, src1);
        tcg_gen_ext32s_i64(t1, src2);
    } else {
        tcg_gen_ext32u_i64(t0, src1);
        tcg_gen_ext32u_i64(t1, src2);
    }

    tcg_gen_mul_i64(dst, t0, t1);
    tcg_gen_shri_i64(cpu_y, dst, 32);
#endif
}

static void gen_op_umul(TCGv dst, TCGv src1, TCGv src2)
{
    /* zero-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 0);
}

static void gen_op_smul(TCGv dst, TCGv src1, TCGv src2)
{
    /* sign-extend truncated operands before multiplication */
    gen_op_multiply(dst, src1, src2, 1);
}

static void gen_op_umulxhi(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv discard = tcg_temp_new();
    tcg_gen_mulu2_tl(discard, dst, src1, src2);
}
static void gen_op_fpmaddx(TCGv_i64 dst, TCGv_i64 src1,
                           TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_mul_i64(t, src1, src2);
    tcg_gen_add_i64(dst, src3, t);
}

static void gen_op_fpmaddxhi(TCGv_i64 dst, TCGv_i64 src1,
                             TCGv_i64 src2, TCGv_i64 src3)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 z = tcg_constant_i64(0);

    tcg_gen_mulu2_i64(l, h, src1, src2);
    tcg_gen_add2_i64(l, dst, l, h, src3, z);
}

static void gen_op_sdiv(TCGv dst, TCGv src1, TCGv src2)
{
#ifdef TARGET_SPARC64
    gen_helper_sdiv(dst, tcg_env, src1, src2);
    tcg_gen_ext32s_tl(dst, dst);
#else
    TCGv_i64 t64 = tcg_temp_new_i64();
    gen_helper_sdiv(t64, tcg_env, src1, src2);
    tcg_gen_trunc_i64_tl(dst, t64);
#endif
}

static void gen_op_udivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_udiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32u_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_sdivcc(TCGv dst, TCGv src1, TCGv src2)
{
    TCGv_i64 t64;

#ifdef TARGET_SPARC64
    t64 = cpu_cc_V;
#else
    t64 = tcg_temp_new_i64();
#endif

    gen_helper_sdiv(t64, tcg_env, src1, src2);

#ifdef TARGET_SPARC64
    tcg_gen_ext32s_tl(cpu_cc_N, t64);
    tcg_gen_shri_tl(cpu_cc_V, t64, 32);
    tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_icc_C, 0);
#else
    tcg_gen_extr_i64_tl(cpu_cc_N, cpu_cc_V, t64);
#endif
    tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
    tcg_gen_movi_tl(cpu_cc_C, 0);
    tcg_gen_mov_tl(dst, cpu_cc_N);
}

static void gen_op_taddcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_taddcctv(dst, tcg_env, src1, src2);
}

static void gen_op_tsubcctv(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_tsubcctv(dst, tcg_env, src1, src2);
}

static void gen_op_popc(TCGv dst, TCGv src1, TCGv src2)
{
    tcg_gen_ctpop_tl(dst, src2);
}

static void gen_op_lzcnt(TCGv dst, TCGv src)
{
    tcg_gen_clzi_tl(dst, src, TARGET_LONG_BITS);
}
#ifndef TARGET_SPARC64
static void gen_helper_array8(TCGv dst, TCGv src1, TCGv src2)
{
    g_assert_not_reached();
}
#endif

static void gen_op_array16(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 1);
}

static void gen_op_array32(TCGv dst, TCGv src1, TCGv src2)
{
    gen_helper_array8(dst, src1, src2);
    tcg_gen_shli_tl(dst, dst, 2);
}

static void gen_op_fpack16(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack16(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpackfix(TCGv_i32 dst, TCGv_i64 src)
{
#ifdef TARGET_SPARC64
    gen_helper_fpackfix(dst, cpu_gsr, src);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fpack32(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_fpack32(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}
static void gen_op_fpadds16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_add_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpsubs16s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t[2];

    for (int i = 0; i < 2; i++) {
        TCGv_i32 u = tcg_temp_new_i32();
        TCGv_i32 v = tcg_temp_new_i32();

        tcg_gen_sextract_i32(u, src1, i * 16, 16);
        tcg_gen_sextract_i32(v, src2, i * 16, 16);
        tcg_gen_sub_i32(u, u, v);
        tcg_gen_smax_i32(u, u, tcg_constant_i32(INT16_MIN));
        tcg_gen_smin_i32(u, u, tcg_constant_i32(INT16_MAX));
        t[i] = u;
    }
    tcg_gen_deposit_i32(d, t[0], t[1], 16, 16);
}

static void gen_op_fpadds32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_add_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src2);
    tcg_gen_andc_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
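
/*
 * The saturation trick above, spelled out (editorial note): v < 0 iff the
 * operands share a sign and r has the opposite one, i.e. signed overflow.
 * In that case the mathematically correct sign is the inverse of r's, so
 * t = (r >= 0) + INT32_MAX wraps to INT32_MIN when r >= 0 (negative
 * overflow) and stays INT32_MAX when r < 0 (positive overflow); the
 * movcond then selects the saturated value only on overflow.
 */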
static void gen_op_fpsubs32s(TCGv_i32 d, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 r = tcg_temp_new_i32();
    TCGv_i32 t = tcg_temp_new_i32();
    TCGv_i32 v = tcg_temp_new_i32();
    TCGv_i32 z = tcg_constant_i32(0);

    tcg_gen_sub_i32(r, src1, src2);
    tcg_gen_xor_i32(t, src1, src2);
    tcg_gen_xor_i32(v, r, src1);
    tcg_gen_and_i32(v, v, t);

    tcg_gen_setcond_i32(TCG_COND_GE, t, r, z);
    tcg_gen_addi_i32(t, t, INT32_MAX);

    tcg_gen_movcond_i32(TCG_COND_LT, d, v, z, t, r);
}
static void gen_op_faligndata_i(TCGv_i64 dst, TCGv_i64 s1,
                                TCGv_i64 s2, TCGv gsr)
{
#ifdef TARGET_SPARC64
    TCGv t1, t2, shift;

    t1 = tcg_temp_new();
    t2 = tcg_temp_new();
    shift = tcg_temp_new();

    tcg_gen_andi_tl(shift, gsr, 7);
    tcg_gen_shli_tl(shift, shift, 3);
    tcg_gen_shl_tl(t1, s1, shift);

    /*
     * A shift of 64 does not produce 0 in TCG.  Divide this into a
     * shift of (up to 63) followed by a constant shift of 1.
     */
    tcg_gen_xori_tl(shift, shift, 63);
    tcg_gen_shr_tl(t2, s2, shift);
    tcg_gen_shri_tl(t2, t2, 1);

    tcg_gen_or_tl(dst, t1, t2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_faligndata_g(TCGv_i64 dst, TCGv_i64 s1, TCGv_i64 s2)
{
    gen_op_faligndata_i(dst, s1, s2, cpu_gsr);
}
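
/*
 * Worked example (editorial note): with the GSR alignment field equal to
 * 3, shift becomes 24, so dst = (s1 << 24) | (s2 >> 40) -- bytes 3..10 of
 * the 16-byte big-endian concatenation s1:s2.  Note 63 - shift equals
 * shift ^ 63 for any value in [0, 63], which is what the xori exploits.
 */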
static void gen_op_bshuffle(TCGv_i64 dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_bshuffle(dst, cpu_gsr, src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_pdistn(TCGv dst, TCGv_i64 src1, TCGv_i64 src2)
{
#ifdef TARGET_SPARC64
    gen_helper_pdist(dst, tcg_constant_i64(0), src1, src2);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_fmul8x16al(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_ext16s_i32(src2, src2);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmul8x16au(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    tcg_gen_sari_i32(src2, src2, 16);
    gen_helper_fmul8x16a(dst, src1, src2);
}

static void gen_op_fmuld8ulx16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    tcg_gen_ext8u_i32(t0, src1);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_extract_i32(t1, src1, 16, 8);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}

static void gen_op_fmuld8sux16(TCGv_i64 dst, TCGv_i32 src1, TCGv_i32 src2)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();

    /*
     * The insn description talks about extracting the upper 8 bits
     * of the signed 16-bit input rs1, performing the multiply, then
     * shifting left by 8 bits.  Instead, zap the lower 8 bits of
     * the rs1 input, which avoids the need for two shifts.
     */
    tcg_gen_ext16s_i32(t0, src1);
    tcg_gen_andi_i32(t0, t0, ~0xff);
    tcg_gen_ext16s_i32(t1, src2);
    tcg_gen_mul_i32(t0, t0, t1);

    tcg_gen_sextract_i32(t1, src1, 16, 16);
    tcg_gen_andi_i32(t1, t1, ~0xff);
    tcg_gen_sextract_i32(t2, src2, 16, 16);
    tcg_gen_mul_i32(t1, t1, t2);

    tcg_gen_concat_i32_i64(dst, t0, t1);
}
#ifdef TARGET_SPARC64
static void gen_vec_fchksm16(unsigned vece, TCGv_vec dst,
                             TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec a = tcg_temp_new_vec_matching(dst);
    TCGv_vec c = tcg_temp_new_vec_matching(dst);

    tcg_gen_add_vec(vece, a, src1, src2);
    tcg_gen_cmp_vec(TCG_COND_LTU, vece, c, a, src1);
    /* Vector cmp produces -1 for true, so subtract to add carry. */
    tcg_gen_sub_vec(vece, dst, a, c);
}

static void gen_op_fchksm16(unsigned vece, uint32_t dofs, uint32_t aofs,
                            uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_cmp_vec, INDEX_op_add_vec, INDEX_op_sub_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fchksm16,
        .fniv = gen_vec_fchksm16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}

static void gen_vec_fmean16(unsigned vece, TCGv_vec dst,
                            TCGv_vec src1, TCGv_vec src2)
{
    TCGv_vec t = tcg_temp_new_vec_matching(dst);

    tcg_gen_or_vec(vece, t, src1, src2);
    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(dst, vece, 1));
    tcg_gen_sari_vec(vece, src1, src1, 1);
    tcg_gen_sari_vec(vece, src2, src2, 1);
    tcg_gen_add_vec(vece, dst, src1, src2);
    tcg_gen_add_vec(vece, dst, dst, t);
}
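
/*
 * The averaging above uses a standard identity (editorial verification
 * note, valid in two's complement for all signs):
 *   (a + b + 1) >> 1 == (a >> 1) + (b >> 1) + ((a | b) & 1)
 * The (a | b) & 1 term supplies the rounding carry that the two
 * arithmetic right shifts discard, so no widening is needed.
 */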
static void gen_op_fmean16(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_add_vec, INDEX_op_sari_vec,
    };
    static const GVecGen3 op = {
        .fni8 = gen_helper_fmean16,
        .fniv = gen_vec_fmean16,
        .opt_opc = vecop_list,
        .vece = MO_16,
    };
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &op);
}
#else
#define gen_op_fchksm16   ({ qemu_build_not_reached(); NULL; })
#define gen_op_fmean16    ({ qemu_build_not_reached(); NULL; })
#endif
static void finishing_insn(DisasContext *dc)
{
    /*
     * From here, there is no future path through an unwinding exception.
     * If the current insn cannot raise an exception, the computation of
     * cpu_cond may be able to be elided.
     */
    if (dc->cpu_cond_live) {
        tcg_gen_discard_tl(cpu_cond);
        dc->cpu_cond_live = false;
    }
}

static void gen_generic_branch(DisasContext *dc)
{
    TCGv npc0 = tcg_constant_tl(dc->jump_pc[0]);
    TCGv npc1 = tcg_constant_tl(dc->jump_pc[1]);
    TCGv c2 = tcg_constant_tl(dc->jump.c2);

    tcg_gen_movcond_tl(dc->jump.cond, cpu_npc, dc->jump.c1, c2, npc0, npc1);
}
/* call this function before using the condition register as it may
   have been set for a jump */
static void flush_cond(DisasContext *dc)
{
    if (dc->npc == JUMP_PC) {
        gen_generic_branch(dc);
        dc->npc = DYNAMIC_PC_LOOKUP;
    }
}

static void save_npc(DisasContext *dc)
{
    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            dc->npc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        tcg_gen_movi_tl(cpu_npc, dc->npc);
    }
}

static void save_state(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    save_npc(dc);
}

static void gen_exception(DisasContext *dc, int which)
{
    finishing_insn(dc);
    save_state(dc);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(which));
    dc->base.is_jmp = DISAS_NORETURN;
}

static TCGLabel *delay_exceptionv(DisasContext *dc, TCGv_i32 excp)
{
    DisasDelayException *e = g_new0(DisasDelayException, 1);

    e->next = dc->delay_excp_list;
    dc->delay_excp_list = e;

    e->lab = gen_new_label();
    e->excp = excp;
    e->pc = dc->pc;
    /* Caller must have used flush_cond before branch. */
    assert(dc->npc != JUMP_PC);
    e->npc = dc->npc;

    return e->lab;
}

static TCGLabel *delay_exception(DisasContext *dc, int excp)
{
    return delay_exceptionv(dc, tcg_constant_i32(excp));
}
static void gen_check_align(DisasContext *dc, TCGv addr, int mask)
{
    TCGv t = tcg_temp_new();
    TCGLabel *lab;

    tcg_gen_andi_tl(t, addr, mask);

    flush_cond(dc);
    lab = delay_exception(dc, TT_UNALIGNED);
    tcg_gen_brcondi_tl(TCG_COND_NE, t, 0, lab);
}

static void gen_mov_pc_npc(DisasContext *dc)
{
    finishing_insn(dc);

    if (dc->npc & 3) {
        switch (dc->npc) {
        case JUMP_PC:
            gen_generic_branch(dc);
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = DYNAMIC_PC_LOOKUP;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            dc->pc = dc->npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
    }
}
static void gen_compare(DisasCompare *cmp, bool xcc, unsigned int cond,
                        DisasContext *dc)
{
    TCGv t1;

    cmp->c1 = t1 = tcg_temp_new();
    cmp->c2 = 0;

    switch (cond & 7) {
    case 0x0: /* never */
        cmp->cond = TCG_COND_NEVER;
        cmp->c1 = tcg_constant_tl(0);
        break;

    case 0x1: /* eq: Z */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_Z);
        } else {
            tcg_gen_ext32u_tl(t1, cpu_icc_Z);
        }
        break;

    case 0x2: /* le: Z | (N ^ V) */
        /*
         * Simplify:
         *   cc_Z || (N ^ V) < 0        NE
         *   cc_Z && !((N ^ V) < 0)     EQ
         *   cc_Z & ~((N ^ V) >> TLB)   EQ
         */
        cmp->cond = TCG_COND_EQ;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        tcg_gen_sextract_tl(t1, t1, xcc ? 63 : 31, 1);
        tcg_gen_andc_tl(t1, xcc ? cpu_cc_Z : cpu_icc_Z, t1);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x3: /* lt: N ^ V */
        cmp->cond = TCG_COND_LT;
        tcg_gen_xor_tl(t1, cpu_cc_N, cpu_cc_V);
        if (TARGET_LONG_BITS == 64 && !xcc) {
            tcg_gen_ext32s_tl(t1, t1);
        }
        break;

    case 0x4: /* leu: Z | C */
        /*
         * Simplify:
         *   cc_Z == 0 || cc_C != 0     NE
         *   cc_Z != 0 && cc_C == 0     EQ
         *   cc_Z & (cc_C ? 0 : -1)     EQ
         *   cc_Z & (cc_C - 1)          EQ
         */
        cmp->cond = TCG_COND_EQ;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_subi_tl(t1, cpu_cc_C, 1);
            tcg_gen_and_tl(t1, t1, cpu_cc_Z);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
            tcg_gen_subi_tl(t1, t1, 1);
            tcg_gen_and_tl(t1, t1, cpu_icc_Z);
            tcg_gen_ext32u_tl(t1, t1);
        }
        break;

    case 0x5: /* ltu: C */
        cmp->cond = TCG_COND_NE;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_C);
        } else {
            tcg_gen_extract_tl(t1, cpu_icc_C, 32, 1);
        }
        break;

    case 0x6: /* neg: N */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_N);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_N);
        }
        break;

    case 0x7: /* vs: V */
        cmp->cond = TCG_COND_LT;
        if (TARGET_LONG_BITS == 32 || xcc) {
            tcg_gen_mov_tl(t1, cpu_cc_V);
        } else {
            tcg_gen_ext32s_tl(t1, cpu_cc_V);
        }
        break;
    }
    if (cond & 8) {
        cmp->cond = tcg_invert_cond(cmp->cond);
    }
}
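
/*
 * Worked example (editorial note): "bne" is cond 0x9, i.e. bit 3 set on
 * top of case 0x1 (be).  The TCG_COND_EQ computed for "be" is therefore
 * flipped by the tcg_invert_cond() above into TCG_COND_NE on the same
 * operand, so only eight condition bodies are needed for sixteen codes.
 */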
static void gen_fcompare(DisasCompare *cmp, unsigned int cc, unsigned int cond)
{
    TCGv_i32 fcc = cpu_fcc[cc];
    TCGv_i32 c1 = fcc;
    int c2 = 0;
    TCGCond tcond;

    /*
     * FCC values:
     * 0 =
     * 1 <
     * 2 >
     * 3 unordered
     */
    switch (cond & 7) {
    case 0x0: /* fbn */
        tcond = TCG_COND_NEVER;
        break;
    case 0x1: /* fbne : !0 */
        tcond = TCG_COND_NE;
        break;
    case 0x2: /* fblg : 1 or 2 */
        /* fcc in {1,2} - 1 -> fcc in {0,1} */
        c1 = tcg_temp_new_i32();
        tcg_gen_addi_i32(c1, fcc, -1);
        c2 = 1;
        tcond = TCG_COND_LEU;
        break;
    case 0x3: /* fbul : 1 or 3 */
        c1 = tcg_temp_new_i32();
        tcg_gen_andi_i32(c1, fcc, 1);
        tcond = TCG_COND_NE;
        break;
    case 0x4: /* fbl  : 1 */
        c2 = 1;
        tcond = TCG_COND_EQ;
        break;
    case 0x5: /* fbug : 2 or 3 */
        c2 = 2;
        tcond = TCG_COND_GEU;
        break;
    case 0x6: /* fbg  : 2 */
        c2 = 2;
        tcond = TCG_COND_EQ;
        break;
    case 0x7: /* fbu  : 3 */
        c2 = 3;
        tcond = TCG_COND_EQ;
        break;
    }
    if (cond & 8) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c2 = c2;
    cmp->c1 = tcg_temp_new();
    tcg_gen_extu_i32_tl(cmp->c1, c1);
}
static bool gen_compare_reg(DisasCompare *cmp, int cond, TCGv r_src)
{
    static const TCGCond cond_reg[4] = {
        TCG_COND_NEVER,  /* reserved */
        TCG_COND_EQ,
        TCG_COND_LE,
        TCG_COND_LT,
    };
    TCGCond tcond;

    if ((cond & 3) == 0) {
        return false;
    }
    tcond = cond_reg[cond & 3];
    if (cond & 4) {
        tcond = tcg_invert_cond(tcond);
    }

    cmp->cond = tcond;
    cmp->c1 = tcg_temp_new();
    cmp->c2 = 0;
    tcg_gen_mov_tl(cmp->c1, r_src);
    return true;
}

static void gen_op_clear_ieee_excp_and_FTT(void)
{
    tcg_gen_st_i32(tcg_constant_i32(0), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
}
static void gen_op_fmovs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i32(dst, src);
}

static void gen_op_fnegs(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i32(dst, src, 1u << 31);
}

static void gen_op_fabss(TCGv_i32 dst, TCGv_i32 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i32(dst, src, ~(1u << 31));
}

static void gen_op_fmovd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_mov_i64(dst, src);
}

static void gen_op_fnegd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_xori_i64(dst, src, 1ull << 63);
}

static void gen_op_fabsd(TCGv_i64 dst, TCGv_i64 src)
{
    gen_op_clear_ieee_excp_and_FTT();
    tcg_gen_andi_i64(dst, src, ~(1ull << 63));
}

static void gen_op_fnegq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_xori_i64(h, h, 1ull << 63);
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fabsq(TCGv_i128 dst, TCGv_i128 src)
{
    TCGv_i64 l = tcg_temp_new_i64();
    TCGv_i64 h = tcg_temp_new_i64();

    tcg_gen_extr_i128_i64(l, h, src);
    tcg_gen_andi_i64(h, h, ~(1ull << 63));
    tcg_gen_concat_i64_i128(dst, l, h);
}

static void gen_op_fmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(0));
}

static void gen_op_fmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_c | float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2, TCGv_i32 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmadds(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}

static void gen_op_fnmaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2, TCGv_i64 s3)
{
    int op = float_muladd_negate_result;
    gen_helper_fmaddd(d, tcg_env, s1, s2, s3, tcg_constant_i32(op));
}
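
/*
 * Summary of the softfloat flag combinations above (editorial note):
 *   fmadd:  d =   s1 * s2 + s3      (no flags)
 *   fmsub:  d =   s1 * s2 - s3      (negate_c)
 *   fnmsub: d = -(s1 * s2 - s3)     (negate_c | negate_result)
 *   fnmadd: d = -(s1 * s2 + s3)     (negate_result)
 * each performed with a single rounding step inside the helper.
 */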
/* Use muladd to compute (1 * src1) + src2 / 2 with one rounding. */
static void gen_op_fhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute (1 * src1) - src2 / 2 with one rounding. */
static void gen_op_fhsubs(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fhsubd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_c | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

/* Use muladd to compute -((1 * src1) + src2 / 2) with one rounding. */
static void gen_op_fnhadds(TCGv_i32 d, TCGv_i32 s1, TCGv_i32 s2)
{
    TCGv_i32 one = tcg_constant_i32(float32_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmadds(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fnhaddd(TCGv_i64 d, TCGv_i64 s1, TCGv_i64 s2)
{
    TCGv_i64 one = tcg_constant_i64(float64_one);
    int op = float_muladd_negate_result | float_muladd_halve_result;
    gen_helper_fmaddd(d, tcg_env, one, s1, s2, tcg_constant_i32(op));
}

static void gen_op_fpexception_im(DisasContext *dc, int ftt)
{
    /*
     * CEXC is only set when successfully completing an FPop,
     * or when raising FSR_FTT_IEEE_EXCP, i.e. check_ieee_exception.
     * Thus we can simply store FTT into this field.
     */
    tcg_gen_st_i32(tcg_constant_i32(ftt), tcg_env,
                   offsetof(CPUSPARCState, fsr_cexc_ftt));
    gen_exception(dc, TT_FP_EXCP);
}

static int gen_trap_ifnofpu(DisasContext *dc)
{
#if !defined(CONFIG_USER_ONLY)
    if (!dc->fpu_enabled) {
        gen_exception(dc, TT_NFPU_INSN);
        return 1;
    }
#endif
    return 0;
}
/* asi moves */
typedef enum {
    GET_ASI_HELPER,
    GET_ASI_EXCP,
    GET_ASI_DIRECT,
    GET_ASI_DTWINX,
    GET_ASI_CODE,
    GET_ASI_BLOCK,
    GET_ASI_SHORT,
    GET_ASI_BCOPY,
    GET_ASI_BFILL,
} ASIType;

typedef struct {
    ASIType type;
    int asi;
    int mem_idx;
    MemOp memop;
} DisasASI;

/*
 * Build DisasASI.
 * For asi == -1, treat as non-asi.
 * For asi == -2, treat as immediate offset (v8 error, v9 %asi).
 */
static DisasASI resolve_asi(DisasContext *dc, int asi, MemOp memop)
{
    ASIType type = GET_ASI_HELPER;
    int mem_idx = dc->mem_idx;

    if (asi == -1) {
        /* Artificial "non-asi" case. */
        type = GET_ASI_DIRECT;
        goto done;
    }

#ifndef TARGET_SPARC64
    /* Before v9, all asis are immediate and privileged.  */
    if (asi < 0) {
        gen_exception(dc, TT_ILL_INSN);
        type = GET_ASI_EXCP;
    } else if (supervisor(dc)
               /* Note that LEON accepts ASI_USERDATA in user mode, for
                  use with CASA.  Also note that previous versions of
                  QEMU allowed (and old versions of gcc emitted) ASI_P
                  for LEON, which is incorrect. */
               || (asi == ASI_USERDATA
                   && (dc->def->features & CPU_FEATURE_CASA))) {
        switch (asi) {
        case ASI_USERDATA:    /* User data access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_KERNELDATA:  /* Supervisor data access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_USERTXT:     /* User text access */
            mem_idx = MMU_USER_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_KERNELTXT:   /* Supervisor text access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_CODE;
            break;
        case ASI_M_BYPASS:    /* MMU passthrough */
        case ASI_LEON_BYPASS: /* LEON MMU passthrough */
            mem_idx = MMU_PHYS_IDX;
            type = GET_ASI_DIRECT;
            break;
        case ASI_M_BCOPY: /* Block copy, sta access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BCOPY;
            break;
        case ASI_M_BFILL: /* Block fill, stda access */
            mem_idx = MMU_KERNEL_IDX;
            type = GET_ASI_BFILL;
            break;
        }

        /* MMU_PHYS_IDX is used when the MMU is disabled to passthrough the
         * permissions check in get_physical_address(..).
         */
        mem_idx = (dc->mem_idx == MMU_PHYS_IDX) ? MMU_PHYS_IDX : mem_idx;
    } else {
        gen_exception(dc, TT_PRIV_INSN);
        type = GET_ASI_EXCP;
    }
#else
    if (asi < 0) {
        asi = dc->asi;
    }
    /* With v9, all asis below 0x80 are privileged. */
    /* ??? We ought to check cpu_has_hypervisor, but we didn't copy
       down that bit into DisasContext.  For the moment that's ok,
       since the direct implementations below doesn't have any ASIs
       in the restricted [0x30, 0x7f] range, and the check will be
       done properly in the helper. */
    if (!supervisor(dc) && asi < 0x80) {
        gen_exception(dc, TT_PRIV_ACT);
        type = GET_ASI_EXCP;
    } else {
        switch (asi) {
        case ASI_REAL:         /* Bypass */
        case ASI_REAL_IO:      /* Bypass, non-cacheable */
        case ASI_REAL_L:       /* Bypass LE */
        case ASI_REAL_IO_L:    /* Bypass, non-cacheable LE */
        case ASI_TWINX_REAL:   /* Real address, twinx */
        case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
            mem_idx = MMU_PHYS_IDX;
            break;
        case ASI_N:  /* Nucleus */
        case ASI_NL: /* Nucleus LE */
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            if (hypervisor(dc)) {
                mem_idx = MMU_PHYS_IDX;
            } else {
                mem_idx = MMU_NUCLEUS_IDX;
            }
            break;
        case ASI_AIUP:  /* As if user primary */
        case ASI_AIUPL: /* As if user primary LE */
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_MON_AIUP:
            mem_idx = MMU_USER_IDX;
            break;
        case ASI_AIUS:  /* As if user secondary */
        case ASI_AIUSL: /* As if user secondary LE */
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_MON_AIUS:
            mem_idx = MMU_USER_SECONDARY_IDX;
            break;
        case ASI_S:  /* Secondary */
        case ASI_SL: /* Secondary LE */
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_MON_S:
            if (mem_idx == MMU_USER_IDX) {
                mem_idx = MMU_USER_SECONDARY_IDX;
            } else if (mem_idx == MMU_KERNEL_IDX) {
                mem_idx = MMU_KERNEL_SECONDARY_IDX;
            }
            break;
        case ASI_P:  /* Primary */
        case ASI_PL: /* Primary LE */
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_P:
        case ASI_BLK_PL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
        case ASI_MON_P:
            break;
        }
        switch (asi) {
        case ASI_REAL:
        case ASI_REAL_IO:
        case ASI_REAL_L:
        case ASI_REAL_IO_L:
        case ASI_N:
        case ASI_NL:
        case ASI_AIUP:
        case ASI_AIUPL:
        case ASI_AIUS:
        case ASI_AIUSL:
        case ASI_S:
        case ASI_SL:
        case ASI_P:
        case ASI_PL:
        case ASI_MON_P:
        case ASI_MON_S:
        case ASI_MON_AIUP:
        case ASI_MON_AIUS:
            type = GET_ASI_DIRECT;
            break;
        case ASI_TWINX_REAL:
        case ASI_TWINX_REAL_L:
        case ASI_TWINX_N:
        case ASI_TWINX_NL:
        case ASI_TWINX_AIUP:
        case ASI_TWINX_AIUP_L:
        case ASI_TWINX_AIUS:
        case ASI_TWINX_AIUS_L:
        case ASI_TWINX_S:
        case ASI_TWINX_SL:
        case ASI_TWINX_P:
        case ASI_TWINX_PL:
        case ASI_QUAD_LDD_PHYS:
        case ASI_QUAD_LDD_PHYS_L:
        case ASI_NUCLEUS_QUAD_LDD:
        case ASI_NUCLEUS_QUAD_LDD_L:
            type = GET_ASI_DTWINX;
            break;
        case ASI_BLK_COMMIT_P:
        case ASI_BLK_COMMIT_S:
        case ASI_BLK_AIUP_4V:
        case ASI_BLK_AIUP_L_4V:
        case ASI_BLK_AIUP:
        case ASI_BLK_AIUPL:
        case ASI_BLK_AIUS_4V:
        case ASI_BLK_AIUS_L_4V:
        case ASI_BLK_AIUS:
        case ASI_BLK_AIUSL:
        case ASI_BLK_S:
        case ASI_BLK_SL:
        case ASI_BLK_P:
        case ASI_BLK_PL:
            type = GET_ASI_BLOCK;
            break;
        case ASI_FL8_S:
        case ASI_FL8_SL:
        case ASI_FL8_P:
        case ASI_FL8_PL:
            memop = MO_UB;
            type = GET_ASI_SHORT;
            break;
        case ASI_FL16_S:
        case ASI_FL16_SL:
        case ASI_FL16_P:
        case ASI_FL16_PL:
            memop = MO_TEUW;
            type = GET_ASI_SHORT;
            break;
        }
        /* The little-endian asis all have bit 3 set. */
        if (asi & 8) {
            memop ^= MO_BSWAP;
        }
    }
#endif

 done:
    return (DisasASI){ type, asi, mem_idx, memop };
}
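
/*
 * Usage note (editorial, illustrative): a v9 "lda [%o0] %asi, %o1" whose
 * %asi is ASI_P resolves here to GET_ASI_DIRECT with mem_idx unchanged,
 * so the access below is emitted as an inline tcg_gen_qemu_ld_tl; ASIs
 * without a direct implementation keep the default GET_ASI_HELPER and
 * take the out-of-line gen_helper_ld_asi/st_asi path.
 */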
#if defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
static void gen_helper_ld_asi(TCGv_i64 r, TCGv_env e, TCGv a,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}

static void gen_helper_st_asi(TCGv_env e, TCGv a, TCGv_i64 r,
                              TCGv_i32 asi, TCGv_i32 mop)
{
    g_assert_not_reached();
}
#endif

static void gen_ld_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DTWINX: /* Reserved for ldda. */
        gen_exception(dc, TT_ILL_INSN);
        break;
    case GET_ASI_DIRECT:
        tcg_gen_qemu_ld_tl(dst, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 t64 = tcg_temp_new_i64();

            gen_helper_ld_code(t64, tcg_env, addr, tcg_constant_i32(oi));
            tcg_gen_trunc_i64_tl(dst, t64);
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_ld_asi(dst, tcg_env, addr, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);
                tcg_gen_trunc_i64_tl(dst, t64);
            }
#endif
        }
        break;
    }
}
static void gen_st_asi(DisasContext *dc, DisasASI *da, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX: /* Reserved for stda. */
        if (TARGET_LONG_BITS == 32) {
            gen_exception(dc, TT_ILL_INSN);
            break;
        } else if (!(dc->def->features & CPU_FEATURE_HYPV)) {
            /* Pre OpenSPARC CPUs don't have these */
            gen_exception(dc, TT_ILL_INSN);
            break;
        }
        /* In OpenSPARC T1+ CPUs TWINX ASIs in store are ST_BLKINIT_ ASIs */
        /* fall through */

    case GET_ASI_DIRECT:
        tcg_gen_qemu_st_tl(src, addr, da->mem_idx, da->memop | MO_ALIGN);
        break;

    case GET_ASI_BCOPY:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Copy 32 bytes from the address in SRC to ADDR.
         *
         * From Ross RT625 hyperSPARC manual, section 4.6:
         * "Block Copy and Block Fill will work only on cache line boundaries."
         *
         * It does not specify if an unaligned address is truncated or trapped.
         * Previous qemu behaviour was to truncate to 4 byte alignment, which
         * is obviously wrong.  The only place I can see this used is in the
         * Linux kernel which begins with page alignment, advancing by 32,
         * so is always aligned.  Assume truncation as the simpler option.
         *
         * Since the loads and stores are paired, allow the copy to happen
         * in the host endianness.  The copy need not be atomic.
         */
        {
            MemOp mop = MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv saddr = tcg_temp_new();
            TCGv daddr = tcg_temp_new();
            TCGv_i128 tmp = tcg_temp_new_i128();

            tcg_gen_andi_tl(saddr, src, -32);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(saddr, saddr, 16);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_ld_i128(tmp, saddr, da->mem_idx, mop);
            tcg_gen_qemu_st_i128(tmp, daddr, da->mem_idx, mop);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop | MO_ALIGN);

            save_state(dc);
#ifdef TARGET_SPARC64
            gen_helper_st_asi(tcg_env, addr, src, r_asi, r_mop);
#else
            {
                TCGv_i64 t64 = tcg_temp_new_i64();
                tcg_gen_extu_tl_i64(t64, src);
                gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
            }
#endif

            /* A write to a TLB register may alter page maps.  End the TB. */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
static void gen_swap_asi(DisasContext *dc, DisasASI *da,
                         TCGv dst, TCGv src, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, src,
                               da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_cas_asi(DisasContext *dc, DisasASI *da,
                        TCGv oldv, TCGv newv, TCGv cmpv, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        return;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_cmpxchg_tl(oldv, addr, cmpv, newv,
                                  da->mem_idx, da->memop | MO_ALIGN);
        break;
    default:
        /* ??? Should be DAE_invalid_asi.  */
        gen_exception(dc, TT_DATA_ACCESS);
        break;
    }
}

static void gen_ldstub_asi(DisasContext *dc, DisasASI *da, TCGv dst, TCGv addr)
{
    switch (da->type) {
    case GET_ASI_EXCP:
        break;
    case GET_ASI_DIRECT:
        tcg_gen_atomic_xchg_tl(dst, addr, tcg_constant_tl(0xff),
                               da->mem_idx, MO_UB);
        break;
    default:
        /* ??? In theory, this should be raise DAE_invalid_asi.
           But the SS-20 roms do ldstuba [%l0] #ASI_M_CTL, %o1.  */
        if (tb_cflags(dc->base.tb) & CF_PARALLEL) {
            gen_helper_exit_atomic(tcg_env);
        } else {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(MO_UB);
            TCGv_i64 s64, t64;

            save_state(dc);
            t64 = tcg_temp_new_i64();
            gen_helper_ld_asi(t64, tcg_env, addr, r_asi, r_mop);

            s64 = tcg_constant_i64(0xff);
            gen_helper_st_asi(tcg_env, addr, s64, r_asi, r_mop);

            tcg_gen_trunc_i64_tl(dst, t64);

            /* End the TB.  */
            dc->npc = DYNAMIC_PC;
        }
        break;
    }
}
static void gen_ldf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64, l64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = tcg_temp_new_i32();
            tcg_gen_qemu_ld_i32(d32, addr, da->mem_idx, memop);
            gen_store_fpr_F(dc, rd, d32);
            break;

        case MO_64:
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            break;

        case MO_128:
            d64 = tcg_temp_new_i64();
            l64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            tcg_gen_qemu_ld_i64(l64, addr_tmp, da->mem_idx, memop);
            gen_store_fpr_D(dc, rd, d64);
            gen_store_fpr_D(dc, rd + 2, l64);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            d64 = tcg_temp_new_i64();
            for (int i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                gen_store_fpr_D(dc, rd + 2 * i, d64);
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for lddfa only. */
        if (orig_size == MO_64) {
            d64 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
            gen_store_fpr_D(dc, rd, d64);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(memop | MO_ALIGN);

            save_state(dc);
            /* According to the table in the UA2011 manual, the only
               other asis that are valid for ldfa/lddfa/ldqfa are
               the NO_FAULT asis.  We still need a helper for these,
               but we can just use the integer asi helper for them. */
            switch (size) {
            case MO_32:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                d32 = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(d32, d64);
                gen_store_fpr_F(dc, rd, d32);
                break;
            case MO_64:
                d64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                break;
            case MO_128:
                d64 = tcg_temp_new_i64();
                l64 = tcg_temp_new_i64();
                gen_helper_ld_asi(d64, tcg_env, addr, r_asi, r_mop);
                addr_tmp = tcg_temp_new();
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                gen_helper_ld_asi(l64, tcg_env, addr_tmp, r_asi, r_mop);
                gen_store_fpr_D(dc, rd, d64);
                gen_store_fpr_D(dc, rd + 2, l64);
                break;
            default:
                g_assert_not_reached();
            }
        }
        break;
    }
}
static void gen_stf_asi(DisasContext *dc, DisasASI *da, MemOp orig_size,
                        TCGv addr, int rd)
{
    MemOp memop = da->memop;
    MemOp size = memop & MO_SIZE;
    TCGv_i32 d32;
    TCGv_i64 d64;
    TCGv addr_tmp;

    /* TODO: Use 128-bit load/store below. */
    if (size == MO_128) {
        memop = (memop & ~MO_SIZE) | MO_64;
    }

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DIRECT:
        memop |= MO_ALIGN_4;
        switch (size) {
        case MO_32:
            d32 = gen_load_fpr_F(dc, rd);
            tcg_gen_qemu_st_i32(d32, addr, da->mem_idx, memop | MO_ALIGN);
            break;
        case MO_64:
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_4);
            break;
        case MO_128:
            /* Only 4-byte alignment required.  However, it is legal for the
               cpu to signal the alignment fault, and the OS trap handler is
               required to fix it up.  Requiring 16-byte alignment here avoids
               having to probe the second page before performing the first
               write.  */
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN_16);
            addr_tmp = tcg_temp_new();
            tcg_gen_addi_tl(addr_tmp, addr, 8);
            d64 = gen_load_fpr_D(dc, rd + 2);
            tcg_gen_qemu_st_i64(d64, addr_tmp, da->mem_idx, memop);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only. */
        if (orig_size == MO_64 && (rd & 7) == 0) {
            /* The first operation checks required alignment. */
            addr_tmp = tcg_temp_new();
            for (int i = 0; ; ++i) {
                d64 = gen_load_fpr_D(dc, rd + 2 * i);
                tcg_gen_qemu_st_i64(d64, addr, da->mem_idx,
                                    memop | (i == 0 ? MO_ALIGN_64 : 0));
                if (i == 7) {
                    break;
                }
                tcg_gen_addi_tl(addr_tmp, addr, 8);
                addr = addr_tmp;
            }
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    case GET_ASI_SHORT:
        /* Valid for stdfa only. */
        if (orig_size == MO_64) {
            d64 = gen_load_fpr_D(dc, rd);
            tcg_gen_qemu_st_i64(d64, addr, da->mem_idx, memop | MO_ALIGN);
        } else {
            gen_exception(dc, TT_ILL_INSN);
        }
        break;

    default:
        /* According to the table in the UA2011 manual, the only
           other asis that are valid for stfa/stdfa/stqfa are
           the PST* asis, which aren't currently handled.  */
        gen_exception(dc, TT_ILL_INSN);
        break;
    }
}
static void gen_ldda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_dest_gpr(dc, rd);
    TCGv lo = gen_dest_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        return;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            tcg_gen_qemu_ld_i128(t, addr, da->mem_idx, mop);
            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE load, so must swap
             * the order of the writebacks.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i128_i64(lo, hi, t);
            } else {
                tcg_gen_extr_i128_i64(hi, lo, t);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_qemu_ld_i64(tmp, addr, da->mem_idx, da->memop | MO_ALIGN);

            /* Note that LE ldda acts as if each 32-bit register
               result is byte swapped.  Having just performed one
               64-bit bswap, we need now to swap the writebacks.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;

    case GET_ASI_CODE:
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
        {
            MemOpIdx oi = make_memop_idx(da->memop, da->mem_idx);
            TCGv_i64 tmp = tcg_temp_new_i64();

            gen_helper_ld_code(tmp, tcg_env, addr, tcg_constant_i32(oi));

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
#else
        g_assert_not_reached();
#endif

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for ldda, and this should raise DAE_invalid_asi.  However,
           real hardware allows others.  This can be seen with e.g.
           FreeBSD 10.3 wrt ASI_IC_TAG.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 tmp = tcg_temp_new_i64();

            save_state(dc);
            gen_helper_ld_asi(tmp, tcg_env, addr, r_asi, r_mop);

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_extr_i64_tl(lo, hi, tmp);
            } else {
                tcg_gen_extr_i64_tl(hi, lo, tmp);
            }
        }
        break;
    }

    gen_store_gpr(dc, rd, hi);
    gen_store_gpr(dc, rd + 1, lo);
}
static void gen_stda_asi(DisasContext *dc, DisasASI *da, TCGv addr, int rd)
{
    TCGv hi = gen_load_gpr(dc, rd);
    TCGv lo = gen_load_gpr(dc, rd + 1);

    switch (da->type) {
    case GET_ASI_EXCP:
        break;

    case GET_ASI_DTWINX:
#ifdef TARGET_SPARC64
        {
            MemOp mop = (da->memop & MO_BSWAP) | MO_128 | MO_ALIGN_16;
            TCGv_i128 t = tcg_temp_new_i128();

            /*
             * Note that LE twinx acts as if each 64-bit register result is
             * byte swapped.  We perform one 128-bit LE store, so must swap
             * the order of the construction.
             */
            if ((mop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_i64_i128(t, lo, hi);
            } else {
                tcg_gen_concat_i64_i128(t, hi, lo);
            }
            tcg_gen_qemu_st_i128(t, addr, da->mem_idx, mop);
        }
        break;
#else
        g_assert_not_reached();
#endif

    case GET_ASI_DIRECT:
        {
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* Note that LE stda acts as if each 32-bit register result is
               byte swapped.  We will perform one 64-bit LE store, so now
               we must swap the order of the construction.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }
            tcg_gen_qemu_st_i64(t64, addr, da->mem_idx, da->memop | MO_ALIGN);
        }
        break;

    case GET_ASI_BFILL:
        assert(TARGET_LONG_BITS == 32);
        /*
         * Store 32 bytes of [rd:rd+1] to ADDR.
         * See comments for GET_ASI_BCOPY above.
         */
        {
            MemOp mop = MO_TE | MO_128 | MO_ATOM_IFALIGN_PAIR;
            TCGv_i64 t8 = tcg_temp_new_i64();
            TCGv_i128 t16 = tcg_temp_new_i128();
            TCGv daddr = tcg_temp_new();

            tcg_gen_concat_tl_i64(t8, lo, hi);
            tcg_gen_concat_i64_i128(t16, t8, t8);
            tcg_gen_andi_tl(daddr, addr, -32);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
            tcg_gen_addi_tl(daddr, daddr, 16);
            tcg_gen_qemu_st_i128(t16, daddr, da->mem_idx, mop);
        }
        break;

    default:
        /* ??? In theory we've handled all of the ASIs that are valid
           for stda, and this should raise DAE_invalid_asi.  */
        {
            TCGv_i32 r_asi = tcg_constant_i32(da->asi);
            TCGv_i32 r_mop = tcg_constant_i32(da->memop);
            TCGv_i64 t64 = tcg_temp_new_i64();

            /* See above.  */
            if ((da->memop & MO_BSWAP) == MO_TE) {
                tcg_gen_concat_tl_i64(t64, lo, hi);
            } else {
                tcg_gen_concat_tl_i64(t64, hi, lo);
            }

            save_state(dc);
            gen_helper_st_asi(tcg_env, addr, t64, r_asi, r_mop);
        }
        break;
    }
}
static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i32 c32, zero, dst, s1, s2;
    TCGv_i64 c64 = tcg_temp_new_i64();

    /* We have two choices here: extend the 32 bit data and use movcond_i64,
       or fold the comparison down to 32 bits and use movcond_i32.  Choose
       the latter.  */
    c32 = tcg_temp_new_i32();
    tcg_gen_setcondi_i64(cmp->cond, c64, cmp->c1, cmp->c2);
    tcg_gen_extrl_i64_i32(c32, c64);

    s1 = gen_load_fpr_F(dc, rs);
    s2 = gen_load_fpr_F(dc, rd);
    dst = tcg_temp_new_i32();
    zero = tcg_constant_i32(0);

    tcg_gen_movcond_i32(TCG_COND_NE, dst, c32, zero, s1, s2);

    gen_store_fpr_F(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

static void gen_fmovd(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv_i64 dst = tcg_temp_new_i64();
    tcg_gen_movcond_i64(cmp->cond, dst, cmp->c1, tcg_constant_tl(cmp->c2),
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    gen_store_fpr_D(dc, rd, dst);
#else
    qemu_build_not_reached();
#endif
}

static void gen_fmovq(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
{
#ifdef TARGET_SPARC64
    TCGv c2 = tcg_constant_tl(cmp->c2);
    TCGv_i64 h = tcg_temp_new_i64();
    TCGv_i64 l = tcg_temp_new_i64();

    tcg_gen_movcond_i64(cmp->cond, h, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs),
                        gen_load_fpr_D(dc, rd));
    tcg_gen_movcond_i64(cmp->cond, l, cmp->c1, c2,
                        gen_load_fpr_D(dc, rs + 2),
                        gen_load_fpr_D(dc, rd + 2));
    gen_store_fpr_D(dc, rd, h);
    gen_store_fpr_D(dc, rd + 2, l);
#else
    qemu_build_not_reached();
#endif
}
#ifdef TARGET_SPARC64
static void gen_load_trap_state_at_tl(TCGv_ptr r_tsptr)
{
    TCGv_i32 r_tl = tcg_temp_new_i32();

    /* load env->tl into r_tl */
    tcg_gen_ld_i32(r_tl, tcg_env, offsetof(CPUSPARCState, tl));

    /* tl = [0 ... MAXTL_MASK] where MAXTL_MASK must be power of 2 */
    tcg_gen_andi_i32(r_tl, r_tl, MAXTL_MASK);

    /* calculate offset to current trap state from env->ts, reuse r_tl */
    tcg_gen_muli_i32(r_tl, r_tl, sizeof(trap_state));
    tcg_gen_addi_ptr(r_tsptr, tcg_env, offsetof(CPUSPARCState, ts));

    /* tsptr = env->ts[env->tl & MAXTL_MASK] */
    {
        TCGv_ptr r_tl_tmp = tcg_temp_new_ptr();
        tcg_gen_ext_i32_ptr(r_tl_tmp, r_tl);
        tcg_gen_add_ptr(r_tsptr, r_tsptr, r_tl_tmp);
    }
}
#endif

static int extract_dfpreg(DisasContext *dc, int x)
{
    int r = x & 0x1e;
#ifdef TARGET_SPARC64
    r |= (x & 1) << 5;
#endif
    return r;
}

static int extract_qfpreg(DisasContext *dc, int x)
{
    int r = x & 0x1c;
#ifdef TARGET_SPARC64
    r |= (x & 1) << 5;
#endif
    return r;
}

/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

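/*
 * Each decodetree trans_NAME hook is defined via TRANS below: it first
 * evaluates the avail_AVAIL predicate for the current cpu, then forwards
 * to the shared FUNC implementation.  Returning false from either step
 * makes the caller treat the insn as illegal.
 */
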
#define TRANS(NAME, AVAIL, FUNC, ...) \
    static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \
    { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); }

#define avail_ALL(C)      true
#ifdef TARGET_SPARC64
# define avail_32(C)      false
# define avail_ASR17(C)   false
# define avail_CASA(C)    true
# define avail_DIV(C)     true
# define avail_MUL(C)     true
# define avail_POWERDOWN(C) false
# define avail_64(C)      true
# define avail_FMAF(C)    ((C)->def->features & CPU_FEATURE_FMAF)
# define avail_GL(C)      ((C)->def->features & CPU_FEATURE_GL)
# define avail_HYPV(C)    ((C)->def->features & CPU_FEATURE_HYPV)
# define avail_IMA(C)     ((C)->def->features & CPU_FEATURE_IMA)
# define avail_VIS1(C)    ((C)->def->features & CPU_FEATURE_VIS1)
# define avail_VIS2(C)    ((C)->def->features & CPU_FEATURE_VIS2)
# define avail_VIS3(C)    ((C)->def->features & CPU_FEATURE_VIS3)
# define avail_VIS3B(C)   avail_VIS3(C)
# define avail_VIS4(C)    ((C)->def->features & CPU_FEATURE_VIS4)
#else
# define avail_32(C)      true
# define avail_ASR17(C)   ((C)->def->features & CPU_FEATURE_ASR17)
# define avail_CASA(C)    ((C)->def->features & CPU_FEATURE_CASA)
# define avail_DIV(C)     ((C)->def->features & CPU_FEATURE_DIV)
# define avail_MUL(C)     ((C)->def->features & CPU_FEATURE_MUL)
# define avail_POWERDOWN(C) ((C)->def->features & CPU_FEATURE_POWERDOWN)
# define avail_64(C)      false
# define avail_FMAF(C)    false
# define avail_GL(C)      false
# define avail_HYPV(C)    false
# define avail_IMA(C)     false
# define avail_VIS1(C)    false
# define avail_VIS2(C)    false
# define avail_VIS3(C)    false
# define avail_VIS3B(C)   false
# define avail_VIS4(C)    false
#endif

/* Default case for non jump instructions. */
static bool advance_pc(DisasContext *dc)
{
    TCGLabel *l1;

    if (dc->npc & 3) {
        switch (dc->npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            dc->pc = dc->npc;
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            break;
        case JUMP_PC:
            /* we can do a static jump */
            l1 = gen_new_label();
            tcg_gen_brcondi_tl(dc->jump.cond, dc->jump.c1, dc->jump.c2, l1);

            /* jump not taken */
            gen_goto_tb(dc, 1, dc->jump_pc[1], dc->jump_pc[1] + 4);

            /* jump taken */
            gen_set_label(l1);
            gen_goto_tb(dc, 0, dc->jump_pc[0], dc->jump_pc[0] + 4);

            dc->base.is_jmp = DISAS_NORETURN;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
    return true;
}

/*
 * Major opcodes 00 and 01 -- branches, call, and sethi
 */

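/*
 * Branches update the pc/npc pair tracked in DisasContext.  The special
 * values DYNAMIC_PC and DYNAMIC_PC_LOOKUP mean the address is known only
 * at run time (held in cpu_pc/cpu_npc); JUMP_PC means npc depends on a
 * condition evaluated into cpu_cond, with the two candidate targets kept
 * in dc->jump_pc[].
 */
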
static bool advance_jump_cond(DisasContext *dc, DisasCompare *cmp,
                              bool annul, int disp)
{
    target_ulong dest = address_mask_i(dc, dc->pc + disp * 4);
    target_ulong npc = dc->npc;

    if (cmp->cond == TCG_COND_ALWAYS) {
        if (annul) {
            dc->pc = dest;
            dc->npc = dest + 4;
        } else {
            gen_mov_pc_npc(dc);
            dc->pc = dest;
        }
        return true;
    }

    if (cmp->cond == TCG_COND_NEVER) {
        if (npc & 3) {
            gen_mov_pc_npc(dc);
            if (annul) {
                tcg_gen_addi_tl(cpu_pc, cpu_pc, 4);
            }
            tcg_gen_addi_tl(cpu_npc, cpu_pc, 4);
        } else {
            dc->pc = npc + (annul ? 4 : 0);
            dc->npc = dc->pc + 4;
        }
        return true;
    }

    if (annul) {
        /* The annulled delay slot is only executed if the branch is taken. */
        TCGLabel *l1 = gen_new_label();

        tcg_gen_brcondi_tl(tcg_invert_cond(cmp->cond), cmp->c1, cmp->c2, l1);
        gen_goto_tb(dc, 0, npc, dest);
        gen_set_label(l1);
        gen_goto_tb(dc, 1, npc + 4, npc + 8);

        dc->base.is_jmp = DISAS_NORETURN;
    } else if (npc & 3) {
        switch (npc) {
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            tcg_gen_mov_tl(cpu_pc, cpu_npc);
            tcg_gen_addi_tl(cpu_npc, cpu_npc, 4);
            tcg_gen_movcond_tl(cmp->cond, cpu_npc,
                               cmp->c1, tcg_constant_tl(cmp->c2),
                               tcg_constant_tl(dest), cpu_npc);
            dc->pc = npc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        /* Defer the branch: npc depends on the condition in cpu_cond. */
        dc->pc = npc;
        dc->npc = JUMP_PC;
        dc->jump = *cmp;
        dc->jump_pc[0] = dest;
        dc->jump_pc[1] = npc + 4;

        /* The condition for cpu_cond is always NE -- normalize. */
        if (cmp->cond == TCG_COND_NE) {
            tcg_gen_xori_tl(cpu_cond, cmp->c1, cmp->c2);
        } else {
            tcg_gen_setcondi_tl(cmp->cond, cpu_cond, cmp->c1, cmp->c2);
        }
        dc->cpu_cond_live = true;
    }
    return true;
}

static bool raise_priv(DisasContext *dc)
{
    gen_exception(dc, TT_PRIV_INSN);
    return true;
}

static bool raise_unimpfpop(DisasContext *dc)
{
    gen_op_fpexception_im(dc, FSR_FTT_UNIMPFPOP);
    return true;
}

static bool gen_trap_float128(DisasContext *dc)
{
    if (dc->def->features & CPU_FEATURE_FLOAT128) {
        return false;
    }
    return raise_unimpfpop(dc);
}

static bool do_bpcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    gen_compare(&cmp, a->cc, a->cond, dc);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(Bicc, ALL, do_bpcc, a)
TRANS(BPcc, 64, do_bpcc, a)

static bool do_fbpfcc(DisasContext *dc, arg_bcc *a)
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

TRANS(FBPfcc, 64, do_fbpfcc, a)
TRANS(FBfcc, ALL, do_fbpfcc, a)

static bool trans_BPr(DisasContext *dc, arg_BPr *a)
{
    DisasCompare cmp;

    if (!avail_64(dc)) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return advance_jump_cond(dc, &cmp, a->a, a->i);
}

static bool trans_CALL(DisasContext *dc, arg_CALL *a)
{
    target_long target = address_mask_i(dc, dc->pc + a->i * 4);

    gen_store_gpr(dc, 15, tcg_constant_tl(dc->pc));

    gen_mov_pc_npc(dc);
    dc->npc = target;
    return true;
}

static bool trans_NCP(DisasContext *dc, arg_NCP *a)
{
    /*
     * For sparc32, always generate the no-coprocessor exception.
     * For sparc64, always generate illegal instruction.
     */
#ifdef TARGET_SPARC64
    return false;
#else
    gen_exception(dc, TT_NCP_INSN);
    return true;
#endif
}

static bool trans_SETHI(DisasContext *dc, arg_SETHI *a)
{
    /* Special-case %g0 because that's the canonical nop. */
    if (a->rd) {
        gen_store_gpr(dc, a->rd, tcg_constant_tl((uint32_t)a->i << 10));
    }
    return advance_pc(dc);
}

/*
 * Major Opcode 10 -- integer, floating-point, vis, and system insns.
 */

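/*
 * Tcc: the trap number is computed as (rs1 + rs2/imm) & mask, plus
 * TT_TRAP, where the mask is wider when the insn executes in privileged
 * mode on a cpu with the hypervisor feature (UA2005_HTRAP_MASK vs
 * V8_TRAP_MASK).  Unconditional traps are raised inline; conditional
 * traps branch to a delayed-exception label so the fall-through path
 * stays fast.
 */
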
static bool do_tcc(DisasContext *dc, int cond, int cc,
                   int rs1, bool imm, int rs2_or_imm)
{
    int mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
                ? UA2005_HTRAP_MASK : V8_TRAP_MASK);
    DisasCompare cmp;
    TCGLabel *lab;
    TCGv_i32 trap;

    /* Trap never. */
    if (cond == 0) {
        return advance_pc(dc);
    }

    /*
     * Immediate traps are the most common case.  Since this value is
     * live across the branch, it really pays to evaluate the constant.
     */
    if (rs1 == 0 && (imm || rs2_or_imm == 0)) {
        trap = tcg_constant_i32((rs2_or_imm & mask) + TT_TRAP);
    } else {
        trap = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(trap, gen_load_gpr(dc, rs1));
        if (imm) {
            tcg_gen_addi_i32(trap, trap, rs2_or_imm);
        } else {
            TCGv_i32 t2 = tcg_temp_new_i32();
            tcg_gen_trunc_tl_i32(t2, gen_load_gpr(dc, rs2_or_imm));
            tcg_gen_add_i32(trap, trap, t2);
        }
        tcg_gen_andi_i32(trap, trap, mask);
        tcg_gen_addi_i32(trap, trap, TT_TRAP);
    }

    /* Trap always. */
    if (cond == 8) {
        save_state(dc);
        gen_helper_raise_exception(tcg_env, trap);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    /* Conditional trap. */
    lab = delay_exceptionv(dc, trap);
    gen_compare(&cmp, cc, cond, dc);
    tcg_gen_brcondi_tl(cmp.cond, cmp.c1, cmp.c2, lab);

    return advance_pc(dc);
}

static bool trans_Tcc_r(DisasContext *dc, arg_Tcc_r *a)
{
    if (avail_32(dc) && a->cc) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, false, a->rs2);
}

static bool trans_Tcc_i_v7(DisasContext *dc, arg_Tcc_i_v7 *a)
{
    if (avail_64(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, 0, a->rs1, true, a->i);
}

static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a)
{
    if (avail_32(dc)) {
        return false;
    }
    return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i);
}

static bool trans_STBAR(DisasContext *dc, arg_STBAR *a)
{
    tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
    return advance_pc(dc);
}

static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a)
{
    if (avail_32(dc)) {
        return false;
    }
    if (a->mmask) {
        /* Note TCG_MO_* was modeled on sparc64, so mmask matches. */
        tcg_gen_mb(a->mmask | TCG_BAR_SC);
    }
    if (a->cmask) {
        /* For #Sync, etc, end the TB to recognize interrupts. */
        dc->base.is_jmp = DISAS_EXIT;
    }
    return advance_pc(dc);
}

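/*
 * Reads of special registers funnel through do_rd_special: FUNC emits
 * code computing the value into (or instead of) the scratch destination
 * and returns the TCGv actually holding it, which is then written back
 * to %rd.  PRIV is evaluated at translate time; a false value raises
 * the privileged-instruction trap.
 */
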
static bool do_rd_special(DisasContext *dc, bool priv, int rd,
                          TCGv (*func)(DisasContext *, TCGv))
{
    if (!priv) {
        return raise_priv(dc);
    }
    gen_store_gpr(dc, rd, func(dc, gen_dest_gpr(dc, rd)));
    return advance_pc(dc);
}

static TCGv do_rdy(DisasContext *dc, TCGv dst)
{
    return cpu_y;
}

static bool trans_RDY(DisasContext *dc, arg_RDY *a)
{
    /*
     * TODO: Need a feature bit for sparcv8.  In the meantime, treat all
     * 32-bit cpus like sparcv7, which ignores the rs1 field.
     * This matches after all other ASR, so Leon3 Asr17 is handled first.
     */
    if (avail_64(dc) && a->rs1 != 0) {
        return false;
    }
    return do_rd_special(dc, true, a->rd, do_rdy);
}

static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst)
{
    gen_helper_rdasr17(dst, tcg_env);
    return dst;
}

TRANS(RDASR17, ASR17, do_rd_special, true, a->rd, do_rd_leon3_config)

static TCGv do_rdccr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdccr(dst, tcg_env);
    return dst;
}

TRANS(RDCCR, 64, do_rd_special, true, a->rd, do_rdccr)

static TCGv do_rdasi(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    return tcg_constant_tl(dc->asi);
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDASI, 64, do_rd_special, true, a->rd, do_rdasi)

static TCGv do_rdtick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK, 64, do_rd_special, true, a->rd, do_rdtick)

static TCGv do_rdpc(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(address_mask_i(dc, dc->pc));
}

TRANS(RDPC, 64, do_rd_special, true, a->rd, do_rdpc)

static TCGv do_rdfprs(DisasContext *dc, TCGv dst)
{
    tcg_gen_ext_i32_tl(dst, cpu_fprs);
    return dst;
}

TRANS(RDFPRS, 64, do_rd_special, true, a->rd, do_rdfprs)

static TCGv do_rdgsr(DisasContext *dc, TCGv dst)
{
    gen_trap_ifnofpu(dc);
    return cpu_gsr;
}

TRANS(RDGSR, 64, do_rd_special, true, a->rd, do_rdgsr)

static TCGv do_rdsoftint(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(softint));
    return dst;
}

TRANS(RDSOFTINT, 64, do_rd_special, supervisor(dc), a->rd, do_rdsoftint)

static TCGv do_rdtick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(tick_cmpr));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDTICK_CMPR, 64, do_rd_special, true, a->rd, do_rdtick_cmpr)

static TCGv do_rdstick(DisasContext *dc, TCGv dst)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_tick_get_count(dst, tcg_env, r_tickptr,
                              tcg_constant_i32(dc->mem_idx));
    return dst;
}

/* TODO: non-priv access only allowed when enabled. */
TRANS(RDSTICK, 64, do_rd_special, true, a->rd, do_rdstick)

static TCGv do_rdstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(stick_cmpr));
    return dst;
}

/* TODO: supervisor access only allowed when enabled by hypervisor. */
TRANS(RDSTICK_CMPR, 64, do_rd_special, supervisor(dc), a->rd, do_rdstick_cmpr)

/*
 * UltraSPARC-T1 Strand status.
 * HYPV check maybe not enough, UA2005 & UA2007 describe
 * this ASR as impl. dep.
 */
static TCGv do_rdstrand_status(DisasContext *dc, TCGv dst)
{
    return tcg_constant_tl(1);
}

TRANS(RDSTRAND_STATUS, HYPV, do_rd_special, true, a->rd, do_rdstrand_status)

static TCGv do_rdpsr(DisasContext *dc, TCGv dst)
{
    gen_helper_rdpsr(dst, tcg_env);
    return dst;
}

TRANS(RDPSR, 32, do_rd_special, supervisor(dc), a->rd, do_rdpsr)

static TCGv do_rdhpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hpstate));
    return dst;
}

TRANS(RDHPR_hpstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhpstate)

static TCGv do_rdhtstate(DisasContext *dc, TCGv dst)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Index env->htstate[] by the (masked) current trap level. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_ld_tl(dst, tp, env64_field_offsetof(htstate));
    return dst;
}

TRANS(RDHPR_htstate, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtstate)

static TCGv do_rdhintp(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hintp));
    return dst;
}

TRANS(RDHPR_hintp, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhintp)

static TCGv do_rdhtba(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(htba));
    return dst;
}

TRANS(RDHPR_htba, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhtba)

static TCGv do_rdhver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hver));
    return dst;
}

TRANS(RDHPR_hver, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdhver)

static TCGv do_rdhstick_cmpr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(hstick_cmpr));
    return dst;
}

TRANS(RDHPR_hstick_cmpr, HYPV, do_rd_special, hypervisor(dc), a->rd,
      do_rdhstick_cmpr)

static TCGv do_rdwim(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env32_field_offsetof(wim));
    return dst;
}

TRANS(RDWIM, 32, do_rd_special, supervisor(dc), a->rd, do_rdwim)

static TCGv do_rdtpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtpc)

static TCGv do_rdtnpc(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tnpc));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tnpc, 64, do_rd_special, supervisor(dc), a->rd, do_rdtnpc)

static TCGv do_rdtstate(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld_tl(dst, r_tsptr, offsetof(trap_state, tstate));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdtstate)

static TCGv do_rdtt(DisasContext *dc, TCGv dst)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_ld32s_tl(dst, r_tsptr, offsetof(trap_state, tt));
    return dst;
#else
    qemu_build_not_reached();
#endif
}

TRANS(RDPR_tt, 64, do_rd_special, supervisor(dc), a->rd, do_rdtt)
TRANS(RDPR_tick, 64, do_rd_special, supervisor(dc), a->rd, do_rdtick)

static TCGv do_rdtba(DisasContext *dc, TCGv dst)
{
    return cpu_tbr;
}

TRANS(RDTBR, 32, do_rd_special, supervisor(dc), a->rd, do_rdtba)
TRANS(RDPR_tba, 64, do_rd_special, supervisor(dc), a->rd, do_rdtba)

static TCGv do_rdpstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(pstate));
    return dst;
}

TRANS(RDPR_pstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdpstate)

static TCGv do_rdtl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(tl));
    return dst;
}

TRANS(RDPR_tl, 64, do_rd_special, supervisor(dc), a->rd, do_rdtl)

static TCGv do_rdpil(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env_field_offsetof(psrpil));
    return dst;
}

TRANS(RDPR_pil, 64, do_rd_special, supervisor(dc), a->rd, do_rdpil)

static TCGv do_rdcwp(DisasContext *dc, TCGv dst)
{
    gen_helper_rdcwp(dst, tcg_env);
    return dst;
}

TRANS(RDPR_cwp, 64, do_rd_special, supervisor(dc), a->rd, do_rdcwp)

static TCGv do_rdcansave(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cansave));
    return dst;
}

TRANS(RDPR_cansave, 64, do_rd_special, supervisor(dc), a->rd, do_rdcansave)

static TCGv do_rdcanrestore(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(canrestore));
    return dst;
}

TRANS(RDPR_canrestore, 64, do_rd_special, supervisor(dc), a->rd,
      do_rdcanrestore)

static TCGv do_rdcleanwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(cleanwin));
    return dst;
}

TRANS(RDPR_cleanwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdcleanwin)

static TCGv do_rdotherwin(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(otherwin));
    return dst;
}

TRANS(RDPR_otherwin, 64, do_rd_special, supervisor(dc), a->rd, do_rdotherwin)

static TCGv do_rdwstate(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(wstate));
    return dst;
}

TRANS(RDPR_wstate, 64, do_rd_special, supervisor(dc), a->rd, do_rdwstate)

static TCGv do_rdgl(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld32s_tl(dst, tcg_env, env64_field_offsetof(gl));
    return dst;
}

TRANS(RDPR_gl, GL, do_rd_special, supervisor(dc), a->rd, do_rdgl)

/* UA2005 strand status */
static TCGv do_rdssr(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(ssr));
    return dst;
}

TRANS(RDPR_strand_status, HYPV, do_rd_special, hypervisor(dc), a->rd, do_rdssr)

static TCGv do_rdver(DisasContext *dc, TCGv dst)
{
    tcg_gen_ld_tl(dst, tcg_env, env64_field_offsetof(version));
    return dst;
}

TRANS(RDPR_ver, 64, do_rd_special, supervisor(dc), a->rd, do_rdver)

static bool trans_FLUSHW(DisasContext *dc, arg_FLUSHW *a)
{
    if (avail_64(dc)) {
        gen_helper_flushw(tcg_env);
        return advance_pc(dc);
    }
    return false;
}

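/*
 * Writes to special registers funnel through do_wr_special.  Per the
 * V8/V9 definition of WRASR and friends, the value written is
 * r[rs1] XOR (r[rs2] or the sign-extended immediate); the xor is folded
 * away below when rs1 is %g0.  FUNC then emits the actual side effect.
 */
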
static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv,
                          void (*func)(DisasContext *, TCGv))
{
    TCGv src;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && (a->rs2_or_imm & ~0x1f)) {
        return false;
    }
    if (!priv) {
        return raise_priv(dc);
    }

    if (a->rs1 == 0 && (a->imm || a->rs2_or_imm == 0)) {
        src = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGv src1 = gen_load_gpr(dc, a->rs1);

        if (a->rs2_or_imm == 0) {
            src = src1;
        } else {
            src = tcg_temp_new();
            if (a->imm) {
                tcg_gen_xori_tl(src, src1, a->rs2_or_imm);
            } else {
                tcg_gen_xor_tl(src, src1, gen_load_gpr(dc, a->rs2_or_imm));
            }
        }
    }
    func(dc, src);
    return advance_pc(dc);
}

static void do_wry(DisasContext *dc, TCGv src)
{
    tcg_gen_ext32u_tl(cpu_y, src);
}

TRANS(WRY, ALL, do_wr_special, a, true, do_wry)

static void do_wrccr(DisasContext *dc, TCGv src)
{
    gen_helper_wrccr(tcg_env, src);
}

TRANS(WRCCR, 64, do_wr_special, a, true, do_wrccr)

static void do_wrasi(DisasContext *dc, TCGv src)
{
    TCGv tmp = tcg_temp_new();

    tcg_gen_ext8u_tl(tmp, src);
    tcg_gen_st32_tl(tmp, tcg_env, env64_field_offsetof(asi));
    /* End TB to notice changed ASI. */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRASI, 64, do_wr_special, a, true, do_wrasi)

static void do_wrfprs(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    tcg_gen_trunc_tl_i32(cpu_fprs, src);
    dc->fprs_dirty = 0;
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRFPRS, 64, do_wr_special, a, true, do_wrfprs)

static void do_wrgsr(DisasContext *dc, TCGv src)
{
    gen_trap_ifnofpu(dc);
    tcg_gen_mov_tl(cpu_gsr, src);
}

TRANS(WRGSR, 64, do_wr_special, a, true, do_wrgsr)

static void do_wrsoftint_set(DisasContext *dc, TCGv src)
{
    gen_helper_set_softint(tcg_env, src);
}

TRANS(WRSOFTINT_SET, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_set)

static void do_wrsoftint_clr(DisasContext *dc, TCGv src)
{
    gen_helper_clear_softint(tcg_env, src);
}

TRANS(WRSOFTINT_CLR, 64, do_wr_special, a, supervisor(dc), do_wrsoftint_clr)

static void do_wrsoftint(DisasContext *dc, TCGv src)
{
    gen_helper_write_softint(tcg_env, src);
}

TRANS(WRSOFTINT, 64, do_wr_special, a, supervisor(dc), do_wrsoftint)

static void do_wrtick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(tick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrtick_cmpr)

static void do_wrstick(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, offsetof(CPUSPARCState, stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRSTICK, 64, do_wr_special, a, supervisor(dc), do_wrstick)

static void do_wrstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(stick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(stick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRSTICK_CMPR, 64, do_wr_special, a, supervisor(dc), do_wrstick_cmpr)

static void do_wrpowerdown(DisasContext *dc, TCGv src)
{
    save_state(dc);
    gen_helper_power_down(tcg_env);
}

TRANS(WRPOWERDOWN, POWERDOWN, do_wr_special, a, supervisor(dc), do_wrpowerdown)

static void do_wrmwait(DisasContext *dc, TCGv src)
{
    /*
     * TODO: This is a stub version of mwait, which merely recognizes
     * interrupts immediately and does not wait.
     */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRMWAIT, VIS4, do_wr_special, a, true, do_wrmwait)

static void do_wrpsr(DisasContext *dc, TCGv src)
{
    gen_helper_wrpsr(tcg_env, src);
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPSR, 32, do_wr_special, a, supervisor(dc), do_wrpsr)

static void do_wrwim(DisasContext *dc, TCGv src)
{
    target_ulong mask = MAKE_64BIT_MASK(0, dc->def->nwindows);
    TCGv tmp = tcg_temp_new();

    tcg_gen_andi_tl(tmp, src, mask);
    tcg_gen_st_tl(tmp, tcg_env, env32_field_offsetof(wim));
}

TRANS(WRWIM, 32, do_wr_special, a, supervisor(dc), do_wrwim)

static void do_wrtpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tpc, 64, do_wr_special, a, supervisor(dc), do_wrtpc)

static void do_wrtnpc(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tnpc));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tnpc, 64, do_wr_special, a, supervisor(dc), do_wrtnpc)

static void do_wrtstate(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st_tl(src, r_tsptr, offsetof(trap_state, tstate));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tstate, 64, do_wr_special, a, supervisor(dc), do_wrtstate)

static void do_wrtt(DisasContext *dc, TCGv src)
{
#ifdef TARGET_SPARC64
    TCGv_ptr r_tsptr = tcg_temp_new_ptr();

    gen_load_trap_state_at_tl(r_tsptr);
    tcg_gen_st32_tl(src, r_tsptr, offsetof(trap_state, tt));
#else
    qemu_build_not_reached();
#endif
}

TRANS(WRPR_tt, 64, do_wr_special, a, supervisor(dc), do_wrtt)

static void do_wrtick(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(tick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_count(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRPR_tick, 64, do_wr_special, a, supervisor(dc), do_wrtick)

static void do_wrtba(DisasContext *dc, TCGv src)
{
    tcg_gen_mov_tl(cpu_tbr, src);
}

TRANS(WRPR_tba, 64, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrpstate(DisasContext *dc, TCGv src)
{
    save_state(dc);
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpstate(tcg_env, src);
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_pstate, 64, do_wr_special, a, supervisor(dc), do_wrpstate)

static void do_wrtl(DisasContext *dc, TCGv src)
{
    save_state(dc);
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(tl));
    dc->npc = DYNAMIC_PC;
}

TRANS(WRPR_tl, 64, do_wr_special, a, supervisor(dc), do_wrtl)

static void do_wrpil(DisasContext *dc, TCGv src)
{
    if (translator_io_start(&dc->base)) {
        dc->base.is_jmp = DISAS_EXIT;
    }
    gen_helper_wrpil(tcg_env, src);
}

TRANS(WRPR_pil, 64, do_wr_special, a, supervisor(dc), do_wrpil)

static void do_wrcwp(DisasContext *dc, TCGv src)
{
    gen_helper_wrcwp(tcg_env, src);
}

TRANS(WRPR_cwp, 64, do_wr_special, a, supervisor(dc), do_wrcwp)

static void do_wrcansave(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cansave));
}

TRANS(WRPR_cansave, 64, do_wr_special, a, supervisor(dc), do_wrcansave)

static void do_wrcanrestore(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(canrestore));
}

TRANS(WRPR_canrestore, 64, do_wr_special, a, supervisor(dc), do_wrcanrestore)

static void do_wrcleanwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(cleanwin));
}

TRANS(WRPR_cleanwin, 64, do_wr_special, a, supervisor(dc), do_wrcleanwin)

static void do_wrotherwin(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(otherwin));
}

TRANS(WRPR_otherwin, 64, do_wr_special, a, supervisor(dc), do_wrotherwin)

static void do_wrwstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st32_tl(src, tcg_env, env64_field_offsetof(wstate));
}

TRANS(WRPR_wstate, 64, do_wr_special, a, supervisor(dc), do_wrwstate)

static void do_wrgl(DisasContext *dc, TCGv src)
{
    gen_helper_wrgl(tcg_env, src);
}

TRANS(WRPR_gl, GL, do_wr_special, a, supervisor(dc), do_wrgl)

/* UA2005 strand status */
static void do_wrssr(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(ssr));
}

TRANS(WRPR_strand_status, HYPV, do_wr_special, a, hypervisor(dc), do_wrssr)

TRANS(WRTBR, 32, do_wr_special, a, supervisor(dc), do_wrtba)

static void do_wrhpstate(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hpstate));
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hpstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhpstate)

static void do_wrhtstate(DisasContext *dc, TCGv src)
{
    TCGv_i32 tl = tcg_temp_new_i32();
    TCGv_ptr tp = tcg_temp_new_ptr();

    /* Index env->htstate[] by the (masked) current trap level. */
    tcg_gen_ld_i32(tl, tcg_env, env64_field_offsetof(tl));
    tcg_gen_andi_i32(tl, tl, MAXTL_MASK);
    tcg_gen_shli_i32(tl, tl, 3);
    tcg_gen_ext_i32_ptr(tp, tl);
    tcg_gen_add_ptr(tp, tp, tcg_env);

    tcg_gen_st_tl(src, tp, env64_field_offsetof(htstate));
}

TRANS(WRHPR_htstate, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtstate)

static void do_wrhintp(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hintp));
}

TRANS(WRHPR_hintp, HYPV, do_wr_special, a, hypervisor(dc), do_wrhintp)

static void do_wrhtba(DisasContext *dc, TCGv src)
{
    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(htba));
}

TRANS(WRHPR_htba, HYPV, do_wr_special, a, hypervisor(dc), do_wrhtba)

static void do_wrhstick_cmpr(DisasContext *dc, TCGv src)
{
    TCGv_ptr r_tickptr = tcg_temp_new_ptr();

    tcg_gen_st_tl(src, tcg_env, env64_field_offsetof(hstick_cmpr));
    tcg_gen_ld_ptr(r_tickptr, tcg_env, env64_field_offsetof(hstick));
    translator_io_start(&dc->base);
    gen_helper_tick_set_limit(r_tickptr, src);
    /* End TB to handle timer interrupt */
    dc->base.is_jmp = DISAS_EXIT;
}

TRANS(WRHPR_hstick_cmpr, HYPV, do_wr_special, a, hypervisor(dc),
      do_wrhstick_cmpr)

static bool do_saved_restored(DisasContext *dc, bool saved)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (saved) {
        gen_helper_saved(tcg_env);
    } else {
        gen_helper_restored(tcg_env);
    }
    return advance_pc(dc);
}

TRANS(SAVED, 64, do_saved_restored, true)
TRANS(RESTORED, 64, do_saved_restored, false)

static bool trans_NOP(DisasContext *dc, arg_NOP *a)
{
    return advance_pc(dc);
}

/*
 * TODO: Need a feature bit for sparcv8.
 * In the meantime, treat all 32-bit cpus like sparcv7.
 */
TRANS(NOP_v7, 32, trans_NOP, a)
TRANS(NOP_v9, 64, trans_NOP, a)

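/*
 * do_arith_int is the shared back end for the two- and three-operand
 * integer ops.  FUNC handles the register form, FUNCI the immediate
 * form (falling back to FUNC with a constant when FUNCI is NULL).
 * When LOGIC_CC is set, the result is computed directly into cpu_cc_N
 * and the remaining flags are set as for a logical operation: Z mirrors
 * the result, C and V are cleared.
 */
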
static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a,
                         void (*func)(TCGv, TCGv, TCGv),
                         void (*funci)(TCGv, TCGv, target_long),
                         bool logic_cc)
{
    TCGv dst, src1;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (logic_cc) {
        dst = cpu_cc_N;
    } else {
        dst = gen_dest_gpr(dc, a->rd);
    }
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm || a->rs2_or_imm == 0) {
        if (funci) {
            funci(dst, src1, a->rs2_or_imm);
        } else {
            func(dst, src1, tcg_constant_tl(a->rs2_or_imm));
        }
    } else {
        func(dst, src1, cpu_regs[a->rs2_or_imm]);
    }

    if (logic_cc) {
        if (TARGET_LONG_BITS == 64) {
            tcg_gen_mov_tl(cpu_icc_Z, cpu_cc_N);
            tcg_gen_movi_tl(cpu_icc_C, 0);
        }
        tcg_gen_mov_tl(cpu_cc_Z, cpu_cc_N);
        tcg_gen_movi_tl(cpu_cc_C, 0);
        tcg_gen_movi_tl(cpu_cc_V, 0);
    }

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool do_arith(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long),
                     void (*func_cc)(TCGv, TCGv, TCGv))
{
    if (a->cc) {
        return do_arith_int(dc, a, func_cc, NULL, false);
    }
    return do_arith_int(dc, a, func, funci, false);
}

static bool do_logic(DisasContext *dc, arg_r_r_ri_cc *a,
                     void (*func)(TCGv, TCGv, TCGv),
                     void (*funci)(TCGv, TCGv, target_long))
{
    return do_arith_int(dc, a, func, funci, a->cc);
}

TRANS(ADD, ALL, do_arith, a, tcg_gen_add_tl, tcg_gen_addi_tl, gen_op_addcc)
TRANS(SUB, ALL, do_arith, a, tcg_gen_sub_tl, tcg_gen_subi_tl, gen_op_subcc)
TRANS(ADDC, ALL, do_arith, a, gen_op_addc, NULL, gen_op_addccc)
TRANS(SUBC, ALL, do_arith, a, gen_op_subc, NULL, gen_op_subccc)

TRANS(TADDcc, ALL, do_arith, a, NULL, NULL, gen_op_taddcc)
TRANS(TSUBcc, ALL, do_arith, a, NULL, NULL, gen_op_tsubcc)
TRANS(TADDccTV, ALL, do_arith, a, NULL, NULL, gen_op_taddcctv)
TRANS(TSUBccTV, ALL, do_arith, a, NULL, NULL, gen_op_tsubcctv)

TRANS(AND, ALL, do_logic, a, tcg_gen_and_tl, tcg_gen_andi_tl)
TRANS(XOR, ALL, do_logic, a, tcg_gen_xor_tl, tcg_gen_xori_tl)
TRANS(ANDN, ALL, do_logic, a, tcg_gen_andc_tl, NULL)
TRANS(ORN, ALL, do_logic, a, tcg_gen_orc_tl, NULL)
TRANS(XORN, ALL, do_logic, a, tcg_gen_eqv_tl, NULL)

TRANS(MULX, 64, do_arith, a, tcg_gen_mul_tl, tcg_gen_muli_tl, NULL)
TRANS(UMUL, MUL, do_logic, a, gen_op_umul, NULL)
TRANS(SMUL, MUL, do_logic, a, gen_op_smul, NULL)
TRANS(MULScc, ALL, do_arith, a, NULL, NULL, gen_op_mulscc)

TRANS(UDIVcc, DIV, do_arith, a, NULL, NULL, gen_op_udivcc)
TRANS(SDIV, DIV, do_arith, a, gen_op_sdiv, NULL, gen_op_sdivcc)

/* TODO: Should have feature bit -- comes in with UltraSparc T2. */
TRANS(POPC, 64, do_arith, a, gen_op_popc, NULL, NULL)

static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a)
{
    /* OR with %g0 is the canonical alias for MOV. */
    if (!a->cc && a->rs1 == 0) {
        if (a->imm || a->rs2_or_imm == 0) {
            gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm));
        } else if (a->rs2_or_imm & ~0x1f) {
            /* For simplicity, we under-decoded the rs2 form. */
            return false;
        } else {
            gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]);
        }
        return advance_pc(dc);
    }
    return do_logic(dc, a, tcg_gen_or_tl, tcg_gen_ori_tl);
}

static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv_i64 t1, t2;
    TCGv dst;

    if (!avail_DIV(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        t2 = tcg_constant_i64((uint32_t)a->rs2_or_imm);
    } else {
        TCGLabel *lab;
        TCGv_i32 n2;

        n2 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(n2, cpu_regs[a->rs2_or_imm]);

        lab = delay_exception(dc, TT_DIV_ZERO);
        tcg_gen_brcondi_i32(TCG_COND_EQ, n2, 0, lab);

        t2 = tcg_temp_new_i64();
#ifdef TARGET_SPARC64
        tcg_gen_ext32u_i64(t2, cpu_regs[a->rs2_or_imm]);
#else
        tcg_gen_extu_i32_i64(t2, cpu_regs[a->rs2_or_imm]);
#endif
    }

    /* The dividend is the 64-bit concatenation of %y and rs1. */
    t1 = tcg_temp_new_i64();
    tcg_gen_concat_tl_i64(t1, gen_load_gpr(dc, a->rs1), cpu_y);

    tcg_gen_divu_i64(t1, t1, t2);

    /* On overflow, the quotient saturates to 0xffffffff. */
    tcg_gen_umin_i64(t1, t1, tcg_constant_i64(UINT32_MAX));

    dst = gen_dest_gpr(dc, a->rd);
    tcg_gen_trunc_i64_tl(dst, t1);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    if (a->imm) {
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab = delay_exception(dc, TT_DIV_ZERO);

        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    tcg_gen_divu_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv dst, src1, src2;

    if (!avail_64(dc)) {
        return false;
    }
    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    if (unlikely(a->rs2_or_imm == 0)) {
        gen_exception(dc, TT_DIV_ZERO);
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_gpr(dc, a->rs1);

    if (a->imm) {
        /* Division by -1 is just negation; this also avoids overflow. */
        if (unlikely(a->rs2_or_imm == -1)) {
            tcg_gen_neg_tl(dst, src1);
            gen_store_gpr(dc, a->rd, dst);
            return advance_pc(dc);
        }
        src2 = tcg_constant_tl(a->rs2_or_imm);
    } else {
        TCGLabel *lab = delay_exception(dc, TT_DIV_ZERO);
        TCGv t1, t2;

        src2 = cpu_regs[a->rs2_or_imm];
        tcg_gen_brcondi_tl(TCG_COND_EQ, src2, 0, lab);

        /*
         * Need to avoid INT64_MIN / -1, which will trap on x86 host.
         * Set SRC2 to 1 as a new divisor, to produce the correct result.
         */
        t1 = tcg_temp_new();
        t2 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t1, src1, (target_long)INT64_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, src2, -1);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t1, tcg_constant_tl(0),
                           tcg_constant_tl(1), src2);
        src2 = t1;
    }

    tcg_gen_div_tl(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

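/*
 * The VIS EDGE insns compute a mask of the bytes/halfwords/words that
 * fall between two addresses within an aligned 8-byte block, for use
 * with partial stores at the edges of a copy loop.  The cc variants
 * additionally set the condition codes from s1 - s2, and the LE
 * variants number the elements in little-endian order.
 */
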
static bool gen_edge(DisasContext *dc, arg_r_r_r *a,
                     int width, bool cc, bool little_endian)
{
    TCGv dst, s1, s2, l, r, t, m;
    uint64_t amask = address_mask_i(dc, -8);

    dst = gen_dest_gpr(dc, a->rd);
    s1 = gen_load_gpr(dc, a->rs1);
    s2 = gen_load_gpr(dc, a->rs2);

    if (cc) {
        gen_op_subcc(cpu_cc_N, s1, s2);
    }

    l = tcg_temp_new();
    r = tcg_temp_new();
    t = tcg_temp_new();

    switch (width) {
    case 8:
        tcg_gen_andi_tl(l, s1, 7);
        tcg_gen_andi_tl(r, s2, 7);
        tcg_gen_xori_tl(r, r, 7);
        m = tcg_constant_tl(0xff);
        break;
    case 16:
        tcg_gen_extract_tl(l, s1, 1, 2);
        tcg_gen_extract_tl(r, s2, 1, 2);
        tcg_gen_xori_tl(r, r, 3);
        m = tcg_constant_tl(0xf);
        break;
    case 32:
        tcg_gen_extract_tl(l, s1, 2, 1);
        tcg_gen_extract_tl(r, s2, 2, 1);
        tcg_gen_xori_tl(r, r, 1);
        m = tcg_constant_tl(0x3);
        break;
    default:
        g_assert_not_reached();
    }

    /* Compute Left Edge */
    if (little_endian) {
        tcg_gen_shl_tl(l, m, l);
        tcg_gen_and_tl(l, l, m);
    } else {
        tcg_gen_shr_tl(l, m, l);
    }
    /* Compute Right Edge */
    if (little_endian) {
        tcg_gen_shr_tl(r, m, r);
    } else {
        tcg_gen_shl_tl(r, m, r);
        tcg_gen_and_tl(r, r, m);
    }

    /* Compute dst = (s1 == s2 under amask ? l : l & r) */
    tcg_gen_xor_tl(t, s1, s2);
    tcg_gen_and_tl(r, r, l);
    tcg_gen_movcond_tl(TCG_COND_TSTEQ, dst, t, tcg_constant_tl(amask), r, l);

    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(EDGE8cc, VIS1, gen_edge, a, 8, 1, 0)
TRANS(EDGE8Lcc, VIS1, gen_edge, a, 8, 1, 1)
TRANS(EDGE16cc, VIS1, gen_edge, a, 16, 1, 0)
TRANS(EDGE16Lcc, VIS1, gen_edge, a, 16, 1, 1)
TRANS(EDGE32cc, VIS1, gen_edge, a, 32, 1, 0)
TRANS(EDGE32Lcc, VIS1, gen_edge, a, 32, 1, 1)

TRANS(EDGE8N, VIS2, gen_edge, a, 8, 0, 0)
TRANS(EDGE8LN, VIS2, gen_edge, a, 8, 0, 1)
TRANS(EDGE16N, VIS2, gen_edge, a, 16, 0, 0)
TRANS(EDGE16LN, VIS2, gen_edge, a, 16, 0, 1)
TRANS(EDGE32N, VIS2, gen_edge, a, 32, 0, 0)
TRANS(EDGE32LN, VIS2, gen_edge, a, 32, 0, 1)

static bool do_rr(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src = gen_load_gpr(dc, a->rs);

    func(dst, src);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(LZCNT, VIS3, do_rr, a, gen_op_lzcnt)

static bool do_rrr(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv, TCGv))
{
    TCGv dst = gen_dest_gpr(dc, a->rd);
    TCGv src1 = gen_load_gpr(dc, a->rs1);
    TCGv src2 = gen_load_gpr(dc, a->rs2);

    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(ARRAY8, VIS1, do_rrr, a, gen_helper_array8)
TRANS(ARRAY16, VIS1, do_rrr, a, gen_op_array16)
TRANS(ARRAY32, VIS1, do_rrr, a, gen_op_array32)

TRANS(ADDXC, VIS3, do_rrr, a, gen_op_addxc)
TRANS(ADDXCcc, VIS3, do_rrr, a, gen_op_addxccc)

TRANS(SUBXC, VIS4, do_rrr, a, gen_op_subxc)
TRANS(SUBXCcc, VIS4, do_rrr, a, gen_op_subxccc)

TRANS(UMULXHI, VIS3, do_rrr, a, gen_op_umulxhi)

static void gen_op_alignaddr(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    /* Record the low 3 bits of the sum in GSR.align. */
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

static void gen_op_alignaddrl(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    TCGv tmp = tcg_temp_new();

    tcg_gen_add_tl(tmp, s1, s2);
    tcg_gen_andi_tl(dst, tmp, -8);
    tcg_gen_neg_tl(tmp, tmp);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, tmp, 0, 3);
#else
    g_assert_not_reached();
#endif
}

TRANS(ALIGNADDR, VIS1, do_rrr, a, gen_op_alignaddr)
TRANS(ALIGNADDRL, VIS1, do_rrr, a, gen_op_alignaddrl)

static void gen_op_bmask(TCGv dst, TCGv s1, TCGv s2)
{
#ifdef TARGET_SPARC64
    tcg_gen_add_tl(dst, s1, s2);
    tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, dst, 32, 32);
#else
    g_assert_not_reached();
#endif
}

TRANS(BMASK, VIS2, do_rrr, a, gen_op_bmask)

static bool do_cmask(DisasContext *dc, int rs2, void (*func)(TCGv, TCGv, TCGv))
{
    func(cpu_gsr, cpu_gsr, gen_load_gpr(dc, rs2));
    return advance_pc(dc);
}

TRANS(CMASK8, VIS3, do_cmask, a->rs2, gen_helper_cmask8)
TRANS(CMASK16, VIS3, do_cmask, a->rs2, gen_helper_cmask16)
TRANS(CMASK32, VIS3, do_cmask, a->rs2, gen_helper_cmask32)

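/*
 * Shifts: the count comes either from the low 5 or 6 bits of rs2
 * (do_shift_r) or from an immediate (do_shift_i).  For the 32-bit
 * forms on sparc64, the source is first zero- or sign-extended, or
 * the shift is folded into a single deposit/extract, so the 64-bit
 * register always ends up with a correctly extended 32-bit result.
 */
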
static bool do_shift_r(DisasContext *dc, arg_shiftr *a, bool l, bool u)
{
    TCGv dst, src1, src2;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && a->x) {
        return false;
    }

    src2 = tcg_temp_new();
    tcg_gen_andi_tl(src2, gen_load_gpr(dc, a->rs2), a->x ? 63 : 31);
    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (l) {
        tcg_gen_shl_tl(dst, src1, src2);
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, dst);
        }
    } else if (u) {
        if (!a->x) {
            tcg_gen_ext32u_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_shr_tl(dst, src1, src2);
    } else {
        if (!a->x) {
            tcg_gen_ext32s_tl(dst, src1);
            src1 = dst;
        }
        tcg_gen_sar_tl(dst, src1, src2);
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_r, ALL, do_shift_r, a, true, true)
TRANS(SRL_r, ALL, do_shift_r, a, false, true)
TRANS(SRA_r, ALL, do_shift_r, a, false, false)

static bool do_shift_i(DisasContext *dc, arg_shifti *a, bool l, bool u)
{
    TCGv dst, src1;

    /* Reject 64-bit shifts for sparc32. */
    if (avail_32(dc) && (a->x || a->i >= 32)) {
        return false;
    }

    src1 = gen_load_gpr(dc, a->rs1);
    dst = gen_dest_gpr(dc, a->rd);

    if (avail_32(dc) || a->x) {
        if (l) {
            tcg_gen_shli_tl(dst, src1, a->i);
        } else if (u) {
            tcg_gen_shri_tl(dst, src1, a->i);
        } else {
            tcg_gen_sari_tl(dst, src1, a->i);
        }
    } else {
        if (l) {
            tcg_gen_deposit_z_tl(dst, src1, a->i, 32 - a->i);
        } else if (u) {
            tcg_gen_extract_tl(dst, src1, a->i, 32 - a->i);
        } else {
            tcg_gen_sextract_tl(dst, src1, a->i, 32 - a->i);
        }
    }
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(SLL_i, ALL, do_shift_i, a, true, true)
TRANS(SRL_i, ALL, do_shift_i, a, false, true)
TRANS(SRA_i, ALL, do_shift_i, a, false, false)

static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm)
{
    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }
    if (imm || rs2_or_imm == 0) {
        return tcg_constant_tl(rs2_or_imm);
    }
    return cpu_regs[rs2_or_imm];
}

static bool do_mov_cond(DisasContext *dc, DisasCompare *cmp, int rd, TCGv src2)
{
    TCGv dst = gen_load_gpr(dc, rd);
    TCGv c2 = tcg_constant_tl(cmp->c2);

    tcg_gen_movcond_tl(cmp->cond, dst, cmp->c1, c2, src2, dst);
    gen_store_gpr(dc, rd, dst);
    return advance_pc(dc);
}

static bool trans_MOVcc(DisasContext *dc, arg_MOVcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_compare(&cmp, a->cc, a->cond, dc);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVfcc(DisasContext *dc, arg_MOVfcc *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    gen_fcompare(&cmp, a->cc, a->cond);
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool trans_MOVR(DisasContext *dc, arg_MOVR *a)
{
    TCGv src2 = gen_rs2_or_imm(dc, a->imm, a->rs2_or_imm);
    DisasCompare cmp;

    if (src2 == NULL) {
        return false;
    }
    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    return do_mov_cond(dc, &cmp, a->rd, src2);
}

static bool do_add_special(DisasContext *dc, arg_r_r_ri *a,
                           bool (*func)(DisasContext *dc, int rd, TCGv src))
{
    TCGv src1, sum;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!a->imm && a->rs2_or_imm & ~0x1f) {
        return false;
    }

    /*
     * Always load the sum into a new temporary.
     * This is required to capture the value across a window change,
     * e.g. SAVE and RESTORE, and may be optimized away otherwise.
     */
    sum = tcg_temp_new();
    src1 = gen_load_gpr(dc, a->rs1);
    if (a->imm || a->rs2_or_imm == 0) {
        tcg_gen_addi_tl(sum, src1, a->rs2_or_imm);
    } else {
        tcg_gen_add_tl(sum, src1, cpu_regs[a->rs2_or_imm]);
    }
    return func(dc, a->rd, sum);
}

static bool do_jmpl(DisasContext *dc, int rd, TCGv src)
{
    /*
     * Preserve pc across advance, so that we can delay
     * the writeback to rd until after src is consumed.
     */
    target_ulong cur_pc = dc->pc;

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);
    gen_store_gpr(dc, rd, tcg_constant_tl(cur_pc));

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(JMPL, ALL, do_add_special, a, do_jmpl)

static bool do_rett(DisasContext *dc, int rd, TCGv src)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }

    gen_check_align(dc, src, 3);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_helper_rett(tcg_env);

    dc->npc = DYNAMIC_PC;
    return true;
}

TRANS(RETT, 32, do_add_special, a, do_rett)

static bool do_return(DisasContext *dc, int rd, TCGv src)
{
    gen_check_align(dc, src, 3);
    gen_helper_restore(tcg_env);

    gen_mov_pc_npc(dc);
    tcg_gen_mov_tl(cpu_npc, src);
    gen_address_mask(dc, cpu_npc);

    dc->npc = DYNAMIC_PC_LOOKUP;
    return true;
}

TRANS(RETURN, 64, do_add_special, a, do_return)

static bool do_save(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_save(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(SAVE, ALL, do_add_special, a, do_save)

static bool do_restore(DisasContext *dc, int rd, TCGv src)
{
    gen_helper_restore(tcg_env);
    gen_store_gpr(dc, rd, src);
    return advance_pc(dc);
}

TRANS(RESTORE, ALL, do_add_special, a, do_restore)

static bool do_done_retry(DisasContext *dc, bool done)
{
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    dc->npc = DYNAMIC_PC;
    dc->pc = DYNAMIC_PC;
    translator_io_start(&dc->base);
    if (done) {
        gen_helper_done(tcg_env);
    } else {
        gen_helper_retry(tcg_env);
    }
    return true;
}

TRANS(DONE, 64, do_done_retry, true)
TRANS(RETRY, 64, do_done_retry, false)

/*
 * Major opcode 11 -- load and store instructions
 */

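/*
 * All loads and stores form their effective address as rs1 plus either
 * rs2 or a sign-extended 13-bit immediate, via gen_ldst_addr below.
 * When the 32-bit address mask applies, the result is zero-extended to
 * 32 bits.  A NULL return means the insn was under-decoded and must be
 * rejected as illegal.
 */
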
static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm)
{
    TCGv addr, tmp = NULL;

    /* For simplicity, we under-decoded the rs2 form. */
    if (!imm && rs2_or_imm & ~0x1f) {
        return NULL;
    }

    addr = gen_load_gpr(dc, rs1);
    if (rs2_or_imm) {
        tmp = tcg_temp_new();
        if (imm) {
            tcg_gen_addi_tl(tmp, addr, rs2_or_imm);
        } else {
            tcg_gen_add_tl(tmp, addr, cpu_regs[rs2_or_imm]);
        }
        addr = tmp;
    }
    if (AM_CHECK(dc)) {
        if (!tmp) {
            tmp = tcg_temp_new();
        }
        tcg_gen_ext32u_tl(tmp, addr);
        addr = tmp;
    }
    return addr;
}

static bool do_ld_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ld_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

TRANS(LDUW, ALL, do_ld_gpr, a, MO_TEUL)
TRANS(LDUB, ALL, do_ld_gpr, a, MO_UB)
TRANS(LDUH, ALL, do_ld_gpr, a, MO_TEUW)
TRANS(LDSB, ALL, do_ld_gpr, a, MO_SB)
TRANS(LDSH, ALL, do_ld_gpr, a, MO_TESW)
TRANS(LDSW, 64, do_ld_gpr, a, MO_TESL)
TRANS(LDX, 64, do_ld_gpr, a, MO_TEUQ)

static bool do_st_gpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv reg, addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    reg = gen_load_gpr(dc, a->rd);
    gen_st_asi(dc, &da, reg, addr);
    return advance_pc(dc);
}

TRANS(STW, ALL, do_st_gpr, a, MO_TEUL)
TRANS(STB, ALL, do_st_gpr, a, MO_UB)
TRANS(STH, ALL, do_st_gpr, a, MO_TEUW)
TRANS(STX, 64, do_st_gpr, a, MO_TEUQ)

static bool trans_LDD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_ldda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_STD(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr;
    DisasASI da;

    if (a->rd & 1) {
        return false;
    }
    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUQ);
    gen_stda_asi(dc, &da, addr, a->rd);
    return advance_pc(dc);
}

static bool trans_LDSTUB(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, reg;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_UB);

    reg = gen_dest_gpr(dc, a->rd);
    gen_ldstub_asi(dc, &da, reg, addr);
    gen_store_gpr(dc, a->rd, reg);
    return advance_pc(dc);
}

static bool trans_SWAP(DisasContext *dc, arg_r_r_ri_asi *a)
{
    TCGv addr, dst, src;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, MO_TEUL);

    dst = gen_dest_gpr(dc, a->rd);
    src = gen_load_gpr(dc, a->rd);
    gen_swap_asi(dc, &da, dst, src, addr);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

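/*
 * CASA/CASXA: compare the value in memory at [rs1] with rs2; if equal,
 * swap in the value from rd, and in either case return the old memory
 * value in rd.  The address comes from rs1 alone, so gen_ldst_addr is
 * called with a zero immediate.
 */
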
static bool do_casa(DisasContext *dc, arg_r_r_ri_asi *a, MemOp mop)
{
    TCGv addr, o, n, c;
    DisasASI da;

    addr = gen_ldst_addr(dc, a->rs1, true, 0);
    if (addr == NULL) {
        return false;
    }
    da = resolve_asi(dc, a->asi, mop);

    o = gen_dest_gpr(dc, a->rd);
    n = gen_load_gpr(dc, a->rd);
    c = gen_load_gpr(dc, a->rs2_or_imm);
    gen_cas_asi(dc, &da, o, n, c, addr);
    gen_store_gpr(dc, a->rd, o);
    return advance_pc(dc);
}

TRANS(CASA, CASA, do_casa, a, MO_TEUL)
TRANS(CASXA, 64, do_casa, a, MO_TEUQ)

static bool do_ld_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_ldf_asi(dc, &da, sz, addr, a->rd);
    gen_update_fprs_dirty(dc, a->rd);
    return advance_pc(dc);
}

TRANS(LDF, ALL, do_ld_fpr, a, MO_32)
TRANS(LDDF, ALL, do_ld_fpr, a, MO_64)
TRANS(LDQF, ALL, do_ld_fpr, a, MO_128)

TRANS(LDFA, 64, do_ld_fpr, a, MO_32)
TRANS(LDDFA, 64, do_ld_fpr, a, MO_64)
TRANS(LDQFA, 64, do_ld_fpr, a, MO_128)

static bool do_st_fpr(DisasContext *dc, arg_r_r_ri_asi *a, MemOp sz)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    DisasASI da;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (sz == MO_128 && gen_trap_float128(dc)) {
        return true;
    }
    da = resolve_asi(dc, a->asi, MO_TE | sz);
    gen_stf_asi(dc, &da, sz, addr, a->rd);
    return advance_pc(dc);
}

TRANS(STF, ALL, do_st_fpr, a, MO_32)
TRANS(STDF, ALL, do_st_fpr, a, MO_64)
TRANS(STQF, ALL, do_st_fpr, a, MO_128)

TRANS(STFA, 64, do_st_fpr, a, MO_32)
TRANS(STDFA, 64, do_st_fpr, a, MO_64)
TRANS(STQFA, 64, do_st_fpr, a, MO_128)

static bool trans_STDFQ(DisasContext *dc, arg_STDFQ *a)
{
    if (!avail_32(dc)) {
        return false;
    }
    if (!supervisor(dc)) {
        return raise_priv(dc);
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_op_fpexception_im(dc, FSR_FTT_SEQ_ERROR);
    return true;
}

static bool trans_LDFSR(DisasContext *dc, arg_r_r_ri *a)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i32 tmp;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    tcg_gen_qemu_ld_i32(tmp, addr, dc->mem_idx, MO_TEUL | MO_ALIGN);

    tcg_gen_extract_i32(cpu_fcc[0], tmp, FSR_FCC0_SHIFT, 2);
    /* LDFSR does not change FCC[1-3]. */

    gen_helper_set_fsr_nofcc_noftt(tcg_env, tmp);
    return advance_pc(dc);
}

static bool do_ldxfsr(DisasContext *dc, arg_r_r_ri *a, bool entire)
{
#ifdef TARGET_SPARC64
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv_i64 t64;
    TCGv_i32 lo, hi;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    t64 = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t64, addr, dc->mem_idx, MO_TEUQ | MO_ALIGN);

    lo = tcg_temp_new_i32();
    hi = tcg_temp_new_i32();
    tcg_gen_extr_i64_i32(lo, hi, t64);
    tcg_gen_extract_i32(cpu_fcc[0], lo, FSR_FCC0_SHIFT, 2);
    tcg_gen_extract_i32(cpu_fcc[1], hi, FSR_FCC1_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[2], hi, FSR_FCC2_SHIFT - 32, 2);
    tcg_gen_extract_i32(cpu_fcc[3], hi, FSR_FCC3_SHIFT - 32, 2);

    if (entire) {
        gen_helper_set_fsr_nofcc(tcg_env, lo);
    } else {
        gen_helper_set_fsr_nofcc_noftt(tcg_env, lo);
    }
    return advance_pc(dc);
#else
    return false;
#endif
}

TRANS(LDXFSR, 64, do_ldxfsr, a, false)
TRANS(LDXEFSR, VIS3B, do_ldxfsr, a, true)

static bool do_stfsr(DisasContext *dc, arg_r_r_ri *a, MemOp mop)
{
    TCGv addr = gen_ldst_addr(dc, a->rs1, a->imm, a->rs2_or_imm);
    TCGv fsr;

    if (addr == NULL) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    fsr = tcg_temp_new();
    gen_helper_get_fsr(fsr, tcg_env);
    tcg_gen_qemu_st_tl(fsr, addr, dc->mem_idx, mop | MO_ALIGN);
    return advance_pc(dc);
}

TRANS(STFSR, ALL, do_stfsr, a, MO_TEUL)
TRANS(STXFSR, 64, do_stfsr, a, MO_TEUQ)

static bool do_fc(DisasContext *dc, int rd, int32_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_F(dc, rd, tcg_constant_i32(c));
    return advance_pc(dc);
}

TRANS(FZEROs, VIS1, do_fc, a->rd, 0)
TRANS(FONEs, VIS1, do_fc, a->rd, -1)

static bool do_dc(DisasContext *dc, int rd, int64_t c)
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    gen_store_fpr_D(dc, rd, tcg_constant_i64(c));
    return advance_pc(dc);
}

TRANS(FZEROd, VIS1, do_dc, a->rd, 0)
TRANS(FONEd, VIS1, do_dc, a->rd, -1)

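/*
 * The remaining FP templates are named by operand shape: 'f' is a
 * 32-bit float register, 'd' 64-bit, 'q' 128-bit, and an 'env_' prefix
 * means the operation goes through a helper that can raise IEEE
 * exceptions via tcg_env.  Thus do_env_fd is a helper-based op
 * producing a 32-bit result from a 64-bit source, and so on.
 */
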
static bool do_ff(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FMOVs, ALL, do_ff, a, gen_op_fmovs)
TRANS(FNEGs, ALL, do_ff, a, gen_op_fnegs)
TRANS(FABSs, ALL, do_ff, a, gen_op_fabss)
TRANS(FSRCs, VIS1, do_ff, a, tcg_gen_mov_i32)
TRANS(FNOTs, VIS1, do_ff, a, tcg_gen_not_i32)

static bool do_fd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i32, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPACK16, VIS1, do_fd, a, gen_op_fpack16)
TRANS(FPACKFIX, VIS1, do_fd, a, gen_op_fpackfix)

static bool do_env_ff(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    tmp = gen_load_fpr_F(dc, a->rs);
    func(tmp, tcg_env, tmp);
    gen_store_fpr_F(dc, a->rd, tmp);
    return advance_pc(dc);
}

TRANS(FSQRTs, ALL, do_env_ff, a, gen_helper_fsqrts)
TRANS(FiTOs, ALL, do_env_ff, a, gen_helper_fitos)
TRANS(FsTOi, ALL, do_env_ff, a, gen_helper_fstoi)

static bool do_env_fd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i32();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOs, ALL, do_env_fd, a, gen_helper_fdtos)
TRANS(FdTOi, ALL, do_env_fd, a, gen_helper_fdtoi)
TRANS(FxTOs, 64, do_env_fd, a, gen_helper_fxtos)

static bool do_dd(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMOVd, 64, do_dd, a, gen_op_fmovd)
TRANS(FNEGd, 64, do_dd, a, gen_op_fnegd)
TRANS(FABSd, 64, do_dd, a, gen_op_fabsd)
TRANS(FSRCd, VIS1, do_dd, a, tcg_gen_mov_i64)
TRANS(FNOTd, VIS1, do_dd, a, tcg_gen_not_i64)

static bool do_env_dd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 dst, src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_D(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FSQRTd, ALL, do_env_dd, a, gen_helper_fsqrtd)
TRANS(FxTOd, 64, do_env_dd, a, gen_helper_fxtod)
TRANS(FdTOx, 64, do_env_dd, a, gen_helper_fdtox)

static bool do_df(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i64, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FEXPAND, VIS1, do_df, a, gen_helper_fexpand)

static bool do_env_df(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src = gen_load_fpr_F(dc, a->rs);
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOd, ALL, do_env_df, a, gen_helper_fitod)
TRANS(FsTOd, ALL, do_env_df, a, gen_helper_fstod)
TRANS(FsTOx, 64, do_env_df, a, gen_helper_fstox)

static bool do_qq(DisasContext *dc, arg_r_r *a,
                  void (*func)(TCGv_i128, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    t = gen_load_fpr_Q(dc, a->rs);
    func(t, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FMOVq, 64, do_qq, a, tcg_gen_mov_i128)
TRANS(FNEGq, 64, do_qq, a, gen_op_fnegq)
TRANS(FABSq, 64, do_qq, a, gen_op_fabsq)

static bool do_env_qq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i128))
{
    TCGv_i128 t;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    t = gen_load_fpr_Q(dc, a->rs);
    func(t, tcg_env, t);
    gen_store_fpr_Q(dc, a->rd, t);
    return advance_pc(dc);
}

TRANS(FSQRTq, ALL, do_env_qq, a, gen_helper_fsqrtq)

static bool do_env_fq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i32, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i32 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i32();
    func(dst, tcg_env, src);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOs, ALL, do_env_fq, a, gen_helper_fqtos)
TRANS(FqTOi, ALL, do_env_fq, a, gen_helper_fqtoi)

static bool do_env_dq(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i64, TCGv_env, TCGv_i128))
{
    TCGv_i128 src;
    TCGv_i64 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_Q(dc, a->rs);
    dst = tcg_temp_new_i64();
    func(dst, tcg_env, src);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FqTOd, ALL, do_env_dq, a, gen_helper_fqtod)
TRANS(FqTOx, 64, do_env_dq, a, gen_helper_fqtox)

static bool do_env_qf(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_F(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FiTOq, ALL, do_env_qf, a, gen_helper_fitoq)
TRANS(FsTOq, ALL, do_env_qf, a, gen_helper_fstoq)

static bool do_env_qd(DisasContext *dc, arg_r_r *a,
                      void (*func)(TCGv_i128, TCGv_env, TCGv_i64))
{
    TCGv_i64 src;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src = gen_load_fpr_D(dc, a->rs);
    dst = tcg_temp_new_i128();
    func(dst, tcg_env, src);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FdTOq, ALL, do_env_qd, a, gen_helper_fdtoq)
TRANS(FxTOq, 64, do_env_qd, a, gen_helper_fxtoq)

static bool do_fff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FPADD16s, VIS1, do_fff, a, tcg_gen_vec_add16_i32)
TRANS(FPADD32s, VIS1, do_fff, a, tcg_gen_add_i32)
TRANS(FPSUB16s, VIS1, do_fff, a, tcg_gen_vec_sub16_i32)
TRANS(FPSUB32s, VIS1, do_fff, a, tcg_gen_sub_i32)
TRANS(FNORs, VIS1, do_fff, a, tcg_gen_nor_i32)
TRANS(FANDNOTs, VIS1, do_fff, a, tcg_gen_andc_i32)
TRANS(FXORs, VIS1, do_fff, a, tcg_gen_xor_i32)
TRANS(FNANDs, VIS1, do_fff, a, tcg_gen_nand_i32)
TRANS(FANDs, VIS1, do_fff, a, tcg_gen_and_i32)
TRANS(FXNORs, VIS1, do_fff, a, tcg_gen_eqv_i32)
TRANS(FORNOTs, VIS1, do_fff, a, tcg_gen_orc_i32)
TRANS(FORs, VIS1, do_fff, a, tcg_gen_or_i32)

TRANS(FHADDs, VIS3, do_fff, a, gen_op_fhadds)
TRANS(FHSUBs, VIS3, do_fff, a, gen_op_fhsubs)
TRANS(FNHADDs, VIS3, do_fff, a, gen_op_fnhadds)

TRANS(FPADDS16s, VIS3, do_fff, a, gen_op_fpadds16s)
TRANS(FPSUBS16s, VIS3, do_fff, a, gen_op_fpsubs16s)
TRANS(FPADDS32s, VIS3, do_fff, a, gen_op_fpadds32s)
TRANS(FPSUBS32s, VIS3, do_fff, a, gen_op_fpsubs32s)
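
/* Single-precision arithmetic through an env-taking (IEEE) helper. */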
static bool do_env_fff(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_F(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDs, ALL, do_env_fff, a, gen_helper_fadds)
TRANS(FSUBs, ALL, do_env_fff, a, gen_helper_fsubs)
TRANS(FMULs, ALL, do_env_fff, a, gen_helper_fmuls)
TRANS(FDIVs, ALL, do_env_fff, a, gen_helper_fdivs)
TRANS(FNADDs, VIS3, do_env_fff, a, gen_helper_fnadds)
TRANS(FNMULs, VIS3, do_env_fff, a, gen_helper_fnmuls)
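
/*
 * Two single-precision sources producing one double-precision result;
 * used by the VIS 8x16 partitioned multiplies and FPMERGE.
 */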
static bool do_dff(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i32))
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16AU, VIS1, do_dff, a, gen_op_fmul8x16au)
TRANS(FMUL8x16AL, VIS1, do_dff, a, gen_op_fmul8x16al)
TRANS(FMULD8SUx16, VIS1, do_dff, a, gen_op_fmuld8sux16)
TRANS(FMULD8ULx16, VIS1, do_dff, a, gen_op_fmuld8ulx16)
TRANS(FPMERGE, VIS1, do_dff, a, gen_helper_fpmerge)
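
/* One single and one double source producing a double result. */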
static bool do_dfd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i32, TCGv_i64))
{
    TCGv_i64 dst, src2;
    TCGv_i32 src1;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8x16, VIS1, do_dfd, a, gen_helper_fmul8x16)
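
/*
 * Expand an operation on 64-bit fp registers with the generic vector
 * expanders, addressing the registers by their byte offset within
 * CPUState; the vectors here are always 8 bytes long.
 */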
static bool do_gvec_ddd(DisasContext *dc, arg_r_r_r *a, MemOp vece,
                        void (*func)(unsigned, uint32_t, uint32_t,
                                     uint32_t, uint32_t, uint32_t))
{
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    func(vece, gen_offset_fpr_D(a->rd), gen_offset_fpr_D(a->rs1),
         gen_offset_fpr_D(a->rs2), 8, 8);
    return advance_pc(dc);
}

TRANS(FPADD8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_add)
TRANS(FPADD16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_add)
TRANS(FPADD32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_add)

TRANS(FPSUB8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sub)
TRANS(FPSUB16, VIS1, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sub)
TRANS(FPSUB32, VIS1, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sub)

TRANS(FCHKSM16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fchksm16)
TRANS(FMEAN16, VIS3, do_gvec_ddd, a, MO_16, gen_op_fmean16)

TRANS(FPADDS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ssadd)
TRANS(FPADDS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ssadd)
TRANS(FPADDS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_ssadd)
TRANS(FPADDUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_usadd)
TRANS(FPADDUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_usadd)

TRANS(FPSUBS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_sssub)
TRANS(FPSUBS16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sssub)
TRANS(FPSUBS32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sssub)
TRANS(FPSUBUS8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_ussub)
TRANS(FPSUBUS16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_ussub)

TRANS(FSLL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shlv)
TRANS(FSLL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shlv)
TRANS(FSRL16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_shrv)
TRANS(FSRL32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_shrv)
TRANS(FSRA16, VIS3, do_gvec_ddd, a, MO_16, tcg_gen_gvec_sarv)
TRANS(FSRA32, VIS3, do_gvec_ddd, a, MO_32, tcg_gen_gvec_sarv)

TRANS(FPMIN8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smin)
TRANS(FPMIN16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smin)
TRANS(FPMIN32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smin)
TRANS(FPMINU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umin)
TRANS(FPMINU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umin)
TRANS(FPMINU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umin)

TRANS(FPMAX8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_smax)
TRANS(FPMAX16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_smax)
TRANS(FPMAX32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_smax)
TRANS(FPMAXU8, VIS4, do_gvec_ddd, a, MO_8, tcg_gen_gvec_umax)
TRANS(FPMAXU16, VIS4, do_gvec_ddd, a, MO_16, tcg_gen_gvec_umax)
TRANS(FPMAXU32, VIS4, do_gvec_ddd, a, MO_32, tcg_gen_gvec_umax)
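
/* Three double-precision registers, rd = func(rs1, rs2). */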
static bool do_ddd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMUL8SUx16, VIS1, do_ddd, a, gen_helper_fmul8sux16)
TRANS(FMUL8ULx16, VIS1, do_ddd, a, gen_helper_fmul8ulx16)

TRANS(FNORd, VIS1, do_ddd, a, tcg_gen_nor_i64)
TRANS(FANDNOTd, VIS1, do_ddd, a, tcg_gen_andc_i64)
TRANS(FXORd, VIS1, do_ddd, a, tcg_gen_xor_i64)
TRANS(FNANDd, VIS1, do_ddd, a, tcg_gen_nand_i64)
TRANS(FANDd, VIS1, do_ddd, a, tcg_gen_and_i64)
TRANS(FXNORd, VIS1, do_ddd, a, tcg_gen_eqv_i64)
TRANS(FORNOTd, VIS1, do_ddd, a, tcg_gen_orc_i64)
TRANS(FORd, VIS1, do_ddd, a, tcg_gen_or_i64)

TRANS(FPACK32, VIS1, do_ddd, a, gen_op_fpack32)
TRANS(FALIGNDATAg, VIS1, do_ddd, a, gen_op_faligndata_g)
TRANS(BSHUFFLE, VIS2, do_ddd, a, gen_op_bshuffle)

TRANS(FHADDd, VIS3, do_ddd, a, gen_op_fhaddd)
TRANS(FHSUBd, VIS3, do_ddd, a, gen_op_fhsubd)
TRANS(FNHADDd, VIS3, do_ddd, a, gen_op_fnhaddd)

TRANS(FPADD64, VIS3B, do_ddd, a, tcg_gen_add_i64)
TRANS(FPSUB64, VIS3B, do_ddd, a, tcg_gen_sub_i64)
TRANS(FSLAS16, VIS3, do_ddd, a, gen_helper_fslas16)
TRANS(FSLAS32, VIS3, do_ddd, a, gen_helper_fslas32)
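
/*
 * Two double-precision sources producing a result in an integer
 * register; used by the VIS partitioned compares, which return a
 * bitmask of per-element comparison results.
 */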
static bool do_rdd(DisasContext *dc, arg_r_r_r *a,
                   void (*func)(TCGv, TCGv_i64, TCGv_i64))
{
    TCGv_i64 src1, src2;
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = gen_dest_gpr(dc, a->rd);
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, src1, src2);
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FPCMPLE16, VIS1, do_rdd, a, gen_helper_fcmple16)
TRANS(FPCMPNE16, VIS1, do_rdd, a, gen_helper_fcmpne16)
TRANS(FPCMPGT16, VIS1, do_rdd, a, gen_helper_fcmpgt16)
TRANS(FPCMPEQ16, VIS1, do_rdd, a, gen_helper_fcmpeq16)
TRANS(FPCMPULE16, VIS4, do_rdd, a, gen_helper_fcmpule16)
TRANS(FPCMPUGT16, VIS4, do_rdd, a, gen_helper_fcmpugt16)

TRANS(FPCMPLE32, VIS1, do_rdd, a, gen_helper_fcmple32)
TRANS(FPCMPNE32, VIS1, do_rdd, a, gen_helper_fcmpne32)
TRANS(FPCMPGT32, VIS1, do_rdd, a, gen_helper_fcmpgt32)
TRANS(FPCMPEQ32, VIS1, do_rdd, a, gen_helper_fcmpeq32)
TRANS(FPCMPULE32, VIS4, do_rdd, a, gen_helper_fcmpule32)
TRANS(FPCMPUGT32, VIS4, do_rdd, a, gen_helper_fcmpugt32)

TRANS(FPCMPEQ8, VIS3B, do_rdd, a, gen_helper_fcmpeq8)
TRANS(FPCMPNE8, VIS3B, do_rdd, a, gen_helper_fcmpne8)
TRANS(FPCMPULE8, VIS3B, do_rdd, a, gen_helper_fcmpule8)
TRANS(FPCMPUGT8, VIS3B, do_rdd, a, gen_helper_fcmpugt8)
TRANS(FPCMPLE8, VIS4, do_rdd, a, gen_helper_fcmple8)
TRANS(FPCMPGT8, VIS4, do_rdd, a, gen_helper_fcmpgt8)

TRANS(PDISTN, VIS3, do_rdd, a, gen_op_pdistn)
TRANS(XMULX, VIS3, do_rrr, a, gen_helper_xmulx)
TRANS(XMULXHI, VIS3, do_rrr, a, gen_helper_xmulxhi)
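
/* Double-precision arithmetic through an env-taking (IEEE) helper. */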
static bool do_env_ddd(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    func(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FADDd, ALL, do_env_ddd, a, gen_helper_faddd)
TRANS(FSUBd, ALL, do_env_ddd, a, gen_helper_fsubd)
TRANS(FMULd, ALL, do_env_ddd, a, gen_helper_fmuld)
TRANS(FDIVd, ALL, do_env_ddd, a, gen_helper_fdivd)
TRANS(FNADDd, VIS3, do_env_ddd, a, gen_helper_fnaddd)
TRANS(FNMULd, VIS3, do_env_ddd, a, gen_helper_fnmuld)
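
/*
 * FsMULd is an optional instruction, so it has a hand-written
 * translator that checks CPU_FEATURE_FSMULD and raises an
 * unimplemented-FPop trap when the feature is absent.
 */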
static bool trans_FsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (!(dc->def->features & CPU_FEATURE_FSMULD)) {
        return raise_unimpfpop(dc);
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

static bool trans_FNsMULd(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst;
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_fnsmuld(dst, tcg_env, src1, src2);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

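/* Four single-precision registers, rd = func(rs1, rs2, rs3). */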
static bool do_ffff(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    src3 = gen_load_fpr_F(dc, a->rs3);
    dst = tcg_temp_new_i32();
    func(dst, src1, src2, src3);
    gen_store_fpr_F(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(FMADDs, FMAF, do_ffff, a, gen_op_fmadds)
TRANS(FMSUBs, FMAF, do_ffff, a, gen_op_fmsubs)
TRANS(FNMSUBs, FMAF, do_ffff, a, gen_op_fnmsubs)
TRANS(FNMADDs, FMAF, do_ffff, a, gen_op_fnmadds)
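
/* Four double-precision registers; used by PDIST and the FMA family. */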
static bool do_dddd(DisasContext *dc, arg_r_r_r_r *a,
                    void (*func)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 dst, src1, src2, src3;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_fpr_D(dc, a->rs3);
    func(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(PDIST, VIS1, do_dddd, a, gen_helper_pdist)
TRANS(FMADDd, FMAF, do_dddd, a, gen_op_fmaddd)
TRANS(FMSUBd, FMAF, do_dddd, a, gen_op_fmsubd)
TRANS(FNMSUBd, FMAF, do_dddd, a, gen_op_fnmsubd)
TRANS(FNMADDd, FMAF, do_dddd, a, gen_op_fnmaddd)
TRANS(FPMADDX, IMA, do_dddd, a, gen_op_fpmaddx)
TRANS(FPMADDXHI, IMA, do_dddd, a, gen_op_fpmaddxhi)
static bool trans_FALIGNDATAi(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 dst, src1, src2;
    TCGv src3;

    if (!avail_VIS4(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    dst = tcg_temp_new_i64();
    src1 = gen_load_fpr_D(dc, a->rd);
    src2 = gen_load_fpr_D(dc, a->rs2);
    src3 = gen_load_gpr(dc, a->rs1);
    gen_op_faligndata_i(dst, src1, src2, src3);
    gen_store_fpr_D(dc, a->rd, dst);
    return advance_pc(dc);
}

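/* Quad arithmetic through an env-taking (IEEE) helper. */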
static bool do_env_qqq(DisasContext *dc, arg_r_r_r *a,
                       void (*func)(TCGv_i128, TCGv_env, TCGv_i128, TCGv_i128))
{
    TCGv_i128 src1, src2;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    func(src1, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, src1);
    return advance_pc(dc);
}

TRANS(FADDq, ALL, do_env_qqq, a, gen_helper_faddq)
TRANS(FSUBq, ALL, do_env_qqq, a, gen_helper_fsubq)
TRANS(FMULq, ALL, do_env_qqq, a, gen_helper_fmulq)
TRANS(FDIVq, ALL, do_env_qqq, a, gen_helper_fdivq)
static bool trans_FdMULq(DisasContext *dc, arg_r_r_r *a)
{
    TCGv_i64 src1, src2;
    TCGv_i128 dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    dst = tcg_temp_new_i128();
    gen_helper_fdmulq(dst, tcg_env, src1, src2);
    gen_store_fpr_Q(dc, a->rd, dst);
    return advance_pc(dc);
}

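/*
 * Conditional FP moves, selected on an integer register condition
 * (FMOVR*), an integer condition code (FMOV*cc) or a floating-point
 * condition code (FMOV*fcc); each builds a DisasCompare and hands it
 * to the move generator.
 */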
static bool do_fmovr(DisasContext *dc, arg_FMOVRs *a, bool is_128,
                     void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (!gen_compare_reg(&cmp, a->cond, gen_load_gpr(dc, a->rs1))) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVRs, 64, do_fmovr, a, false, gen_fmovs)
TRANS(FMOVRd, 64, do_fmovr, a, false, gen_fmovd)
TRANS(FMOVRq, 64, do_fmovr, a, true, gen_fmovq)
static bool do_fmovcc(DisasContext *dc, arg_FMOVscc *a, bool is_128,
                      void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_compare(&cmp, a->cc, a->cond, dc);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVscc, 64, do_fmovcc, a, false, gen_fmovs)
TRANS(FMOVdcc, 64, do_fmovcc, a, false, gen_fmovd)
TRANS(FMOVqcc, 64, do_fmovcc, a, true, gen_fmovq)
static bool do_fmovfcc(DisasContext *dc, arg_FMOVsfcc *a, bool is_128,
                       void (*func)(DisasContext *, DisasCompare *, int, int))
{
    DisasCompare cmp;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (is_128 && gen_trap_float128(dc)) {
        return true;
    }

    gen_op_clear_ieee_excp_and_FTT();
    gen_fcompare(&cmp, a->cc, a->cond);
    func(dc, &cmp, a->rd, a->rs2);
    return advance_pc(dc);
}

TRANS(FMOVsfcc, 64, do_fmovfcc, a, false, gen_fmovs)
TRANS(FMOVdfcc, 64, do_fmovfcc, a, false, gen_fmovd)
TRANS(FMOVqfcc, 64, do_fmovfcc, a, true, gen_fmovq)
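
/*
 * FP compares set one of the fcc fields. The FCMPE* variants signal
 * an exception on unordered operands (quiet NaNs included), where the
 * plain compares signal only on signaling NaNs.
 */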
static bool do_fcmps(DisasContext *dc, arg_FCMPs *a, bool e)
{
    TCGv_i32 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    if (e) {
        gen_helper_fcmpes(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmps(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPs, ALL, do_fcmps, a, false)
TRANS(FCMPEs, ALL, do_fcmps, a, true)
static bool do_fcmpd(DisasContext *dc, arg_FCMPd *a, bool e)
{
    TCGv_i64 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    if (e) {
        gen_helper_fcmped(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpd(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPd, ALL, do_fcmpd, a, false)
TRANS(FCMPEd, ALL, do_fcmpd, a, true)
static bool do_fcmpq(DisasContext *dc, arg_FCMPq *a, bool e)
{
    TCGv_i128 src1, src2;

    if (avail_32(dc) && a->cc != 0) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    if (gen_trap_float128(dc)) {
        return true;
    }

    src1 = gen_load_fpr_Q(dc, a->rs1);
    src2 = gen_load_fpr_Q(dc, a->rs2);
    if (e) {
        gen_helper_fcmpeq(cpu_fcc[a->cc], tcg_env, src1, src2);
    } else {
        gen_helper_fcmpq(cpu_fcc[a->cc], tcg_env, src1, src2);
    }
    return advance_pc(dc);
}

TRANS(FCMPq, ALL, do_fcmpq, a, false)
TRANS(FCMPEq, ALL, do_fcmpq, a, true)
static bool trans_FLCMPs(DisasContext *dc, arg_FLCMPs *a)
{
    TCGv_i32 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_F(dc, a->rs1);
    src2 = gen_load_fpr_F(dc, a->rs2);
    gen_helper_flcmps(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

static bool trans_FLCMPd(DisasContext *dc, arg_FLCMPd *a)
{
    TCGv_i64 src1, src2;

    if (!avail_VIS3(dc)) {
        return false;
    }
    if (gen_trap_ifnofpu(dc)) {
        return true;
    }

    src1 = gen_load_fpr_D(dc, a->rs1);
    src2 = gen_load_fpr_D(dc, a->rs2);
    gen_helper_flcmpd(cpu_fcc[a->cc], src1, src2);
    return advance_pc(dc);
}

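/* Raw moves between the fp register file and the general registers. */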
static bool do_movf2r(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*load)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv dst;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    dst = gen_dest_gpr(dc, a->rd);
    load(dst, tcg_env, offset(a->rs));
    gen_store_gpr(dc, a->rd, dst);
    return advance_pc(dc);
}

TRANS(MOVsTOsw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32s_tl)
TRANS(MOVsTOuw, VIS3B, do_movf2r, a, gen_offset_fpr_F, tcg_gen_ld32u_tl)
TRANS(MOVdTOx, VIS3B, do_movf2r, a, gen_offset_fpr_D, tcg_gen_ld_tl)
static bool do_movr2f(DisasContext *dc, arg_r_r *a,
                      int (*offset)(unsigned int),
                      void (*store)(TCGv, TCGv_ptr, tcg_target_long))
{
    TCGv src;

    if (gen_trap_ifnofpu(dc)) {
        return true;
    }
    src = gen_load_gpr(dc, a->rs);
    store(src, tcg_env, offset(a->rd));
    return advance_pc(dc);
}

TRANS(MOVwTOs, VIS3B, do_movr2f, a, gen_offset_fpr_F, tcg_gen_st32_tl)
TRANS(MOVxTOd, VIS3B, do_movr2f, a, gen_offset_fpr_D, tcg_gen_st_tl)
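
/*
 * The remainder of the file wires the above into the generic
 * translator loop: the TranslatorOps callbacks below drive
 * per-TB setup, per-insn bookkeeping and TB epilogue generation.
 */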
static void sparc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    int bound;

    dc->pc = dc->base.pc_first;
    dc->npc = (target_ulong)dc->base.tb->cs_base;
    dc->mem_idx = dc->base.tb->flags & TB_FLAG_MMU_MASK;
    dc->def = &cpu_env(cs)->def;
    dc->fpu_enabled = tb_fpu_enabled(dc->base.tb->flags);
    dc->address_mask_32bit = tb_am_enabled(dc->base.tb->flags);
#ifndef CONFIG_USER_ONLY
    dc->supervisor = (dc->base.tb->flags & TB_FLAG_SUPER) != 0;
#endif
#ifdef TARGET_SPARC64
    dc->fprs_dirty = 0;
    dc->asi = (dc->base.tb->flags >> TB_FLAG_ASI_SHIFT) & 0xff;
#ifndef CONFIG_USER_ONLY
    dc->hypervisor = (dc->base.tb->flags & TB_FLAG_HYPER) != 0;
#endif
#endif
    /*
     * if we reach a page boundary, we stop generation so that the
     * PC of a TT_TFAULT exception is always in the right page
     */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}

static void sparc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}

static void sparc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong npc = dc->npc;

    if (npc & 3) {
        switch (npc) {
        case JUMP_PC:
            assert(dc->jump_pc[1] == dc->pc + 4);
            npc = dc->jump_pc[0] | JUMP_PC;
            break;
        case DYNAMIC_PC:
        case DYNAMIC_PC_LOOKUP:
            npc = DYNAMIC_PC;
            break;
        default:
            g_assert_not_reached();
        }
    }
    tcg_gen_insn_start(dc->pc, npc);
}

static void sparc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    unsigned int insn;

    insn = translator_ldl(cpu_env(cs), &dc->base, dc->pc);
    dc->base.pc_next += 4;

    if (!decode(dc, insn)) {
        gen_exception(dc, TT_ILL_INSN);
    }

    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }
    if (dc->pc != dc->base.pc_next) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
}

static void sparc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    DisasDelayException *e, *e_next;
    bool may_lookup;

    finishing_insn(dc);

    switch (dc->base.is_jmp) {
    case DISAS_NEXT:
    case DISAS_TOO_MANY:
        if (((dc->pc | dc->npc) & 3) == 0) {
            /* static PC and NPC: we can use direct chaining */
            gen_goto_tb(dc, 0, dc->pc, dc->npc);
            break;
        }

        may_lookup = true;
        if (dc->pc & 3) {
            switch (dc->pc) {
            case DYNAMIC_PC_LOOKUP:
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }

        if (dc->npc & 3) {
            switch (dc->npc) {
            case JUMP_PC:
                gen_generic_branch(dc);
                break;
            case DYNAMIC_PC:
                may_lookup = false;
                break;
            case DYNAMIC_PC_LOOKUP:
                break;
            default:
                g_assert_not_reached();
            }
        } else {
            tcg_gen_movi_tl(cpu_npc, dc->npc);
        }
        if (may_lookup) {
            tcg_gen_lookup_and_goto_ptr();
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;

    case DISAS_NORETURN:
        break;

    case DISAS_EXIT:
        /* Exit TB */
        save_state(dc);
        tcg_gen_exit_tb(NULL, 0);
        break;

    default:
        g_assert_not_reached();
    }

    for (e = dc->delay_excp_list; e ; e = e_next) {
        gen_set_label(e->lab);

        tcg_gen_movi_tl(cpu_pc, e->pc);
        if (e->npc % 4 == 0) {
            tcg_gen_movi_tl(cpu_npc, e->npc);
        }
        gen_helper_raise_exception(tcg_env, e->excp);

        e_next = e->next;
        g_free(e);
    }
}

static const TranslatorOps sparc_tr_ops = {
    .init_disas_context = sparc_tr_init_disas_context,
    .tb_start           = sparc_tr_tb_start,
    .insn_start         = sparc_tr_insn_start,
    .translate_insn     = sparc_tr_translate_insn,
    .tb_stop            = sparc_tr_tb_stop,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc = {};

    translator_loop(cs, tb, max_insns, pc, host_pc, &sparc_tr_ops, &dc.base);
}

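/* Allocate the TCG globals that mirror the architectural state. */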
void sparc_tcg_init(void)
{
    static const char gregnames[32][4] = {
        "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
        "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7",
        "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7",
        "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7",
    };

    static const struct { TCGv_i32 *ptr; int off; const char *name; } r32[] = {
#ifdef TARGET_SPARC64
        { &cpu_fprs, offsetof(CPUSPARCState, fprs), "fprs" },
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc0" },
        { &cpu_fcc[1], offsetof(CPUSPARCState, fcc[1]), "fcc1" },
        { &cpu_fcc[2], offsetof(CPUSPARCState, fcc[2]), "fcc2" },
        { &cpu_fcc[3], offsetof(CPUSPARCState, fcc[3]), "fcc3" },
#else
        { &cpu_fcc[0], offsetof(CPUSPARCState, fcc[0]), "fcc" },
#endif
    };

    static const struct { TCGv *ptr; int off; const char *name; } rtl[] = {
#ifdef TARGET_SPARC64
        { &cpu_gsr, offsetof(CPUSPARCState, gsr), "gsr" },
        { &cpu_xcc_Z, offsetof(CPUSPARCState, xcc_Z), "xcc_Z" },
        { &cpu_xcc_C, offsetof(CPUSPARCState, xcc_C), "xcc_C" },
#endif
        { &cpu_cc_N, offsetof(CPUSPARCState, cc_N), "cc_N" },
        { &cpu_cc_V, offsetof(CPUSPARCState, cc_V), "cc_V" },
        { &cpu_icc_Z, offsetof(CPUSPARCState, icc_Z), "icc_Z" },
        { &cpu_icc_C, offsetof(CPUSPARCState, icc_C), "icc_C" },
        { &cpu_cond, offsetof(CPUSPARCState, cond), "cond" },
        { &cpu_pc, offsetof(CPUSPARCState, pc), "pc" },
        { &cpu_npc, offsetof(CPUSPARCState, npc), "npc" },
        { &cpu_y, offsetof(CPUSPARCState, y), "y" },
        { &cpu_tbr, offsetof(CPUSPARCState, tbr), "tbr" },
    };

    unsigned int i;

    cpu_regwptr = tcg_global_mem_new_ptr(tcg_env,
                                         offsetof(CPUSPARCState, regwptr),
                                         "regwptr");

    for (i = 0; i < ARRAY_SIZE(r32); ++i) {
        *r32[i].ptr = tcg_global_mem_new_i32(tcg_env, r32[i].off, r32[i].name);
    }

    for (i = 0; i < ARRAY_SIZE(rtl); ++i) {
        *rtl[i].ptr = tcg_global_mem_new(tcg_env, rtl[i].off, rtl[i].name);
    }

    cpu_regs[0] = NULL;
    for (i = 1; i < 8; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUSPARCState, gregs[i]),
                                         gregnames[i]);
    }

    for (i = 8; i < 32; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_regwptr,
                                         (i - 8) * sizeof(target_ulong),
                                         gregnames[i]);
    }
}

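/*
 * Recover pc/npc from the values recorded at insn_start; a dynamic
 * or conditional npc needs special treatment here.
 */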
void sparc_restore_state_to_opc(CPUState *cs,
                                const TranslationBlock *tb,
                                const uint64_t *data)
{
    CPUSPARCState *env = cpu_env(cs);
    target_ulong pc = data[0];
    target_ulong npc = data[1];

    env->pc = pc;
    if (npc == DYNAMIC_PC) {
        /* dynamic NPC: already stored */
    } else if (npc & JUMP_PC) {
        /* jump PC: use 'cond' and the jump targets of the translation */
        if (env->cond) {
            env->npc = npc & ~3;