    case GET_ASI_BLOCK:
        /* Valid for lddfa on aligned registers only. */
        if (size == 8 && (rd & 7) == 0) {
+           TCGMemOp memop;
            TCGv eight;
            int i;
-           gen_check_align(addr, 0x3f);
            gen_address_mask(dc, addr);
+           /* The first operation checks required alignment. */
+           memop = da.memop | MO_ALIGN_64;
            eight = tcg_const_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + i], addr,
-                                   da.mem_idx, da.memop);
+                                   da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
+               memop = da.memop;
            }
            tcg_temp_free(eight);
        } else {
    case GET_ASI_BLOCK:
        /* Valid for stdfa on aligned registers only. */
        if (size == 8 && (rd & 7) == 0) {
+           TCGMemOp memop;
            TCGv eight;
            int i;
-           gen_check_align(addr, 0x3f);
            gen_address_mask(dc, addr);
+           /* The first operation checks required alignment. */
+           memop = da.memop | MO_ALIGN_64;
            eight = tcg_const_tl(8);
            for (i = 0; ; ++i) {
                tcg_gen_qemu_st_i64(cpu_fpr[rd / 2 + i], addr,
-                                   da.mem_idx, da.memop);
+                                   da.mem_idx, memop);
                if (i == 7) {
                    break;
                }
                tcg_gen_add_tl(addr, addr, eight);
+               memop = da.memop;
            }
            tcg_temp_free(eight);
        } else {
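
In both block-transfer hunks only the first of the eight 8-byte accesses carries MO_ALIGN_64, so a misaligned address faults on the very first operation, before any of the block has been transferred; once the base address is known to be 64-byte aligned, the accesses at addr + 8 through addr + 56 cannot be misaligned, which is why the loop drops back to the plain da.memop afterwards. The unrolled shape of the generated sequence looks roughly like the sketch below, reusing the names from the hunks above (illustration only, not part of the patch):

    /* Illustration only -- the unrolled effect of the loop above, lddfa case.
     * Only the first operation requests 64-byte alignment. */
    tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 0], addr, da.mem_idx,
                        da.memop | MO_ALIGN_64);   /* faults unless addr % 64 == 0 */
    tcg_gen_add_tl(addr, addr, eight);
    tcg_gen_qemu_ld_i64(cpu_fpr[rd / 2 + 1], addr, da.mem_idx,
                        da.memop);                 /* addr + 8 is aligned by construction */
    /* ... and so on through cpu_fpr[rd / 2 + 7] at addr + 56 ... */
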
        return;
    case GET_ASI_DTWINX:
-       gen_check_align(addr, 15);
        gen_address_mask(dc, addr);
-       tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop);
+       tcg_gen_qemu_ld_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_ld_i64(lo, addr, da.mem_idx, da.memop);
        break;
        break;
    case GET_ASI_DTWINX:
-       gen_check_align(addr, 15);
        gen_address_mask(dc, addr);
-       tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop);
+       tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
        tcg_gen_addi_tl(addr, addr, 8);
        tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
        break;
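
The twin-load/store hunks follow the same pattern with MO_ALIGN_16: each TCG operation is only 8 bytes wide, so plain MO_ALIGN would request no more than the natural 8-byte alignment, and the 16-byte requirement of the twin accesses has to be OR-ed in explicitly, again only on the first operation; the second access at addr + 8 is then aligned by construction. A sketch for the store side, reusing the names from the hunk (illustration only, not part of the patch):

    /* Illustration only.  The first store performs the access and also
     * enforces the 16-byte alignment of the 128-bit twin operation, so a
     * misaligned address faults before either half has been written. */
    tcg_gen_qemu_st_i64(hi, addr, da.mem_idx, da.memop | MO_ALIGN_16);
    tcg_gen_addi_tl(addr, addr, 8);
    /* addr + 8 is 8-byte aligned whenever addr was 16-byte aligned, so no
     * extra alignment flag is needed on the second half. */
    tcg_gen_qemu_st_i64(lo, addr, da.mem_idx, da.memop);
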
        if (gen_trap_ifnofpu(dc)) {
            goto jmp_insn;
        }
-       gen_check_align(cpu_addr, 7);
        gen_stf_asi(dc, cpu_addr, insn, 16, QFPREG(rd));
    }
    break;