DEF_HELPER_2(mmu_write, void, i32, i32)
#endif
+DEF_HELPER_4(memalign, void, i32, i32, i32, i32)
+
#include "def-helper.h"
return 0;
}
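+/*
+ * Check a data access for natural alignment. Misaligned accesses are
+ * logged; if MSR[EE] is set, the register, direction and size of the
+ * access are recorded in ESR and a hw exception is raised.
+ */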
+void helper_memalign(uint32_t addr, uint32_t dr, uint32_t wr, uint32_t size)
+{
+    uint32_t mask;
+
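+    /* Natural alignment: mask selects the low address bits that must
+       be clear for an access of this size. */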
+    switch (size) {
+    case 4: mask = 3; break;
+    case 2: mask = 1; break;
+    default:
+    case 1: mask = 0; break;
+    }
+
+    if (addr & mask) {
+        qemu_log("unaligned access addr=%x size=%d, wr=%d\n",
+                 addr, size, wr);
+        if (!(env->sregs[SR_MSR] & MSR_EE)) {
+            return;
+        }
+
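+        /* Record the cause, the store flag (S) and the register (Rx)
+           of the faulting access in ESR. */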
+        env->sregs[SR_ESR] = ESR_EC_UNALIGNED_DATA | (wr << 10)
+                             | ((dr & 31) << 5);
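+        /* The W bit flags a word-sized (32-bit) unaligned access. */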
+        if (size == 4) {
+            env->sregs[SR_ESR] |= 1 << 11;
+        }
+        helper_raise_exception(EXCP_HW_EXCP);
+    }
+}
+
#if !defined(CONFIG_USER_ONLY)
/* Writes/reads to the MMU's special regs end up here. */
uint32_t helper_mmu_read(uint32_t rn)
/* If we get a fault on a dslot, the jmpstate better be in sync. */
sync_jmpstate(dc);
-    if (dc->rd)
+
+    /* Verify alignment if needed. */
+    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
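+        /* wr=0 marks this access as a load in the exception state. */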
+        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
+                            tcg_const_tl(0), tcg_const_tl(size));
+    }
+
+    if (dc->rd) {
gen_load(dc, cpu_R[dc->rd], *addr, size);
-    else {
+    } else {
gen_load(dc, env_imm, *addr, size);
}
/* If we get a fault on a dslot, the jmpstate better be in sync. */
sync_jmpstate(dc);
addr = compute_ldst_addr(dc, &t);
+
+    /* Verify alignment if needed. */
+    if ((dc->env->pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
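+        /* wr=1 marks this access as a store; dc->rd is the source
+           register reported in ESR. */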
+        gen_helper_memalign(*addr, tcg_const_tl(dc->rd),
+                            tcg_const_tl(1), tcg_const_tl(size));
+    }
+
gen_store(dc, *addr, cpu_R[dc->rd], size);
if (addr == &t)
tcg_temp_free(t);