+++ /dev/null
-/*
- * ARM load/store instructions for code (armeb-user support)
- *
- * Copyright (c) 2012 CodeSourcery, LLC
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef ARM_LDST_H
-#define ARM_LDST_H
-
-#include "exec/translator.h"
-#include "qemu/bswap.h"
-
-/* Load an instruction and return it in the standard little-endian order */
-static inline uint32_t arm_ldl_code(CPUARMState *env, DisasContextBase *s,
- target_ulong addr, bool sctlr_b)
-{
- return translator_ldl_swap(env, s, addr, bswap_code(sctlr_b));
-}
-
-/* Ditto, for a halfword (Thumb) instruction */
-static inline uint16_t arm_lduw_code(CPUARMState *env, DisasContextBase *s,
- target_ulong addr, bool sctlr_b)
-{
-#ifndef CONFIG_USER_ONLY
- /* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped
- within each word. Undo that now. */
- if (sctlr_b) {
- addr ^= 2;
- }
-#endif
- return translator_lduw_swap(env, s, addr, bswap_code(sctlr_b));
-}
-
-#endif
+++ /dev/null
-/*
- * ARM SVE Load/Store Helpers
- *
- * Copyright (c) 2018-2022 Linaro
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TARGET_ARM_SVE_LDST_INTERNAL_H
-#define TARGET_ARM_SVE_LDST_INTERNAL_H
-
-#include "exec/cpu_ldst.h"
-
-/*
- * Load one element into @vd + @reg_off from @host.
- * The controlling predicate is known to be true.
- */
-typedef void sve_ldst1_host_fn(void *vd, intptr_t reg_off, void *host);
-
-/*
- * Load one element into @vd + @reg_off from (@env, @vaddr, @ra).
- * The controlling predicate is known to be true.
- */
-typedef void sve_ldst1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
- target_ulong vaddr, uintptr_t retaddr);
-
-/*
- * Generate the above primitives.
- */
-
-#define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \
-static inline void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
-{ TYPEM val = HOST(host); *(TYPEE *)(vd + H(reg_off)) = val; }
-
-#define DO_ST_HOST(NAME, H, TYPEE, TYPEM, HOST) \
-static inline void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
-{ TYPEM val = *(TYPEE *)(vd + H(reg_off)); HOST(host, val); }
-
-#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, TLB) \
-static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \
- intptr_t reg_off, target_ulong addr, uintptr_t ra) \
-{ \
- TYPEM val = TLB(env, useronly_clean_ptr(addr), ra); \
- *(TYPEE *)(vd + H(reg_off)) = val; \
-}
-
-#define DO_ST_TLB(NAME, H, TYPEE, TYPEM, TLB) \
-static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \
- intptr_t reg_off, target_ulong addr, uintptr_t ra) \
-{ \
- TYPEM val = *(TYPEE *)(vd + H(reg_off)); \
- TLB(env, useronly_clean_ptr(addr), val, ra); \
-}
-
-#define DO_LD_PRIM_1(NAME, H, TE, TM) \
- DO_LD_HOST(NAME, H, TE, TM, ldub_p) \
- DO_LD_TLB(NAME, H, TE, TM, cpu_ldub_data_ra)
-
-DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t)
-DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t)
-DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t)
-DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t)
-DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t)
-DO_LD_PRIM_1(ld1bdu, H1_8, uint64_t, uint8_t)
-DO_LD_PRIM_1(ld1bds, H1_8, uint64_t, int8_t)
-
-#define DO_ST_PRIM_1(NAME, H, TE, TM) \
- DO_ST_HOST(st1##NAME, H, TE, TM, stb_p) \
- DO_ST_TLB(st1##NAME, H, TE, TM, cpu_stb_data_ra)
-
-DO_ST_PRIM_1(bb, H1, uint8_t, uint8_t)
-DO_ST_PRIM_1(bh, H1_2, uint16_t, uint8_t)
-DO_ST_PRIM_1(bs, H1_4, uint32_t, uint8_t)
-DO_ST_PRIM_1(bd, H1_8, uint64_t, uint8_t)
-
-#define DO_LD_PRIM_2(NAME, H, TE, TM, LD) \
- DO_LD_HOST(ld1##NAME##_be, H, TE, TM, LD##_be_p) \
- DO_LD_HOST(ld1##NAME##_le, H, TE, TM, LD##_le_p) \
- DO_LD_TLB(ld1##NAME##_be, H, TE, TM, cpu_##LD##_be_data_ra) \
- DO_LD_TLB(ld1##NAME##_le, H, TE, TM, cpu_##LD##_le_data_ra)
-
-#define DO_ST_PRIM_2(NAME, H, TE, TM, ST) \
- DO_ST_HOST(st1##NAME##_be, H, TE, TM, ST##_be_p) \
- DO_ST_HOST(st1##NAME##_le, H, TE, TM, ST##_le_p) \
- DO_ST_TLB(st1##NAME##_be, H, TE, TM, cpu_##ST##_be_data_ra) \
- DO_ST_TLB(st1##NAME##_le, H, TE, TM, cpu_##ST##_le_data_ra)
-
-DO_LD_PRIM_2(hh, H1_2, uint16_t, uint16_t, lduw)
-DO_LD_PRIM_2(hsu, H1_4, uint32_t, uint16_t, lduw)
-DO_LD_PRIM_2(hss, H1_4, uint32_t, int16_t, lduw)
-DO_LD_PRIM_2(hdu, H1_8, uint64_t, uint16_t, lduw)
-DO_LD_PRIM_2(hds, H1_8, uint64_t, int16_t, lduw)
-
-DO_ST_PRIM_2(hh, H1_2, uint16_t, uint16_t, stw)
-DO_ST_PRIM_2(hs, H1_4, uint32_t, uint16_t, stw)
-DO_ST_PRIM_2(hd, H1_8, uint64_t, uint16_t, stw)
-
-DO_LD_PRIM_2(ss, H1_4, uint32_t, uint32_t, ldl)
-DO_LD_PRIM_2(sdu, H1_8, uint64_t, uint32_t, ldl)
-DO_LD_PRIM_2(sds, H1_8, uint64_t, int32_t, ldl)
-
-DO_ST_PRIM_2(ss, H1_4, uint32_t, uint32_t, stl)
-DO_ST_PRIM_2(sd, H1_8, uint64_t, uint32_t, stl)
-
-DO_LD_PRIM_2(dd, H1_8, uint64_t, uint64_t, ldq)
-DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq)
-
-#undef DO_LD_TLB
-#undef DO_ST_TLB
-#undef DO_LD_HOST
-#undef DO_LD_PRIM_1
-#undef DO_ST_PRIM_1
-#undef DO_LD_PRIM_2
-#undef DO_ST_PRIM_2
-
-/*
- * Resolve the guest virtual address to info->host and info->flags.
- * If @nofault, return false if the page is invalid, otherwise
- * exit via page fault exception.
- */
-
-typedef struct {
- void *host;
- int flags;
- MemTxAttrs attrs;
- bool tagged;
-} SVEHostPage;
-
-bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
- target_ulong addr, int mem_off, MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr);
-
-/*
- * Analyse contiguous data, protected by a governing predicate.
- */
-
-typedef enum {
- FAULT_NO,
- FAULT_FIRST,
- FAULT_ALL,
-} SVEContFault;
-
-typedef struct {
- /*
- * First and last element wholly contained within the two pages.
- * mem_off_first[0] and reg_off_first[0] are always set >= 0.
- * reg_off_last[0] may be < 0 if the first element crosses pages.
- * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
- * are set >= 0 only if there are complete elements on a second page.
- *
- * The reg_off_* offsets are relative to the internal vector register.
- * The mem_off_first offset is relative to the memory address; the
- * two offsets are different when a load operation extends, a store
- * operation truncates, or for multi-register operations.
- */
- int16_t mem_off_first[2];
- int16_t reg_off_first[2];
- int16_t reg_off_last[2];
-
- /*
- * One element that is misaligned and spans both pages,
- * or -1 if there is no such active element.
- */
- int16_t mem_off_split;
- int16_t reg_off_split;
-
- /*
- * The byte offset at which the entire operation crosses a page boundary.
- * Set >= 0 if and only if the entire operation spans two pages.
- */
- int16_t page_split;
-
- /* TLB data for the two pages. */
- SVEHostPage page[2];
-} SVEContLdSt;
-
-/*
- * Find first active element on each page, and a loose bound for the
- * final element on each page. Identify any single element that spans
- * the page boundary. Return true if there are any active elements.
- */
-bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg,
- intptr_t reg_max, int esz, int msize);
-
-/*
- * Resolve the guest virtual addresses to info->page[].
- * Control the generation of page faults with @fault. Return false if
- * there is no work to do, which can only happen with @fault == FAULT_NO.
- */
-bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
- CPUARMState *env, target_ulong addr,
- MMUAccessType access_type, uintptr_t retaddr);
-
-#ifdef CONFIG_USER_ONLY
-static inline void
-sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
- target_ulong addr, int esize, int msize,
- int wp_access, uintptr_t retaddr)
-{ }
-#else
-void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
- uint64_t *vg, target_ulong addr,
- int esize, int msize, int wp_access,
- uintptr_t retaddr);
-#endif
-
-void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
- target_ulong addr, int esize, int msize,
- uint32_t mtedesc, uintptr_t ra);
-
-#endif /* TARGET_ARM_SVE_LDST_INTERNAL_H */
--- /dev/null
+/*
+ * ARM load/store instructions for code (armeb-user support)
+ *
+ * Copyright (c) 2012 CodeSourcery, LLC
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef ARM_LDST_H
+#define ARM_LDST_H
+
+#include "exec/translator.h"
+#include "qemu/bswap.h"
+
+/* Load an instruction and return it in the standard little-endian order */
+static inline uint32_t arm_ldl_code(CPUARMState *env, DisasContextBase *s,
+ target_ulong addr, bool sctlr_b)
+{
+ return translator_ldl_swap(env, s, addr, bswap_code(sctlr_b));
+}
+
+/* Ditto, for a halfword (Thumb) instruction */
+static inline uint16_t arm_lduw_code(CPUARMState *env, DisasContextBase *s,
+ target_ulong addr, bool sctlr_b)
+{
+#ifndef CONFIG_USER_ONLY
+ /* In big-endian (BE32) mode, adjacent Thumb instructions have been swapped
+ within each word. Undo that now. */
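+    /* E.g. (illustrative addresses): in BE32 the halfwords at 0x1000
+     * and 0x1002 are stored exchanged within their word, so the fetch
+     * for either address must read from addr ^ 2.
+     */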
+ if (sctlr_b) {
+ addr ^= 2;
+ }
+#endif
+ return translator_lduw_swap(env, s, addr, bswap_code(sctlr_b));
+}
+
+#endif
--- /dev/null
+/*
+ * ARM SVE Load/Store Helpers
+ *
+ * Copyright (c) 2018-2022 Linaro
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARM_SVE_LDST_INTERNAL_H
+#define TARGET_ARM_SVE_LDST_INTERNAL_H
+
+#include "exec/cpu_ldst.h"
+
+/*
+ * Load one element into @vd + @reg_off from @host.
+ * The controlling predicate is known to be true.
+ */
+typedef void sve_ldst1_host_fn(void *vd, intptr_t reg_off, void *host);
+
+/*
+ * Load one element into @vd + @reg_off from (@env, @vaddr, @ra).
+ * The controlling predicate is known to be true.
+ */
+typedef void sve_ldst1_tlb_fn(CPUARMState *env, void *vd, intptr_t reg_off,
+ target_ulong vaddr, uintptr_t retaddr);
+
+/*
+ * Generate the above primitives.
+ */
+
+#define DO_LD_HOST(NAME, H, TYPEE, TYPEM, HOST) \
+static inline void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
+{ TYPEM val = HOST(host); *(TYPEE *)(vd + H(reg_off)) = val; }
+
+#define DO_ST_HOST(NAME, H, TYPEE, TYPEM, HOST) \
+static inline void sve_##NAME##_host(void *vd, intptr_t reg_off, void *host) \
+{ TYPEM val = *(TYPEE *)(vd + H(reg_off)); HOST(host, val); }
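+
+/*
+ * For illustration, DO_LD_HOST(ld1bb, H1, uint8_t, uint8_t, ldub_p)
+ * expands to:
+ *
+ *   static inline void sve_ld1bb_host(void *vd, intptr_t reg_off, void *host)
+ *   { uint8_t val = ldub_p(host); *(uint8_t *)(vd + H1(reg_off)) = val; }
+ *
+ * i.e. one predicated element copied from host memory into the vector.
+ */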
+
+#define DO_LD_TLB(NAME, H, TYPEE, TYPEM, TLB) \
+static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \
+ intptr_t reg_off, target_ulong addr, uintptr_t ra) \
+{ \
+ TYPEM val = TLB(env, useronly_clean_ptr(addr), ra); \
+ *(TYPEE *)(vd + H(reg_off)) = val; \
+}
+
+#define DO_ST_TLB(NAME, H, TYPEE, TYPEM, TLB) \
+static inline void sve_##NAME##_tlb(CPUARMState *env, void *vd, \
+ intptr_t reg_off, target_ulong addr, uintptr_t ra) \
+{ \
+ TYPEM val = *(TYPEE *)(vd + H(reg_off)); \
+ TLB(env, useronly_clean_ptr(addr), val, ra); \
+}
+
+#define DO_LD_PRIM_1(NAME, H, TE, TM) \
+ DO_LD_HOST(NAME, H, TE, TM, ldub_p) \
+ DO_LD_TLB(NAME, H, TE, TM, cpu_ldub_data_ra)
+
+DO_LD_PRIM_1(ld1bb, H1, uint8_t, uint8_t)
+DO_LD_PRIM_1(ld1bhu, H1_2, uint16_t, uint8_t)
+DO_LD_PRIM_1(ld1bhs, H1_2, uint16_t, int8_t)
+DO_LD_PRIM_1(ld1bsu, H1_4, uint32_t, uint8_t)
+DO_LD_PRIM_1(ld1bss, H1_4, uint32_t, int8_t)
+DO_LD_PRIM_1(ld1bdu, H1_8, uint64_t, uint8_t)
+DO_LD_PRIM_1(ld1bds, H1_8, uint64_t, int8_t)
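+
+/*
+ * Naming sketch: ld1<mem><elt>[sign], so ld1bhu zero-extends a byte
+ * into a 16-bit (halfword) element, while ld1bds sign-extends a byte
+ * into a 64-bit (doubleword) element.
+ */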
+
+#define DO_ST_PRIM_1(NAME, H, TE, TM) \
+ DO_ST_HOST(st1##NAME, H, TE, TM, stb_p) \
+ DO_ST_TLB(st1##NAME, H, TE, TM, cpu_stb_data_ra)
+
+DO_ST_PRIM_1(bb, H1, uint8_t, uint8_t)
+DO_ST_PRIM_1(bh, H1_2, uint16_t, uint8_t)
+DO_ST_PRIM_1(bs, H1_4, uint32_t, uint8_t)
+DO_ST_PRIM_1(bd, H1_8, uint64_t, uint8_t)
+
+#define DO_LD_PRIM_2(NAME, H, TE, TM, LD) \
+ DO_LD_HOST(ld1##NAME##_be, H, TE, TM, LD##_be_p) \
+ DO_LD_HOST(ld1##NAME##_le, H, TE, TM, LD##_le_p) \
+ DO_LD_TLB(ld1##NAME##_be, H, TE, TM, cpu_##LD##_be_data_ra) \
+ DO_LD_TLB(ld1##NAME##_le, H, TE, TM, cpu_##LD##_le_data_ra)
+
+#define DO_ST_PRIM_2(NAME, H, TE, TM, ST) \
+ DO_ST_HOST(st1##NAME##_be, H, TE, TM, ST##_be_p) \
+ DO_ST_HOST(st1##NAME##_le, H, TE, TM, ST##_le_p) \
+ DO_ST_TLB(st1##NAME##_be, H, TE, TM, cpu_##ST##_be_data_ra) \
+ DO_ST_TLB(st1##NAME##_le, H, TE, TM, cpu_##ST##_le_data_ra)
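+
+/*
+ * Informative note: each multi-byte primitive is generated in _be and
+ * _le flavours; the SVE helpers are expected to pick the variant that
+ * matches the current guest data endianness.
+ */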
+
+DO_LD_PRIM_2(hh, H1_2, uint16_t, uint16_t, lduw)
+DO_LD_PRIM_2(hsu, H1_4, uint32_t, uint16_t, lduw)
+DO_LD_PRIM_2(hss, H1_4, uint32_t, int16_t, lduw)
+DO_LD_PRIM_2(hdu, H1_8, uint64_t, uint16_t, lduw)
+DO_LD_PRIM_2(hds, H1_8, uint64_t, int16_t, lduw)
+
+DO_ST_PRIM_2(hh, H1_2, uint16_t, uint16_t, stw)
+DO_ST_PRIM_2(hs, H1_4, uint32_t, uint16_t, stw)
+DO_ST_PRIM_2(hd, H1_8, uint64_t, uint16_t, stw)
+
+DO_LD_PRIM_2(ss, H1_4, uint32_t, uint32_t, ldl)
+DO_LD_PRIM_2(sdu, H1_8, uint64_t, uint32_t, ldl)
+DO_LD_PRIM_2(sds, H1_8, uint64_t, int32_t, ldl)
+
+DO_ST_PRIM_2(ss, H1_4, uint32_t, uint32_t, stl)
+DO_ST_PRIM_2(sd, H1_8, uint64_t, uint32_t, stl)
+
+DO_LD_PRIM_2(dd, H1_8, uint64_t, uint64_t, ldq)
+DO_ST_PRIM_2(dd, H1_8, uint64_t, uint64_t, stq)
+
+#undef DO_LD_TLB
+#undef DO_ST_TLB
+#undef DO_LD_HOST
+#undef DO_ST_HOST
+#undef DO_LD_PRIM_1
+#undef DO_ST_PRIM_1
+#undef DO_LD_PRIM_2
+#undef DO_ST_PRIM_2
+
+/*
+ * Resolve the guest virtual address to info->host and info->flags.
+ * If @nofault, return false if the page is invalid; otherwise
+ * exit via a page fault exception.
+ */
+
+typedef struct {
+ void *host;
+ int flags;
+ MemTxAttrs attrs;
+ bool tagged;
+} SVEHostPage;
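+
+/*
+ * Informative note: @flags holds the TLB_* bits reported when the page
+ * was probed (e.g. TLB_MMIO, TLB_WATCHPOINT), and @tagged is set when
+ * the page is MTE-tagged and so requires tag checking.
+ */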
+
+bool sve_probe_page(SVEHostPage *info, bool nofault, CPUARMState *env,
+ target_ulong addr, int mem_off, MMUAccessType access_type,
+ int mmu_idx, uintptr_t retaddr);
+
+/*
+ * Analyse contiguous data, protected by a governing predicate.
+ */
+
+typedef enum {
+ FAULT_NO,
+ FAULT_FIRST,
+ FAULT_ALL,
+} SVEContFault;
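+
+/*
+ * These match the SVE access flavours: FAULT_ALL for normal accesses,
+ * FAULT_FIRST for first-fault loads (LDFF1*), and FAULT_NO for
+ * non-fault loads (LDNF1*).
+ */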
+
+typedef struct {
+ /*
+ * First and last element wholly contained within the two pages.
+ * mem_off_first[0] and reg_off_first[0] are always set >= 0.
+ * reg_off_last[0] may be < 0 if the first element crosses pages.
+ * All of mem_off_first[1], reg_off_first[1] and reg_off_last[1]
+ * are set >= 0 only if there are complete elements on a second page.
+ *
+ * The reg_off_* offsets are relative to the internal vector register.
+ * The mem_off_first offset is relative to the memory address; the
+ * two offsets are different when a load operation extends, a store
+ * operation truncates, or for multi-register operations.
+ */
+ int16_t mem_off_first[2];
+ int16_t reg_off_first[2];
+ int16_t reg_off_last[2];
+
+ /*
+ * One element that is misaligned and spans both pages,
+ * or -1 if there is no such active element.
+ */
+ int16_t mem_off_split;
+ int16_t reg_off_split;
+
+ /*
+ * The byte offset at which the entire operation crosses a page boundary.
+ * Set >= 0 if and only if the entire operation spans two pages.
+ */
+ int16_t page_split;
+
+ /* TLB data for the two pages. */
+ SVEHostPage page[2];
+} SVEContLdSt;
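+
+/*
+ * Worked example (illustrative): a 16-byte operation on 4-byte
+ * elements, all predicated on, that starts 8 bytes before a page
+ * boundary has elements 0-1 on the first page and 2-3 on the second,
+ * so page_split == 8, mem_off_first[1] == 8, and mem_off_split == -1
+ * because no element straddles the boundary.
+ */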
+
+/*
+ * Find first active element on each page, and a loose bound for the
+ * final element on each page. Identify any single element that spans
+ * the page boundary. Return true if there are any active elements.
+ */
+bool sve_cont_ldst_elements(SVEContLdSt *info, target_ulong addr, uint64_t *vg,
+ intptr_t reg_max, int esz, int msize);
+
+/*
+ * Resolve the guest virtual addresses to info->page[].
+ * Control the generation of page faults with @fault. Return false if
+ * there is no work to do, which can only happen with @fault == FAULT_NO.
+ */
+bool sve_cont_ldst_pages(SVEContLdSt *info, SVEContFault fault,
+ CPUARMState *env, target_ulong addr,
+ MMUAccessType access_type, uintptr_t retaddr);
+
+#ifdef CONFIG_USER_ONLY
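+/* User-only emulation has no watchpoints, so the check is a no-op. */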
+static inline void
+sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
+ target_ulong addr, int esize, int msize,
+ int wp_access, uintptr_t retaddr)
+{ }
+#else
+void sve_cont_ldst_watchpoints(SVEContLdSt *info, CPUARMState *env,
+ uint64_t *vg, target_ulong addr,
+ int esize, int msize, int wp_access,
+ uintptr_t retaddr);
+#endif
+
+void sve_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, uint64_t *vg,
+ target_ulong addr, int esize, int msize,
+ uint32_t mtedesc, uintptr_t ra);
+
+#endif /* TARGET_ARM_SVE_LDST_INTERNAL_H */
--- /dev/null
+/*
+ * AArch32 translation, common definitions.
+ *
+ * Copyright (c) 2021 Linaro, Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef TARGET_ARM_TRANSLATE_A32_H
+#define TARGET_ARM_TRANSLATE_A32_H
+
+/* Prototypes for autogenerated disassembler functions */
+bool disas_m_nocp(DisasContext *dc, uint32_t insn);
+bool disas_mve(DisasContext *dc, uint32_t insn);
+bool disas_vfp(DisasContext *s, uint32_t insn);
+bool disas_vfp_uncond(DisasContext *s, uint32_t insn);
+bool disas_neon_dp(DisasContext *s, uint32_t insn);
+bool disas_neon_ls(DisasContext *s, uint32_t insn);
+bool disas_neon_shared(DisasContext *s, uint32_t insn);
+
+void load_reg_var(DisasContext *s, TCGv_i32 var, int reg);
+void arm_gen_condlabel(DisasContext *s);
+bool vfp_access_check(DisasContext *s);
+bool vfp_access_check_m(DisasContext *s, bool skip_context_update);
+void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop);
+void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop);
+void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop);
+void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop);
+TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs);
+void gen_set_cpsr(TCGv_i32 var, uint32_t mask);
+void gen_set_condexec(DisasContext *s);
+void gen_update_pc(DisasContext *s, target_long diff);
+void gen_lookup_tb(DisasContext *s);
+long vfp_reg_offset(bool dp, unsigned reg);
+long neon_full_reg_offset(unsigned reg);
+long neon_element_offset(int reg, int element, MemOp memop);
+void gen_rev16(TCGv_i32 dest, TCGv_i32 var);
+void clear_eci_state(DisasContext *s);
+bool mve_eci_check(DisasContext *s);
+void mve_update_eci(DisasContext *s);
+void mve_update_and_store_eci(DisasContext *s);
+bool mve_skip_vmov(DisasContext *s, int vn, int index, int size);
+
+static inline TCGv_i32 load_cpu_offset(int offset)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ tcg_gen_ld_i32(tmp, cpu_env, offset);
+ return tmp;
+}
+
+/* Load from a 32-bit field to a TCGv_i32 */
+#define load_cpu_field(name) \
+ ({ \
+ QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 4); \
+ load_cpu_offset(offsetof(CPUARMState, name)); \
+ })
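+
+/* Illustrative use: load_cpu_field(spsr) yields a temp holding env->spsr. */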
+
+/* Load from the low half of a 64-bit field to a TCGv_i32 */
+#define load_cpu_field_low32(name) \
+ ({ \
+ QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 8); \
+ load_cpu_offset(offsetoflow32(CPUARMState, name)); \
+ })
+
+void store_cpu_offset(TCGv_i32 var, int offset, int size);
+
+#define store_cpu_field(val, name) \
+ ({ \
+ QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 4 \
+ && sizeof_field(CPUARMState, name) != 1); \
+ store_cpu_offset(val, offsetof(CPUARMState, name), \
+ sizeof_field(CPUARMState, name)); \
+ })
+
+#define store_cpu_field_constant(val, name) \
+ store_cpu_field(tcg_constant_i32(val), name)
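+
+/*
+ * Illustrative use: store_cpu_field_constant(0, condexec_bits) writes
+ * constant 0 to the 32-bit env->condexec_bits field without allocating
+ * a temporary for the value.
+ */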
+
+/* Create a new temporary and set it to the value of a CPU register. */
+static inline TCGv_i32 load_reg(DisasContext *s, int reg)
+{
+ TCGv_i32 tmp = tcg_temp_new_i32();
+ load_reg_var(s, tmp, reg);
+ return tmp;
+}
+
+void store_reg(DisasContext *s, int reg, TCGv_i32 var);
+
+void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc);
+void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
+ TCGv_i32 a32, int index, MemOp opc);
+void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc);
+void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index, MemOp opc);
+void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, MemOp opc);
+void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
+ int index, MemOp opc);
+void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc);
+void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
+ int index, MemOp opc);
+
+#define DO_GEN_LD(SUFF, OPC) \
+ static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_ld_i32(s, val, a32, index, OPC); \
+ }
+
+#define DO_GEN_ST(SUFF, OPC) \
+ static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
+ TCGv_i32 a32, int index) \
+ { \
+ gen_aa32_st_i32(s, val, a32, index, OPC); \
+ }
+
+static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index)
+{
+ gen_aa32_ld_i64(s, val, a32, index, MO_UQ);
+}
+
+static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
+ TCGv_i32 a32, int index)
+{
+ gen_aa32_st_i64(s, val, a32, index, MO_UQ);
+}
+
+DO_GEN_LD(8u, MO_UB)
+DO_GEN_LD(16u, MO_UW)
+DO_GEN_LD(32u, MO_UL)
+DO_GEN_ST(8, MO_UB)
+DO_GEN_ST(16, MO_UW)
+DO_GEN_ST(32, MO_UL)
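+
+/*
+ * The instantiations above define gen_aa32_ld8u/ld16u/ld32u and
+ * gen_aa32_st8/st16/st32, sparing callers the explicit MemOp.
+ */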
+
+#undef DO_GEN_LD
+#undef DO_GEN_ST
+
+#if defined(CONFIG_USER_ONLY)
+#define IS_USER(s) 1
+#else
+#define IS_USER(s) (s->user)
+#endif
+
+/* Set NZCV flags from the high 4 bits of var. */
+#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
+
+/* Swap low and high halfwords. */
+static inline void gen_swap_half(TCGv_i32 dest, TCGv_i32 var)
+{
+ tcg_gen_rotri_i32(dest, var, 16);
+}
+
+#endif
+++ /dev/null
-/*
- * AArch32 translation, common definitions.
- *
- * Copyright (c) 2021 Linaro, Ltd.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef TARGET_ARM_TRANSLATE_A32_H
-#define TARGET_ARM_TRANSLATE_A32_H
-
-/* Prototypes for autogenerated disassembler functions */
-bool disas_m_nocp(DisasContext *dc, uint32_t insn);
-bool disas_mve(DisasContext *dc, uint32_t insn);
-bool disas_vfp(DisasContext *s, uint32_t insn);
-bool disas_vfp_uncond(DisasContext *s, uint32_t insn);
-bool disas_neon_dp(DisasContext *s, uint32_t insn);
-bool disas_neon_ls(DisasContext *s, uint32_t insn);
-bool disas_neon_shared(DisasContext *s, uint32_t insn);
-
-void load_reg_var(DisasContext *s, TCGv_i32 var, int reg);
-void arm_gen_condlabel(DisasContext *s);
-bool vfp_access_check(DisasContext *s);
-bool vfp_access_check_m(DisasContext *s, bool skip_context_update);
-void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop);
-void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop);
-void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop);
-void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop);
-TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs);
-void gen_set_cpsr(TCGv_i32 var, uint32_t mask);
-void gen_set_condexec(DisasContext *s);
-void gen_update_pc(DisasContext *s, target_long diff);
-void gen_lookup_tb(DisasContext *s);
-long vfp_reg_offset(bool dp, unsigned reg);
-long neon_full_reg_offset(unsigned reg);
-long neon_element_offset(int reg, int element, MemOp memop);
-void gen_rev16(TCGv_i32 dest, TCGv_i32 var);
-void clear_eci_state(DisasContext *s);
-bool mve_eci_check(DisasContext *s);
-void mve_update_eci(DisasContext *s);
-void mve_update_and_store_eci(DisasContext *s);
-bool mve_skip_vmov(DisasContext *s, int vn, int index, int size);
-
-static inline TCGv_i32 load_cpu_offset(int offset)
-{
- TCGv_i32 tmp = tcg_temp_new_i32();
- tcg_gen_ld_i32(tmp, cpu_env, offset);
- return tmp;
-}
-
-/* Load from a 32-bit field to a TCGv_i32 */
-#define load_cpu_field(name) \
- ({ \
- QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 4); \
- load_cpu_offset(offsetof(CPUARMState, name)); \
- })
-
-/* Load from the low half of a 64-bit field to a TCGv_i32 */
-#define load_cpu_field_low32(name) \
- ({ \
- QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 8); \
- load_cpu_offset(offsetoflow32(CPUARMState, name)); \
- })
-
-void store_cpu_offset(TCGv_i32 var, int offset, int size);
-
-#define store_cpu_field(val, name) \
- ({ \
- QEMU_BUILD_BUG_ON(sizeof_field(CPUARMState, name) != 4 \
- && sizeof_field(CPUARMState, name) != 1); \
- store_cpu_offset(val, offsetof(CPUARMState, name), \
- sizeof_field(CPUARMState, name)); \
- })
-
-#define store_cpu_field_constant(val, name) \
- store_cpu_field(tcg_constant_i32(val), name)
-
-/* Create a new temporary and set it to the value of a CPU register. */
-static inline TCGv_i32 load_reg(DisasContext *s, int reg)
-{
- TCGv_i32 tmp = tcg_temp_new_i32();
- load_reg_var(s, tmp, reg);
- return tmp;
-}
-
-void store_reg(DisasContext *s, int reg, TCGv_i32 var);
-
-void gen_aa32_ld_internal_i32(DisasContext *s, TCGv_i32 val,
- TCGv_i32 a32, int index, MemOp opc);
-void gen_aa32_st_internal_i32(DisasContext *s, TCGv_i32 val,
- TCGv_i32 a32, int index, MemOp opc);
-void gen_aa32_ld_internal_i64(DisasContext *s, TCGv_i64 val,
- TCGv_i32 a32, int index, MemOp opc);
-void gen_aa32_st_internal_i64(DisasContext *s, TCGv_i64 val,
- TCGv_i32 a32, int index, MemOp opc);
-void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
- int index, MemOp opc);
-void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
- int index, MemOp opc);
-void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
- int index, MemOp opc);
-void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
- int index, MemOp opc);
-
-#define DO_GEN_LD(SUFF, OPC) \
- static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 a32, int index) \
- { \
- gen_aa32_ld_i32(s, val, a32, index, OPC); \
- }
-
-#define DO_GEN_ST(SUFF, OPC) \
- static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
- TCGv_i32 a32, int index) \
- { \
- gen_aa32_st_i32(s, val, a32, index, OPC); \
- }
-
-static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
- TCGv_i32 a32, int index)
-{
- gen_aa32_ld_i64(s, val, a32, index, MO_UQ);
-}
-
-static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
- TCGv_i32 a32, int index)
-{
- gen_aa32_st_i64(s, val, a32, index, MO_UQ);
-}
-
-DO_GEN_LD(8u, MO_UB)
-DO_GEN_LD(16u, MO_UW)
-DO_GEN_LD(32u, MO_UL)
-DO_GEN_ST(8, MO_UB)
-DO_GEN_ST(16, MO_UW)
-DO_GEN_ST(32, MO_UL)
-
-#undef DO_GEN_LD
-#undef DO_GEN_ST
-
-#if defined(CONFIG_USER_ONLY)
-#define IS_USER(s) 1
-#else
-#define IS_USER(s) (s->user)
-#endif
-
-/* Set NZCV flags from the high 4 bits of var. */
-#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
-
-/* Swap low and high halfwords. */
-static inline void gen_swap_half(TCGv_i32 dest, TCGv_i32 var)
-{
- tcg_gen_rotri_i32(dest, var, 16);
-}
-
-#endif