Implement the MVE VSHLC insn, which performs a shift left of the
entire vector, with carry-in bits provided from a general-purpose
register and carry-out bits written back to that register.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-14-peter.maydell@linaro.org
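---

Not part of the commit message, but as an aid to reading the hunks
below (which touch target/arm/helper-mve.h, target/arm/mve.decode,
target/arm/mve_helper.c and target/arm/translate-mve.c): a rough
scalar model of the operation, ignoring predication. The name
vshlc_model and the little-endian uint32_t q[4] view of the 128-bit
vector are illustrative assumptions, not QEMU code. With no lanes
predicated away, the net effect is a 128-bit left shift by 'shift',
with Rdm supplying the incoming low bits and receiving the bits
shifted out at the top.

    static uint32_t vshlc_model(uint32_t q[4], uint32_t rdm, unsigned shift)
    {
        unsigned e;

        /* shift is 1..32; the encoded imm of 0 means shift == 32 */
        for (e = 0; e < 4; e++) {
            uint32_t carry_out, newval;

            if (shift == 32) {
                /* "shift by 32": the carry word replaces each lane in turn */
                carry_out = q[e];
                newval = rdm;
            } else {
                /* bits leaving the top of this lane feed the next one */
                carry_out = q[e] >> (32 - shift);
                newval = (q[e] << shift) | (rdm & ((1u << shift) - 1));
            }
            q[e] = newval;
            rdm = carry_out;
        }
        return rdm; /* written back to the general-purpose register */
    }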
DEF_HELPER_FLAGS_4(mve_vqrshrunbh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vqrshruntb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vqrshrunth, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vshlc, TCG_CALL_NO_WG, i32, env, ptr, i32, i32)
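
The new declaration differs from the other shift helpers above in
returning i32 rather than void: the helper hands the updated Rdm value
back to generated code. Expanded, the prototype amounts to roughly the
following (a sketch; the parameter names here are illustrative):

    uint32_t helper_mve_vshlc(CPUARMState *env, void *vd,
                              uint32_t rdm, uint32_t shift);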
VQRSHRUNB 111 1 1110 1 . ... ... ... 0 1111 1 1 . 0 ... 0 @2_shr_h
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_b
VQRSHRUNT 111 1 1110 1 . ... ... ... 1 1111 1 1 . 0 ... 0 @2_shr_h
+
+VSHLC 111 0 1110 1 . 1 imm:5 ... 0 1111 1100 rdm:4 qd=%qd
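
In the decode pattern, imm:5 is the shift amount (an encoding of 0
means "shift by 32", handled in the helper) and rdm:4 names the
general-purpose register. decodetree turns the named fields into the
arg_VSHLC structure consumed by trans_VSHLC() below; the generated
typedef looks roughly like this (a sketch; field order may differ):

    typedef struct {
        int imm;
        int qd;
        int rdm;
    } arg_VSHLC;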
DO_VSHRN_SAT_UH(vqrshrnb_uh, vqrshrnt_uh, DO_RSHRN_UH)
DO_VSHRN_SAT_SB(vqrshrunbb, vqrshruntb, DO_RSHRUN_B)
DO_VSHRN_SAT_SH(vqrshrunbh, vqrshrunth, DO_RSHRUN_H)
+
+uint32_t HELPER(mve_vshlc)(CPUARMState *env, void *vd, uint32_t rdm,
+ uint32_t shift)
+{
+ uint32_t *d = vd;
+ uint16_t mask = mve_element_mask(env);
+ unsigned e;
+ uint32_t r;
+
+ /*
+ * For each 32-bit element, we shift it left, bringing in the
+ * low 'shift' bits of rdm at the bottom. Bits shifted out at
+ * the top become the new rdm, if the predicate mask permits.
+ * The final rdm value is returned to update the register.
+ * shift == 0 here means "shift by 32 bits".
+ */
+ if (shift == 0) {
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ r = rdm;
+ if (mask & 1) {
+ rdm = d[H4(e)];
+ }
+ mergemask(&d[H4(e)], r, mask);
+ }
+ } else {
+ uint32_t shiftmask = MAKE_64BIT_MASK(0, shift);
+
+ for (e = 0; e < 16 / 4; e++, mask >>= 4) {
+ r = (d[H4(e)] << shift) | (rdm & shiftmask);
+ if (mask & 1) {
+ rdm = d[H4(e)] >> (32 - shift);
+ }
+ mergemask(&d[H4(e)], r, mask);
+ }
+ }
+ mve_advance_vpt(env);
+ return rdm;
+}
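
A concrete run of the scalar model sketched earlier (again,
illustrative only): with shift == 8 and no lanes predicated away, the
carry chains through all four lanes, i.e. a 128-bit left shift by one
byte:

    uint32_t q[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
    uint32_t rdm = 0x000000a5;

    rdm = vshlc_model(q, rdm, 8);
    /* q is now { 0x223344a5, 0x66778811, 0xaabbcc55, 0xeeff0099 } */
    /* rdm is now 0x000000dd (the top byte shifted out of q[3]) */

In the real helper, mergemask() only commits a lane's new value where
the predicate mask bits for it are set, and a predicated-off lane must
also leave rdm untouched, which is why the carry update is guarded by
(mask & 1).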
DO_2SHIFT_N(VQRSHRNT_U, vqrshrnt_u)
DO_2SHIFT_N(VQRSHRUNB, vqrshrunb)
DO_2SHIFT_N(VQRSHRUNT, vqrshrunt)
+
+static bool trans_VSHLC(DisasContext *s, arg_VSHLC *a)
+{
+ /*
+ * Whole Vector Left Shift with Carry. The carry is taken
+ * from a general purpose register and written back there.
+ * An imm of 0 means "shift by 32".
+ */
+ TCGv_ptr qd;
+ TCGv_i32 rdm;
+
+ if (!dc_isar_feature(aa32_mve, s) || !mve_check_qreg_bank(s, a->qd)) {
+ return false;
+ }
+ if (a->rdm == 13 || a->rdm == 15) {
+ /* CONSTRAINED UNPREDICTABLE: we UNDEF */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qd = mve_qreg_ptr(a->qd);
+ rdm = load_reg(s, a->rdm);
+ gen_helper_mve_vshlc(rdm, cpu_env, qd, rdm, tcg_constant_i32(a->imm));
+ store_reg(s, a->rdm, rdm);
+ tcg_temp_free_ptr(qd);
+ mve_update_eci(s);
+ return true;
+}
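
One note on the helper call: rdm is used both as the call's returned
value and as one of its arguments, which is safe because TCG reads the
arguments before the return value is stored back. Guest code would use
the insn along these lines (hypothetical example):

    vshlc q0, r2, #8    @ q0 <<= 8; low byte from r2; r2 = bits shifted out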