     }
 }
+static void mve_update_and_store_eci(DisasContext *s)
+{
+    /*
+     * For insns which don't call a helper function that will call
+     * mve_advance_vpt(), this version updates s->eci and also stores
+     * it out to the CPUState field.
+     */
+    if (s->eci) {
+        mve_update_eci(s);
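+        /* ECI lives in condexec bits [7:4], hence the shift */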
+        store_cpu_field(tcg_constant_i32(s->eci << 4), condexec_bits);
+    }
+}
+
 static bool mve_skip_first_beat(DisasContext *s)
 {
     /* Return true if PSR.ECI says we must skip the first beat of this insn */
     };
     return do_long_dual_acc(s, a, fns[a->x]);
 }
+
+static bool trans_VPST(DisasContext *s, arg_VPST *a)
+{
+    TCGv_i32 vpr;
+
+    /* mask == 0 is a "related encoding" */
+    if (!dc_isar_feature(aa32_mve, s) || !a->mask) {
+        return false;
+    }
+    if (!mve_eci_check(s) || !vfp_access_check(s)) {
+        return true;
+    }
+    /*
+     * Set the VPR mask fields. We take advantage of MASK01 and MASK23
+     * being adjacent fields in the register.
+     *
+     * This insn is not predicated, but it is subject to beat-wise
+     * execution, and the mask is updated on the odd-numbered beats.
+     * So if PSR.ECI says we should skip beat 1, we mustn't update the
+     * 01 mask field.
+     */
+    vpr = load_cpu_field(v7m.vpr);
+    switch (s->eci) {
+    case ECI_NONE:
+    case ECI_A0:
+        /* Update both 01 and 23 fields */
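+        /* each mask field is 4 bits, so mask | (mask << 4) fills both */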
+        tcg_gen_deposit_i32(vpr, vpr,
+                            tcg_constant_i32(a->mask | (a->mask << 4)),
+                            R_V7M_VPR_MASK01_SHIFT,
+                            R_V7M_VPR_MASK01_LENGTH + R_V7M_VPR_MASK23_LENGTH);
+        break;
+    case ECI_A0A1:
+    case ECI_A0A1A2:
+    case ECI_A0A1A2B0:
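+        /* Beat 1 has already executed, so the 01 mask field must not change */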
+        /* Update only the 23 mask field */
+        tcg_gen_deposit_i32(vpr, vpr,
+                            tcg_constant_i32(a->mask),
+                            R_V7M_VPR_MASK23_SHIFT, R_V7M_VPR_MASK23_LENGTH);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    store_cpu_field(vpr, v7m.vpr);
+    mve_update_and_store_eci(s);
+    return true;
+}