    tcg_isa_2_06,
    tcg_isa_2_07,
    tcg_isa_3_00,
+    tcg_isa_3_10,
} TCGPowerISA;
extern TCGPowerISA have_isa;
#define have_isa_2_06 (have_isa >= tcg_isa_2_06)
#define have_isa_2_07 (have_isa >= tcg_isa_2_07)
#define have_isa_3_00 (have_isa >= tcg_isa_3_00)
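+/* "3.10" refers to Power ISA v3.1, first implemented by POWER10. */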
+#define have_isa_3_10 (have_isa >= tcg_isa_3_10)
/* optional instructions automatically implemented */
#define TCG_TARGET_HAS_ext8u_i32 0 /* andi */
#define VMULOUH VX4(72)
#define VMULOUW VX4(136) /* v2.07 */
#define VMULUWM VX4(137) /* v2.07 */
+#define VMULLD VX4(457) /* v3.10 */
#define VMSUMUHM VX4(38)
#define VMRGHB VX4(12)
            return -1;
        case MO_32:
            return have_isa_2_07 ? 1 : -1;
+        case MO_64:
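+            /* 0 or 1: there is no expansion fallback for MO_64. */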
+            return have_isa_3_10;
        }
        return 0;
    case INDEX_op_bitsel_vec:
    static const uint32_t
        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
+        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
        neg_op[4] = { 0, 0, VNEGW, VNEGD },
        eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
        ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
        a1 = 0;
        break;
    case INDEX_op_mul_vec:
-        tcg_debug_assert(vece == MO_32 && have_isa_2_07);
-        insn = VMULUWM;
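+        /* Zero entries (MO_8/MO_16) never reach here; they are expanded. */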
+        insn = mul_op[vece];
        break;
    case INDEX_op_ssadd_vec:
        insn = ssadd_op[vece];
        have_isa = tcg_isa_3_00;
    }
#endif
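+    /* Must follow the 3.00 check: the last matching hwcap2 bit wins. */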
+#ifdef PPC_FEATURE2_ARCH_3_1
+    if (hwcap2 & PPC_FEATURE2_ARCH_3_1) {
+        have_isa = tcg_isa_3_10;
+    }
+#endif
#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */