@@ -2639,6 +2639,8 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
#define VsrSW(i) s32[i]
#define VsrD(i) u64[i]
#define VsrSD(i) s64[i]
+#define VsrSF(i) f32[i]
+#define VsrDF(i) f64[i]
#else
#define VsrB(i) u8[15 - (i)]
#define VsrSB(i) s8[15 - (i)]
@@ -2648,6 +2650,8 @@ static inline bool lsw_reg_in_range(int start, int nregs, int rx)
#define VsrSW(i) s32[3 - (i)]
#define VsrD(i) u64[1 - (i)]
#define VsrSD(i) s64[1 - (i)]
+#define VsrSF(i) f32[3 - (i)]
+#define VsrDF(i) f64[1 - (i)]
#endif
static inline int vsr64_offset(int i, bool high)
@@ -3462,3 +3462,181 @@ void helper_xssubqp(CPUPPCState *env, uint32_t opcode,
*xt = t;
do_float_check_status(env, GETPC());
}
+
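+/* Set the target's softfloat rounding mode from the FPSCR RN field. */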
+static void set_rounding_mode_rn(CPUPPCState *env)
+{
+    uint8_t rmode = (env->fpscr & FP_RN) >> FPSCR_RN0;
+    switch (rmode) {
+    case 0:
+        set_float_rounding_mode(float_round_nearest_even, &env->fp_status);
+        break;
+    case 1:
+        set_float_rounding_mode(float_round_to_zero, &env->fp_status);
+        break;
+    case 2:
+        set_float_rounding_mode(float_round_up, &env->fp_status);
+        break;
+    case 3:
+        set_float_rounding_mode(float_round_down, &env->fp_status);
+        break;
+    default:
+        abort();
+    }
+}
+
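+/*
+ * Per-element callbacks used by vsxger(): vsxger_muladd_f computes one
+ * (negated) multiply(-add) given the accumulator, the xa/xb sources, the
+ * row/column indices, the float_muladd flags and a float_status, while
+ * vsxger_zero clears one unselected accumulator element.
+ */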
+typedef void vsxger_zero(ppc_vsr_t *at, int, int);
+
+typedef void vsxger_muladd_f(ppc_vsr_t *, ppc_vsr_t *, ppc_vsr_t *, int, int,
+                             int flags, float_status *s);
+
+static void vsxger_muladd32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+                            int j, int flags, float_status *s)
+{
+    at[i].VsrSF(j) = float32_muladd(a->VsrSF(i), b->VsrSF(j),
+                                    at[i].VsrSF(j), flags, s);
+}
+
+static void vsxger_mul32(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+                         int j, int flags, float_status *s)
+{
+    at[i].VsrSF(j) = float32_mul(a->VsrSF(i), b->VsrSF(j), s);
+}
+
+static void vsxger_zero32(ppc_vsr_t *at, int i, int j)
+{
+    at[i].VsrSF(j) = float32_zero;
+}
+
+static void vsxger_muladd64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+                            int j, int flags, float_status *s)
+{
+    if (j >= 2) {
+        j -= 2;
+        at[i].VsrDF(j) = float64_muladd(a[i / 2].VsrDF(i % 2), b->VsrDF(j),
+                                        at[i].VsrDF(j), flags, s);
+    }
+}
+
+static void vsxger_mul64(ppc_vsr_t *at, ppc_vsr_t *a, ppc_vsr_t *b, int i,
+                         int j, int flags, float_status *s)
+{
+    if (j >= 2) {
+        j -= 2;
+        at[i].VsrDF(j) = float64_mul(a[i / 2].VsrDF(i % 2), b->VsrDF(j), s);
+    }
+}
+
+static void vsxger_zero64(ppc_vsr_t *at, int i, int j)
+{
+    if (j >= 2) {
+        j -= 2;
+        at[i].VsrDF(j) = float64_zero;
+    }
+}
+
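+/*
+ * Generic GER (rank-1 update) loop: for every accumulator element selected
+ * by xmsk (rows, low nibble of mask) and ymsk (columns, high nibble),
+ * compute at[i][j] = (+-)(a[i] * b[j]) (+-) at[i][j]; elements that are not
+ * selected are set to zero.  The callbacks supply the f32 or f64 (VSR pair)
+ * arithmetic.
+ */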
+static void vsxger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                   ppc_acc_t *at, uint32_t mask, bool acc,
+                   bool neg_mul, bool neg_acc, vsxger_muladd_f mul,
+                   vsxger_muladd_f muladd, vsxger_zero zero)
+{
+    int i, j, xmsk_bit, ymsk_bit, op_flags;
+    uint8_t xmsk = mask & 0x0F;
+    uint8_t ymsk = (mask >> 4) & 0x0F;
+    float_status *excp_ptr = &env->fp_status;
+    op_flags = (neg_acc ^ neg_mul) ? float_muladd_negate_c : 0;
+    op_flags |= (neg_mul) ? float_muladd_negate_result : 0;
+    helper_reset_fpstatus(env);
+    set_rounding_mode_rn(env);
+    for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
+        for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
+            if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
+                if (acc) {
+                    muladd(at, a, b, i, j, op_flags, excp_ptr);
+                } else {
+                    mul(at, a, b, i, j, op_flags, excp_ptr);
+                }
+            } else {
+                zero(at, i, j);
+            }
+        }
+    }
+    do_float_check_status(env, GETPC());
+}
+
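+/* VSX Vector 32-bit Floating-Point GER (rank-1 update) helpers */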
+QEMU_FLATTEN
+void helper_XVF32GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                     ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, false, false, false, vsxger_mul32,
+           vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                       ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, true, false, false, vsxger_mul32,
+           vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                       ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, true, false, true, vsxger_mul32,
+           vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                       ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, true, true, false, vsxger_mul32,
+           vsxger_muladd32, vsxger_zero32);
+}
+
+QEMU_FLATTEN
+void helper_XVF32GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                       ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, true, true, true, vsxger_mul32,
+           vsxger_muladd32, vsxger_zero32);
+}
+
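+/* VSX Vector 64-bit Floating-Point GER helpers; xa is a VSR pair */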
+QEMU_FLATTEN
+void helper_XVF64GER(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                     ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, false, false, false, vsxger_mul64,
+           vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                       ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, true, false, false, vsxger_mul64,
+           vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERPN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                       ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, true, false, true, vsxger_mul64,
+           vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERNP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                       ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, true, true, false, vsxger_mul64,
+           vsxger_muladd64, vsxger_zero64);
+}
+
+QEMU_FLATTEN
+void helper_XVF64GERNN(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+                       ppc_acc_t *at, uint32_t mask)
+{
+    vsxger(env, a, b, at, mask, true, true, true, vsxger_mul64,
+           vsxger_muladd64, vsxger_zero64);
+}
@@ -546,6 +546,16 @@ DEF_HELPER_5(XVI16GER2, void, env, vsr, vsr, vsr, i32)
DEF_HELPER_5(XVI16GER2S, void, env, vsr, vsr, vsr, i32)
DEF_HELPER_5(XVI16GER2PP, void, env, vsr, vsr, vsr, i32)
DEF_HELPER_5(XVI16GER2SPP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF32GER, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF32GERPP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF32GERPN, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF32GERNP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF32GERNN, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF64GER, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF64GERPP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF64GERPN, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF64GERNP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVF64GERNN, void, env, vsr, vsr, vsr, i32)
DEF_HELPER_2(efscfsi, i32, env, i32)
DEF_HELPER_2(efscfui, i32, env, i32)
@@ -171,6 +171,7 @@
@XX3 ...... ..... ..... ..... ........ ... &XX3 xt=%xx_xt xa=%xx_xa xb=%xx_xb
%xx_at 23:3
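+# XA field for instructions that take an even-numbered VSR pair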
+%xx_xa_pair 2:1 17:4 !function=times_2
@XX3_at ...... ... .. ..... ..... ........ ... &XX3 xt=%xx_at xb=%xx_xb
&XX3_dm xt xa xb dm
@@ -734,3 +735,15 @@ XVI16GER2PP 111011 ... -- ..... ..... 01101011 ..- @XX3_at xa=%xx_xa
XVI8GER4SPP 111011 ... -- ..... ..... 01100011 ..- @XX3_at xa=%xx_xa
XVI16GER2S 111011 ... -- ..... ..... 00101011 ..- @XX3_at xa=%xx_xa
XVI16GER2SPP 111011 ... -- ..... ..... 00101010 ..- @XX3_at xa=%xx_xa
+
+XVF32GER 111011 ... -- ..... ..... 00011011 ..- @XX3_at xa=%xx_xa
+XVF32GERPP 111011 ... -- ..... ..... 00011010 ..- @XX3_at xa=%xx_xa
+XVF32GERPN 111011 ... -- ..... ..... 10011010 ..- @XX3_at xa=%xx_xa
+XVF32GERNP 111011 ... -- ..... ..... 01011010 ..- @XX3_at xa=%xx_xa
+XVF32GERNN 111011 ... -- ..... ..... 11011010 ..- @XX3_at xa=%xx_xa
+
+XVF64GER 111011 ... -- .... 0 ..... 00111011 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERPP 111011 ... -- .... 0 ..... 00111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERPN 111011 ... -- .... 0 ..... 10111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERNP 111011 ... -- .... 0 ..... 01111010 ..- @XX3_at xa=%xx_xa_pair
+XVF64GERNN 111011 ... -- .... 0 ..... 11111010 ..- @XX3_at xa=%xx_xa_pair
@@ -2884,6 +2884,18 @@ TRANS64(PMXVI16GER2PP, do_ger_MMIRR_XX3, gen_helper_XVI16GER2PP)
TRANS64(PMXVI16GER2S, do_ger_MMIRR_XX3, gen_helper_XVI16GER2S)
TRANS64(PMXVI16GER2SPP, do_ger_MMIRR_XX3, gen_helper_XVI16GER2SPP)
+TRANS(XVF32GER, do_ger_XX3, gen_helper_XVF32GER)
+TRANS(XVF32GERPP, do_ger_XX3, gen_helper_XVF32GERPP)
+TRANS(XVF32GERPN, do_ger_XX3, gen_helper_XVF32GERPN)
+TRANS(XVF32GERNP, do_ger_XX3, gen_helper_XVF32GERNP)
+TRANS(XVF32GERNN, do_ger_XX3, gen_helper_XVF32GERNN)
+
+TRANS(XVF64GER, do_ger_XX3, gen_helper_XVF64GER)
+TRANS(XVF64GERPP, do_ger_XX3, gen_helper_XVF64GERPP)
+TRANS(XVF64GERPN, do_ger_XX3, gen_helper_XVF64GERPN)
+TRANS(XVF64GERNP, do_ger_XX3, gen_helper_XVF64GERNP)
+TRANS(XVF64GERNN, do_ger_XX3, gen_helper_XVF64GERNN)
+
#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM