--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -238,6 +238,8 @@ typedef union _ppc_vsr_t {
typedef ppc_vsr_t ppc_avr_t;
typedef ppc_vsr_t ppc_fprp_t;
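+/* A 512-bit MMA accumulator (AT) is stored as four consecutive VSRs. */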
+typedef ppc_vsr_t ppc_acc_t;
#if !defined(CONFIG_USER_ONLY)
/* Software TLB cache */
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -537,6 +537,15 @@ DEF_HELPER_5(XXBLENDVB, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVH, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVW, void, vsr, vsr, vsr, vsr, i32)
DEF_HELPER_5(XXBLENDVD, void, vsr, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI4GER8, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI4GER8PP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI8GER4, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI8GER4PP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI8GER4SPP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI16GER2, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI16GER2S, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI16GER2PP, void, env, vsr, vsr, vsr, i32)
+DEF_HELPER_5(XVI16GER2SPP, void, env, vsr, vsr, vsr, i32)
DEF_HELPER_2(efscfsi, i32, env, i32)
DEF_HELPER_2(efscfui, i32, env, i32)
--- a/target/ppc/insn32.decode
+++ b/target/ppc/insn32.decode
@@ -170,6 +170,11 @@
&XX3 xt xa xb
@XX3 ...... ..... ..... ..... ........ ... &XX3 xt=%xx_xt xa=%xx_xa xb=%xx_xb
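+# GER instructions target one of the eight accumulators (AT), a 3-bit
+# field, so they reuse &XX3 with xt taken from %xx_at instead of %xx_xt.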
+%xx_at 23:3
+@XX3_at ...... ... .. ..... ..... ........ ... &XX3 xt=%xx_at xb=%xx_xb
+
&XX3_dm xt xa xb dm
@XX3_dm ...... ..... ..... ..... . dm:2 ..... ... &XX3_dm xt=%xx_xt xa=%xx_xa xb=%xx_xb
@@ -719,3 +724,15 @@ RFEBB 010011-------------- . 0010010010 - @XL_s
XXMFACC 011111 ... -- 00000 ----- 0010110001 - @X_a
XXMTACC 011111 ... -- 00001 ----- 0010110001 - @X_a
XXSETACCZ 011111 ... -- 00011 ----- 0010110001 - @X_a
+
+## Vector GER instructions
+
+XVI4GER8 111011 ... -- ..... ..... 00100011 ..- @XX3_at xa=%xx_xa
+XVI4GER8PP 111011 ... -- ..... ..... 00100010 ..- @XX3_at xa=%xx_xa
+XVI8GER4 111011 ... -- ..... ..... 00000011 ..- @XX3_at xa=%xx_xa
+XVI8GER4PP 111011 ... -- ..... ..... 00000010 ..- @XX3_at xa=%xx_xa
+XVI16GER2 111011 ... -- ..... ..... 01001011 ..- @XX3_at xa=%xx_xa
+XVI16GER2PP 111011 ... -- ..... ..... 01101011 ..- @XX3_at xa=%xx_xa
+XVI8GER4SPP 111011 ... -- ..... ..... 01100011 ..- @XX3_at xa=%xx_xa
+XVI16GER2S 111011 ... -- ..... ..... 00101011 ..- @XX3_at xa=%xx_xa
+XVI16GER2SPP 111011 ... -- ..... ..... 00101010 ..- @XX3_at xa=%xx_xa
--- a/target/ppc/int_helper.c
+++ b/target/ppc/int_helper.c
@@ -782,6 +782,150 @@ VCT(uxs, cvtsduw, u32)
VCT(sxs, cvtsdsw, s32)
#undef VCT
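+/*
+ * Signature of the per-lane dot-product routines below: take one 32-bit
+ * lane of each source operand plus the 8-bit product mask (PMSK) and
+ * return the signed sum of the selected element products.
+ */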
+typedef int64_t do_ger(uint32_t, uint32_t, uint32_t);
+
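+/* Rank-8 step: eight signed 4-bit x signed 4-bit products per 32-bit lane. */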
+static int64_t ger_rank8(uint32_t a, uint32_t b, uint32_t mask)
+{
+ int64_t psum = 0;
+ for (int i = 0; i < 8; i++, mask >>= 1) {
+ if (mask & 1) {
+ psum += sextract32(a, 4 * i, 4) * sextract32(b, 4 * i, 4);
+ }
+ }
+ return psum;
+}
+
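+/* Rank-4 step: four signed 8-bit x unsigned 8-bit products per 32-bit lane. */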
+static int64_t ger_rank4(uint32_t a, uint32_t b, uint32_t mask)
+{
+ int64_t psum = 0;
+ for (int i = 0; i < 4; i++, mask >>= 1) {
+ if (mask & 1) {
+ psum += sextract32(a, 8 * i, 8) * (int64_t)extract32(b, 8 * i, 8);
+ }
+ }
+ return psum;
+}
+
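+/* Rank-2 step: two signed 16-bit x signed 16-bit products per 32-bit lane. */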
+static int64_t ger_rank2(uint32_t a, uint32_t b, uint32_t mask)
+{
+ int64_t psum = 0;
+ for (int i = 0; i < 2; i++, mask >>= 1) {
+ if (mask & 1) {
+ psum += sextract32(a, 16 * i, 16) * sextract32(b, 16 * i, 16);
+ }
+ }
+ return psum;
+}
+
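+/*
+ * Common body of the XVI*GER* helpers: for each (i, j) position enabled
+ * by XMSK/YMSK, compute the PMSK-masked dot product of a->word[i] and
+ * b->word[j], optionally add the previous at[i].word[j] (acc) and
+ * saturate to [INT32_MIN, INT32_MAX] (sat); disabled entries are zeroed.
+ */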
+static void xviger(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b, ppc_acc_t *at,
+ uint32_t mask, bool sat, bool acc, do_ger ger)
+{
+ uint8_t pmsk = FIELD_EX32(mask, GER_MSK, PMSK),
+ xmsk = FIELD_EX32(mask, GER_MSK, XMSK),
+ ymsk = FIELD_EX32(mask, GER_MSK, YMSK);
+ uint8_t xmsk_bit, ymsk_bit;
+ int64_t psum;
+ int i, j;
+ for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {
+ for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {
+ if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {
+ psum = ger(a->VsrW(i), b->VsrW(j), pmsk);
+ if (acc) {
+ psum += at[i].VsrSW(j);
+ }
+ if (sat && psum > INT32_MAX) {
+ set_vscr_sat(env);
+ at[i].VsrSW(j) = INT32_MAX;
+ } else if (sat && psum < INT32_MIN) {
+ set_vscr_sat(env);
+ at[i].VsrSW(j) = INT32_MIN;
+ } else {
+ at[i].VsrSW(j) = (int32_t) psum;
+ }
+ } else {
+ at[i].VsrSW(j) = 0;
+ }
+ }
+ }
+}
+
+QEMU_FLATTEN
+void helper_XVI4GER8(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, false, ger_rank8);
+}
+
+QEMU_FLATTEN
+void helper_XVI4GER8PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, true, ger_rank8);
+}
+
+QEMU_FLATTEN
+void helper_XVI8GER4(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, false, ger_rank4);
+}
+
+QEMU_FLATTEN
+void helper_XVI8GER4PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, true, ger_rank4);
+}
+
+QEMU_FLATTEN
+void helper_XVI8GER4SPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, true, true, ger_rank4);
+}
+
+QEMU_FLATTEN
+void helper_XVI16GER2(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, false, ger_rank2);
+}
+
+QEMU_FLATTEN
+void helper_XVI16GER2S(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, true, false, ger_rank2);
+}
+
+QEMU_FLATTEN
+void helper_XVI16GER2PP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, false, true, ger_rank2);
+}
+
+QEMU_FLATTEN
+void helper_XVI16GER2SPP(CPUPPCState *env, ppc_vsr_t *a, ppc_vsr_t *b,
+ ppc_acc_t *at, uint32_t mask)
+{
+ xviger(env, a, b, at, mask, true, true, ger_rank2);
+}
+
target_ulong helper_vclzlsbb(ppc_avr_t *r)
{
target_ulong count = 0;
--- a/target/ppc/internal.h
+++ b/target/ppc/internal.h
@@ -18,6 +18,8 @@
#ifndef PPC_INTERNAL_H
#define PPC_INTERNAL_H
+#include "hw/registerfields.h"
+
#define FUNC_MASK(name, ret_type, size, max_val) \
static inline ret_type name(uint##size##_t start, \
uint##size##_t end) \
@@ -291,4 +293,22 @@ G_NORETURN void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
uintptr_t retaddr);
#endif
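+/*
+ * Packed layout of the GER masks: XMSK (rows) in bits 0-3, YMSK (columns)
+ * in bits 4-7 and PMSK (products) in bits 8-15.
+ */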
+FIELD(GER_MSK, XMSK, 0, 4)
+FIELD(GER_MSK, YMSK, 4, 4)
+FIELD(GER_MSK, PMSK, 8, 8)
+
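+/* Combine the PMSK/YMSK/XMSK values into the layout described above. */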
+static inline int ger_pack_masks(int pmsk, int ymsk, int xmsk)
+{
+ int msk = 0;
+ msk = FIELD_DP32(msk, GER_MSK, XMSK, xmsk);
+ msk = FIELD_DP32(msk, GER_MSK, YMSK, ymsk);
+ msk = FIELD_DP32(msk, GER_MSK, PMSK, pmsk);
+ return msk;
+}
+
#endif /* PPC_INTERNAL_H */
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -17,6 +17,14 @@ static inline TCGv_ptr gen_vsr_ptr(int reg)
return r;
}
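+/* Like gen_vsr_ptr(), but points at accumulator 'reg' (its first VSR). */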
+static inline TCGv_ptr gen_acc_ptr(int reg)
+{
+ TCGv_ptr r = tcg_temp_new_ptr();
+ tcg_gen_addi_ptr(r, cpu_env, acc_full_offset(reg));
+ return r;
+}
+
#define VSX_LOAD_SCALAR(name, operation) \
static void gen_##name(DisasContext *ctx) \
{ \
@@ -2818,6 +2826,43 @@ static bool trans_XXSETACCZ(DisasContext *ctx, arg_X_a *a)
return true;
}
+static bool do_ger_XX3(DisasContext *ctx, arg_XX3 *a,
+ void (*helper)(TCGv_env, TCGv_ptr, TCGv_ptr,
+ TCGv_ptr, TCGv_i32))
+{
+ uint32_t mask;
+ TCGv_ptr xt, xa, xb;
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VSX(ctx);
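+    /* AT (four consecutive VSRs) must not overlap either source VSR. */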
+ if (unlikely((a->xa / 4 == a->xt) || (a->xb / 4 == a->xt))) {
+ gen_invalid(ctx);
+ return true;
+ }
+
+ xt = gen_acc_ptr(a->xt);
+ xa = gen_vsr_ptr(a->xa);
+ xb = gen_vsr_ptr(a->xb);
+
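+    /* Non-prefixed GER: all rows, columns and products are enabled. */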
+ mask = 0xFFFFFFFF;
+ helper(cpu_env, xa, xb, xt, tcg_constant_i32(mask));
+ tcg_temp_free_ptr(xt);
+ tcg_temp_free_ptr(xa);
+ tcg_temp_free_ptr(xb);
+ return true;
+}
+
+TRANS(XVI4GER8, do_ger_XX3, gen_helper_XVI4GER8)
+TRANS(XVI4GER8PP, do_ger_XX3, gen_helper_XVI4GER8PP)
+TRANS(XVI8GER4, do_ger_XX3, gen_helper_XVI8GER4)
+TRANS(XVI8GER4PP, do_ger_XX3, gen_helper_XVI8GER4PP)
+TRANS(XVI8GER4SPP, do_ger_XX3, gen_helper_XVI8GER4SPP)
+TRANS(XVI16GER2, do_ger_XX3, gen_helper_XVI16GER2)
+TRANS(XVI16GER2PP, do_ger_XX3, gen_helper_XVI16GER2PP)
+TRANS(XVI16GER2S, do_ger_XX3, gen_helper_XVI16GER2S)
+TRANS(XVI16GER2SPP, do_ger_XX3, gen_helper_XVI16GER2SPP)
+
#undef GEN_XX2FORM
#undef GEN_XX3FORM
#undef GEN_XX2IFORM