Message ID | 20211021194547.672988-30-matheus.ferst@eldorado.org.br (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | PowerISA v3.1 instruction batch | expand |
On 10/21/21 12:45 PM, matheus.ferst@eldorado.org.br wrote: > From: "Bruno Larsen (billionai)" <bruno.larsen@eldorado.org.br> > > Implemented XXSPLTI32DX emulation using decodetree > > Signed-off-by: Bruno Larsen (billionai) <bruno.larsen@eldorado.org.br> > Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br> > --- > target/ppc/insn64.decode | 11 ++++++++ > target/ppc/translate/vsx-impl.c.inc | 41 +++++++++++++++++++++++++++++ > 2 files changed, 52 insertions(+) > > diff --git a/target/ppc/insn64.decode b/target/ppc/insn64.decode > index 880ac3edc7..8d8d5d5729 100644 > --- a/target/ppc/insn64.decode > +++ b/target/ppc/insn64.decode > @@ -32,6 +32,14 @@ > ...... ..... ra:5 ................ \ > &PLS_D si=%pls_si rt=%rt_tsxp > > +# Format 8RR:D > +%8rr_si 32:s16 0:16 > +%8rr_xt 16:1 21:5 > +&8RR_D_IX xt ix si:int32_t What is it about this field that says signed, especially? It doesn't seem wrong, of course, but you are jumping through extra hoops here... > + get_cpu_vsrh(t0, a->xt); > + get_cpu_vsrl(t1, a->xt); > + > + tcg_gen_movi_i64(new_val, a->si); > + if (a->ix) { > + tcg_gen_movi_i64(mask, 0x00000000ffffffff); > + tcg_gen_shli_i64(new_val, new_val, 32); > + } else { > + tcg_gen_movi_i64(mask, 0xffffffff00000000); > + } > + tcg_gen_and_i64(t0, t0, mask); > + tcg_gen_or_i64(t0, t0, new_val); > + tcg_gen_and_i64(t1, t1, mask); > + tcg_gen_or_i64(t1, t1, new_val); > + > + set_cpu_vsrh(a->xt, t0); > + set_cpu_vsrl(a->xt, t1); You're working too hard here. I think you should just store the two int32_t at the correct offsets. And failing that, use tcg_gen_deposit_i64. r~
diff --git a/target/ppc/insn64.decode b/target/ppc/insn64.decode index 880ac3edc7..8d8d5d5729 100644 --- a/target/ppc/insn64.decode +++ b/target/ppc/insn64.decode @@ -32,6 +32,14 @@ ...... ..... ra:5 ................ \ &PLS_D si=%pls_si rt=%rt_tsxp +# Format 8RR:D +%8rr_si 32:s16 0:16 +%8rr_xt 16:1 21:5 +&8RR_D_IX xt ix si:int32_t +@8RR_D_IX ...... .. .... .. .. ................ \ + ...... ..... ... ix:1 . ................ \ + &8RR_D_IX si=%8rr_si xt=%8rr_xt + ### Fixed-Point Load Instructions PLBZ 000001 10 0--.-- .................. \ @@ -156,3 +164,6 @@ PLXVP 000001 00 0--.-- .................. \ 111010 ..... ..... ................ @8LS_D_TSXP PSTXVP 000001 00 0--.-- .................. \ 111110 ..... ..... ................ @8LS_D_TSXP + +XXSPLTI32DX 000001 01 0000 -- -- ................ \ + 100000 ..... 000 .. ................ @8RR_D_IX diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc index 3dbdfc2539..17cbe2dc15 100644 --- a/target/ppc/translate/vsx-impl.c.inc +++ b/target/ppc/translate/vsx-impl.c.inc @@ -1491,6 +1491,47 @@ static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a) return true; } +static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + TCGv_i64 new_val; + TCGv_i64 mask; + TCGv_i64 t0; + TCGv_i64 t1; + new_val = tcg_temp_new_i64(); + mask = tcg_temp_new_i64(); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + get_cpu_vsrh(t0, a->xt); + get_cpu_vsrl(t1, a->xt); + + tcg_gen_movi_i64(new_val, a->si); + if (a->ix) { + tcg_gen_movi_i64(mask, 0x00000000ffffffff); + tcg_gen_shli_i64(new_val, new_val, 32); + } else { + tcg_gen_movi_i64(mask, 0xffffffff00000000); + } + tcg_gen_and_i64(t0, t0, mask); + tcg_gen_or_i64(t0, t0, new_val); + tcg_gen_and_i64(t1, t1, mask); + tcg_gen_or_i64(t1, t1, new_val); + + set_cpu_vsrh(a->xt, t0); + set_cpu_vsrl(a->xt, t1); + + + tcg_temp_free_i64(mask); + tcg_temp_free_i64(new_val); + 
tcg_temp_free_i64(t1); + tcg_temp_free_i64(t0); + + return true; +} + static void gen_xxsldwi(DisasContext *ctx) { TCGv_i64 xth, xtl;