Message ID: 20211229023348.12606-7-frank.chang@sifive.com
State: New, archived
Series: Add RISC-V RVV Zve32f and Zve64f extensions
On Wed, Dec 29, 2021 at 12:43 PM <frank.chang@sifive.com> wrote:
>
> From: Frank Chang <frank.chang@sifive.com>
>
> Zve64f extension requires the scalar processor to implement the F
> extension and implement all vector floating-point instructions for
> floating-point operands with EEW=32 (i.e., no widening floating-point
> operations).
>
> Signed-off-by: Frank Chang <frank.chang@sifive.com>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

> ---
>  target/riscv/insn_trans/trans_rvv.c.inc | 41 +++++++++++++++++++------
>  1 file changed, 31 insertions(+), 10 deletions(-)
>
> diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
> index a1e403fe86..0aa8b7918f 100644
> --- a/target/riscv/insn_trans/trans_rvv.c.inc
> +++ b/target/riscv/insn_trans/trans_rvv.c.inc
> @@ -66,6 +66,17 @@ static bool require_scale_rvf(DisasContext *s)
>      }
>  }
>
> +static bool require_zve64f(DisasContext *s)
> +{
> +    /* RVV + Zve64f = RVV. */
> +    if (has_ext(s, RVV)) {
> +        return true;
> +    }
> +
> +    /* Zve64f doesn't support FP64. (Section 18.2) */
> +    return s->ext_zve64f ? s->sew <= MO_32 : true;
> +}
> +
>  /* Destination vector register group cannot overlap source mask register. */
>  static bool require_vm(int vm, int vd)
>  {
> @@ -2204,7 +2215,8 @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a)
>      return require_rvv(s) &&
>             require_rvf(s) &&
>             vext_check_isa_ill(s) &&
> -           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
> +           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
> +           require_zve64f(s);
>  }
>
>  /* OPFVV without GVEC IR */
> @@ -2284,7 +2296,8 @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a)
>      return require_rvv(s) &&
>             require_rvf(s) &&
>             vext_check_isa_ill(s) &&
> -           vext_check_ss(s, a->rd, a->rs2, a->vm);
> +           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
> +           require_zve64f(s);
>  }
>
>  /* OPFVF without GVEC IR */
> @@ -2501,7 +2514,8 @@ static bool opfv_check(DisasContext *s, arg_rmr *a)
>             require_rvf(s) &&
>             vext_check_isa_ill(s) &&
>             /* OPFV instructions ignore vs1 check */
> -           vext_check_ss(s, a->rd, a->rs2, a->vm);
> +           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
> +           require_zve64f(s);
>  }
>
>  static bool do_opfv(DisasContext *s, arg_rmr *a,
> @@ -2566,7 +2580,8 @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
>      return require_rvv(s) &&
>             require_rvf(s) &&
>             vext_check_isa_ill(s) &&
> -           vext_check_mss(s, a->rd, a->rs1, a->rs2);
> +           vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
> +           require_zve64f(s);
>  }
>
>  GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
> @@ -2579,7 +2594,8 @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
>      return require_rvv(s) &&
>             require_rvf(s) &&
>             vext_check_isa_ill(s) &&
> -           vext_check_ms(s, a->rd, a->rs2);
> +           vext_check_ms(s, a->rd, a->rs2) &&
> +           require_zve64f(s);
>  }
>
>  GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
> @@ -2600,7 +2616,8 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
>      if (require_rvv(s) &&
>          require_rvf(s) &&
>          vext_check_isa_ill(s) &&
> -        require_align(a->rd, s->lmul)) {
> +        require_align(a->rd, s->lmul) &&
> +        require_zve64f(s)) {
>          gen_set_rm(s, RISCV_FRM_DYN);
>
>          TCGv_i64 t1;
> @@ -3326,7 +3343,8 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
>  {
>      if (require_rvv(s) &&
>          require_rvf(s) &&
> -        vext_check_isa_ill(s)) {
> +        vext_check_isa_ill(s) &&
> +        require_zve64f(s)) {
>          gen_set_rm(s, RISCV_FRM_DYN);
>
>          unsigned int ofs = (8 << s->sew);
> @@ -3352,7 +3370,8 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
>  {
>      if (require_rvv(s) &&
>          require_rvf(s) &&
> -        vext_check_isa_ill(s)) {
> +        vext_check_isa_ill(s) &&
> +        require_zve64f(s)) {
>          gen_set_rm(s, RISCV_FRM_DYN);
>
>          /* The instructions ignore LMUL and vector register group. */
> @@ -3403,13 +3422,15 @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
>  static bool fslideup_check(DisasContext *s, arg_rmrr *a)
>  {
>      return slideup_check(s, a) &&
> -           require_rvf(s);
> +           require_rvf(s) &&
> +           require_zve64f(s);
>  }
>
>  static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
>  {
>      return slidedown_check(s, a) &&
> -           require_rvf(s);
> +           require_rvf(s) &&
> +           require_zve64f(s);
>  }
>
>  GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
> --
> 2.31.1
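
The check this patch adds is compact, so a stand-alone sketch of its truth table may help. In the sketch below, struct ctx, the has_rvv field, and the main() driver are hypothetical stand-ins for QEMU's DisasContext, has_ext(s, RVV), and the translator's call sites; only the final ternary mirrors the patched code, so treat this as an illustration under those assumptions rather than the actual implementation.

/*
 * Stand-alone sketch of the require_zve64f() gating logic.
 * Build: cc -std=c99 sketch.c && ./a.out
 */
#include <stdbool.h>
#include <stdio.h>

/* MemOp-style element width encoding: MO_32 == 2 means log2(4 bytes). */
enum { MO_8, MO_16, MO_32, MO_64 };

struct ctx {                /* hypothetical stand-in for DisasContext */
    bool has_rvv;           /* full V extension present */
    bool ext_zve64f;        /* Zve64f extension present */
    int  sew;               /* selected element width, MO_* encoding */
};

static bool require_zve64f(const struct ctx *s)
{
    /* Full RVV subsumes Zve64f, so nothing extra to check. */
    if (s->has_rvv) {
        return true;
    }
    /*
     * Zve64f alone has no FP64: only EEW <= 32 floating-point ops are
     * legal. When Zve64f is absent too, return true and let the other
     * require_*() predicates in the && chain reject the instruction.
     */
    return s->ext_zve64f ? s->sew <= MO_32 : true;
}

int main(void)
{
    struct ctx s = { .has_rvv = false, .ext_zve64f = true, .sew = MO_32 };
    printf("Zve64f-only, EEW=32: %s\n", require_zve64f(&s) ? "legal" : "illegal");

    s.sew = MO_64;
    printf("Zve64f-only, EEW=64: %s\n", require_zve64f(&s) ? "legal" : "illegal");
    return 0;
}

The design point worth noting is the trailing "true": require_zve64f() never rejects on its own when Zve64f is absent, it only vetoes EEW=64 floating-point operations on a Zve64f-only configuration, which is why the patch can append it to every existing check chain without changing behaviour for full-RVV CPUs.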