Message ID | 20240718021012.2057986-6-alistair.francis@wdc.com
---|---
State | New, archived
Series | [PULL,01/30] target/riscv: Add zimop extension
On Thu, Jul 18, 2024 at 12:10 PM Alistair Francis <alistair23@gmail.com> wrote:
>
> From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
>
> Zama16b is the property that misaligned loads/stores/atomics within
> a naturally aligned 16-byte region are atomic.
>
> According to the specification, Zama16b applies only to AMOs, loads
> and stores defined in the base ISAs, and loads and stores of no more
> than XLEN bits defined in the F, D, and Q extensions. Thus it should
> not apply to zacas or RVC instructions.
>
> For an instruction in that set, if all accessed bytes lie within one
> 16-byte granule, the instruction will not raise an exception for reasons
> of address alignment, and it will give rise to only one memory operation
> for the purposes of RVWMO -- i.e., it will execute atomically.
>
> Signed-off-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
> Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
> Message-ID: <20240709113652.1239-6-zhiwei_liu@linux.alibaba.com>
> Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
> ---
>  target/riscv/cpu_cfg.h                  |  1 +
>  target/riscv/cpu.c                      |  2 ++
>  target/riscv/insn_trans/trans_rva.c.inc | 42 ++++++++++++++-----------
>  target/riscv/insn_trans/trans_rvd.c.inc | 14 +++++++--
>  target/riscv/insn_trans/trans_rvf.c.inc | 14 +++++++--
>  target/riscv/insn_trans/trans_rvi.c.inc |  6 ++++
>  6 files changed, 57 insertions(+), 22 deletions(-)
>
> [...]
>
> diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
> index d9ce9e407f..1f5fac65a2 100644
> --- a/target/riscv/insn_trans/trans_rvd.c.inc
> +++ b/target/riscv/insn_trans/trans_rvd.c.inc
> @@ -42,13 +42,18 @@
>  static bool trans_fld(DisasContext *ctx, arg_fld *a)
>  {
>      TCGv addr;
> +    MemOp memop = MO_TEUQ;
>
>      REQUIRE_FPU;
>      REQUIRE_EXT(ctx, RVD);
>
> +    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {

Richard pointed out that checking the cur_insn_len looks a bit strange.

He's right, the length of the instruction has no impact. We should
instead just check for ext_zama16b.

Do you mind sending a follow-up patch to remove all of the instruction
length checks?

Alistair
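An aside on the semantics: the guarantee described in the commit message reduces to a single granule test. The helper below is an illustrative sketch based on that spec text, not code from the patch:

```c
#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the Zama16b condition from the commit message (illustrative
 * only): an access of `len` bytes starting at `addr` is guaranteed
 * exception-free and single-copy atomic iff every accessed byte lies in
 * one naturally aligned 16-byte granule, i.e. the first and last byte
 * share the same granule index (addr >> 4).
 */
static bool zama16b_covers(uint64_t addr, unsigned len)
{
    return len >= 1 && ((addr ^ (addr + len - 1)) >> 4) == 0;
}
```

For example, an 8-byte access at 0x1004 qualifies (bytes 0x1004..0x100b stay inside the granule starting at 0x1000), while the same access at 0x100c crosses into the next granule and gets no guarantee.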
On 2024/7/23 7:32, Alistair Francis wrote:
> On Thu, Jul 18, 2024 at 12:10 PM Alistair Francis <alistair23@gmail.com> wrote:
>> From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
>>
>> Zama16b is the property that misaligned loads/stores/atomics within
>> a naturally aligned 16-byte region are atomic.
>>
>> [...]
>>
>> +    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
> Richard pointed out that checking the cur_insn_len looks a bit strange.
>
> He's right, the length of the instruction has no impact. We should
> instead just check for ext_zama16b.
>
> Do you mind sending a follow-up patch to remove all of the instruction
> length checks?

Sure. I hadn't noticed that these instruction decode functions, such as
trans_fld, will never be RVC instructions.

Thanks,
Zhiwei

>
> Alistair
>
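For reference, the follow-up Alistair asks for would leave only the extension check. A hypothetical helper sketching that shape (an assumption about the future cleanup, not code from this series):

```c
/*
 * Hypothetical helper sketching the follow-up cleanup discussed above
 * (an assumption, not code from this series): with the instruction
 * length test removed, only the extension flag decides whether the
 * 16-byte-granule atomicity hint is added to the MemOp.
 */
static MemOp zama16b_memop(DisasContext *ctx, MemOp memop)
{
    if (ctx->cfg_ptr->ext_zama16b) {
        memop |= MO_ATOM_WITHIN16;
    }
    return memop;
}
```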
```diff
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index d85e54b475..ddbfae37e5 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -83,6 +83,7 @@ struct RISCVCPUConfig {
     bool ext_zdinx;
     bool ext_zaamo;
     bool ext_zacas;
+    bool ext_zama16b;
     bool ext_zalrsc;
     bool ext_zawrs;
     bool ext_zfa;
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index f4f8287a6d..de9c06904f 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -118,6 +118,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
     ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11),
     ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo),
     ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas),
+    ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b),
     ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc),
     ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
     ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
@@ -1476,6 +1477,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
     MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false),
     MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false),
     MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false),
+    MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false),
     MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false),
     MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false),
     MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true),
diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc
index 4a9e4591d1..eb080baddd 100644
--- a/target/riscv/insn_trans/trans_rva.c.inc
+++ b/target/riscv/insn_trans/trans_rva.c.inc
@@ -103,6 +103,12 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a,
     TCGv dest = dest_gpr(ctx, a->rd);
     TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE);
 
+    if (ctx->cfg_ptr->ext_zama16b) {
+        mop |= MO_ATOM_WITHIN16;
+    } else {
+        mop |= MO_ALIGN;
+    }
+
     decode_save_opc(ctx);
     src1 = get_address(ctx, a->rs1, 0);
     func(dest, src1, src2, ctx->mem_idx, mop);
@@ -126,55 +132,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
 static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESL);
 }
 
 static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESL);
 }
 
 static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL);
 }
 
 static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL);
 }
 
 static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL);
 }
 
 static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL);
 }
 
 static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL);
 }
 
 static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL);
 }
 
 static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
 {
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL);
 }
 
 static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
@@ -195,61 +201,61 @@ static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ);
 }
 
 static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ);
 }
 
 static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ);
 }
 
 static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ);
 }
 
 static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ);
 }
 
 static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ);
 }
 
 static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ);
 }
 
 static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ);
 }
 
 static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
 {
     REQUIRE_64BIT(ctx);
     REQUIRE_A_OR_ZAAMO(ctx);
-    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ));
+    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ);
 }
diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc
index d9ce9e407f..1f5fac65a2 100644
--- a/target/riscv/insn_trans/trans_rvd.c.inc
+++ b/target/riscv/insn_trans/trans_rvd.c.inc
@@ -42,13 +42,18 @@
 static bool trans_fld(DisasContext *ctx, arg_fld *a)
 {
     TCGv addr;
+    MemOp memop = MO_TEUQ;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVD);
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
+
     decode_save_opc(ctx);
     addr = get_address(ctx, a->rs1, a->imm);
-    tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop);
 
     mark_fs_dirty(ctx);
     return true;
@@ -57,13 +62,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a)
 static bool trans_fsd(DisasContext *ctx, arg_fsd *a)
 {
     TCGv addr;
+    MemOp memop = MO_TEUQ;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVD);
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
+
     decode_save_opc(ctx);
     addr = get_address(ctx, a->rs1, a->imm);
-    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ);
+    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
 
     return true;
 }
diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc
index 97a368970b..f771aa1939 100644
--- a/target/riscv/insn_trans/trans_rvf.c.inc
+++ b/target/riscv/insn_trans/trans_rvf.c.inc
@@ -43,14 +43,19 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
 {
     TCGv_i64 dest;
     TCGv addr;
+    MemOp memop = MO_TEUL;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
+
     decode_save_opc(ctx);
     addr = get_address(ctx, a->rs1, a->imm);
     dest = cpu_fpr[a->rd];
-    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL);
+    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop);
     gen_nanbox_s(dest, dest);
 
     mark_fs_dirty(ctx);
@@ -60,13 +65,18 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a)
 static bool trans_fsw(DisasContext *ctx, arg_fsw *a)
 {
     TCGv addr;
+    MemOp memop = MO_TEUL;
 
     REQUIRE_FPU;
     REQUIRE_EXT(ctx, RVF);
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
+
     decode_save_opc(ctx);
     addr = get_address(ctx, a->rs1, a->imm);
-    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL);
+    tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop);
 
     return true;
 }
diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc
index ad40d3e87f..98e3806d5e 100644
--- a/target/riscv/insn_trans/trans_rvi.c.inc
+++ b/target/riscv/insn_trans/trans_rvi.c.inc
@@ -268,6 +268,9 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop)
 {
     bool out;
 
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
     decode_save_opc(ctx);
     if (get_xl(ctx) == MXL_RV128) {
         out = gen_load_i128(ctx, a, memop);
@@ -366,6 +369,9 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop)
 
 static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop)
 {
+    if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) {
+        memop |= MO_ATOM_WITHIN16;
+    }
     decode_save_opc(ctx);
     if (get_xl(ctx) == MXL_RV128) {
         return gen_store_i128(ctx, a, memop);
```
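With the series applied, the new behaviour can be exercised from a guest. The snippet below is a sketch under two assumptions: the CPU property generated by MULTI_EXT_CFG_BOOL above is enabled on the command line (e.g. `-cpu rv64,zama16b=on`), and the compiler emits the misaligned access as a single ld.

```c
#include <stdint.h>

/* 16-byte alignment so granule boundaries are known statically. */
_Alignas(16) static volatile uint8_t buf[32];

/*
 * Guest-side sketch (assumes RV64 built and run with Zama16b enabled,
 * e.g. "-cpu rv64,zama16b=on").  The load targets bytes 4..11 of buf:
 * misaligned for an 8-byte access, but wholly inside the granule
 * [0, 16), so with Zama16b it must neither trap for misalignment nor
 * tear.  The misaligned pointer cast is undefined behaviour in ISO C
 * and is used here only as a target-specific test idiom.
 */
uint64_t misaligned_within_granule(void)
{
    volatile uint64_t *p = (volatile uint64_t *)(uintptr_t)(buf + 4);
    return *p;   /* ideally a single 8-byte ld at buf+4 */
}
```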