@@ -926,6 +926,7 @@ static Property riscv_cpu_extensions[] = {
DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
+ DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
DEFINE_PROP_BOOL("xtheadxmae", RISCVCPU, cfg.ext_xtheadxmae, false),
@@ -446,6 +446,7 @@ struct RISCVCPUConfig {
bool ext_xtheadcmo;
bool ext_xtheadcondmov;
bool ext_xtheadmac;
+ bool ext_xtheadmemidx;
bool ext_xtheadmempair;
bool ext_xtheadsync;
bool ext_xtheadxmae;
@@ -374,3 +374,380 @@ static bool trans_th_swd(DisasContext *ctx, arg_th_pair *a)
{
return gen_storepair_tl(ctx, a, MO_TESL, 3);
}
+
+/*
+ * Load with memop from indexed address and add (sext(imm5) << imm2) to rs1.
+ * If !preinc, then the address is rs1.
+ * If preinc, then the address is rs1 + (sext(imm5) << imm2).
+ * Returns false (illegal instruction) when rd == rs1, because the
+ * address write-back would clobber the loaded value.
+ */
+static bool gen_load_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop,
+                         bool preinc)
+{
+    TCGv rd, base, addr;
+    target_long offs;
+
+    /*
+     * NOTE(review): with rd == rs1 the final register value is ambiguous
+     * (loaded data vs. incremented address); reject the encoding —
+     * confirm against the XTheadMemIdx specification.
+     */
+    if (a->rd == a->rs1) {
+        return false;
+    }
+
+    rd = dest_gpr(ctx, a->rd);
+    base = get_gpr(ctx, a->rs1, EXT_NONE);
+    addr = tcg_temp_new();
+
+    /*
+     * The offset is a translation-time constant: sext(imm5) << imm2.
+     * Sign-extend arithmetically and multiply rather than shift, since
+     * left-shifting a negative value is undefined behaviour in C.
+     */
+    offs = (a->imm5 >= 16 ? a->imm5 - 32 : a->imm5) *
+           ((target_long)1 << a->imm2);
+
+    if (preinc) {
+        tcg_gen_addi_tl(addr, base, offs);
+        if (get_xl(ctx) == MXL_RV32) {
+            /* RV32: addresses are the low 32 bits only. */
+            tcg_gen_ext32u_tl(addr, addr);
+        }
+    } else {
+        tcg_gen_mov_tl(addr, base);
+    }
+
+    tcg_gen_qemu_ld_tl(rd, addr, ctx->mem_idx, memop);
+
+    if (!preinc) {
+        /* Post-increment: compute the write-back value after the load. */
+        tcg_gen_addi_tl(addr, base, offs);
+        if (get_xl(ctx) == MXL_RV32) {
+            tcg_gen_ext32u_tl(addr, addr);
+        }
+    }
+
+    gen_set_gpr(ctx, a->rd, rd);
+    gen_set_gpr(ctx, a->rs1, addr);
+
+    tcg_temp_free(addr);
+    return true;
+}
+
+/*
+ * Store with memop to indexed address and add (sext(imm5) << imm2) to rs1.
+ * If !preinc, then the address is rs1.
+ * If preinc, then the address is rs1 + (sext(imm5) << imm2).
+ */
+static bool gen_store_inc(DisasContext *ctx, arg_th_meminc *a, MemOp memop,
+                          bool preinc)
+{
+    TCGv data = get_gpr(ctx, a->rd, EXT_NONE);
+    TCGv base = get_gpr(ctx, a->rs1, EXT_NONE);
+    TCGv addr = tcg_temp_new();
+    /*
+     * The offset is a translation-time constant: sext(imm5) << imm2.
+     * Sign-extend arithmetically and multiply rather than shift, since
+     * left-shifting a negative value is undefined behaviour in C.
+     */
+    target_long offs = (a->imm5 >= 16 ? a->imm5 - 32 : a->imm5) *
+                       ((target_long)1 << a->imm2);
+
+    if (preinc) {
+        tcg_gen_addi_tl(addr, base, offs);
+        if (get_xl(ctx) == MXL_RV32) {
+            /* RV32: addresses are the low 32 bits only. */
+            tcg_gen_ext32u_tl(addr, addr);
+        }
+    } else {
+        tcg_gen_mov_tl(addr, base);
+    }
+
+    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
+
+    if (!preinc) {
+        /* Post-increment: compute the write-back value after the store. */
+        tcg_gen_addi_tl(addr, base, offs);
+        if (get_xl(ctx) == MXL_RV32) {
+            tcg_gen_ext32u_tl(addr, addr);
+        }
+    }
+
+    gen_set_gpr(ctx, a->rs1, addr);
+
+    tcg_temp_free(addr);
+    return true;
+}
+
+/*
+ * Load-and-increment handlers.  The "ia" forms increment rs1 after the
+ * access, the "ib" forms before it.  Doubleword and unsigned-word loads
+ * exist on RV64 only, hence REQUIRE_64BIT.
+ */
+static bool trans_th_ldia(DisasContext *ctx, arg_th_meminc *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_load_inc(ctx, a, MO_TESQ, false);
+}
+
+static bool trans_th_ldib(DisasContext *ctx, arg_th_meminc *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_load_inc(ctx, a, MO_TESQ, true);
+}
+
+static bool trans_th_lwia(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_TESL, false);
+}
+
+static bool trans_th_lwib(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_TESL, true);
+}
+
+static bool trans_th_lwuia(DisasContext *ctx, arg_th_meminc *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_load_inc(ctx, a, MO_TEUL, false);
+}
+
+static bool trans_th_lwuib(DisasContext *ctx, arg_th_meminc *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_load_inc(ctx, a, MO_TEUL, true);
+}
+
+static bool trans_th_lhia(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_TESW, false);
+}
+
+static bool trans_th_lhib(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_TESW, true);
+}
+
+static bool trans_th_lhuia(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_TEUW, false);
+}
+
+static bool trans_th_lhuib(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_TEUW, true);
+}
+
+static bool trans_th_lbia(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_SB, false);
+}
+
+static bool trans_th_lbib(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_SB, true);
+}
+
+static bool trans_th_lbuia(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_UB, false);
+}
+
+static bool trans_th_lbuib(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_load_inc(ctx, a, MO_UB, true);
+}
+
+/*
+ * Store-and-increment handlers: "ia" increments rs1 after the store,
+ * "ib" before it.  Doubleword stores exist on RV64 only.
+ */
+static bool trans_th_sdia(DisasContext *ctx, arg_th_meminc *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_store_inc(ctx, a, MO_TESQ, false);
+}
+
+static bool trans_th_sdib(DisasContext *ctx, arg_th_meminc *a)
+{
+    /* Doubleword store: RV64 only (matches th.sdia and th.ldib). */
+    REQUIRE_64BIT(ctx);
+    return gen_store_inc(ctx, a, MO_TESQ, true);
+}
+
+/*
+ * Word, halfword and byte store-and-increment forms are valid in both
+ * RV32 and RV64, so no 64-bit gate is needed here.
+ */
+static bool trans_th_swia(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_store_inc(ctx, a, MO_TESL, false);
+}
+
+static bool trans_th_swib(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_store_inc(ctx, a, MO_TESL, true);
+}
+
+static bool trans_th_shia(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_store_inc(ctx, a, MO_TESW, false);
+}
+
+static bool trans_th_shib(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_store_inc(ctx, a, MO_TESW, true);
+}
+
+static bool trans_th_sbia(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_store_inc(ctx, a, MO_SB, false);
+}
+
+static bool trans_th_sbib(DisasContext *ctx, arg_th_meminc *a)
+{
+    return gen_store_inc(ctx, a, MO_SB, true);
+}
+
+/*
+ * Load with memop from indexed address.
+ * If !zero_extend_offset, then address is rs1 + (rs2 << imm2).
+ * If zero_extend_offset, then address is rs1 + (zext(rs2[31:0]) << imm2).
+ */
+static bool gen_load_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
+                         bool zero_extend_offset)
+{
+    TCGv rd = dest_gpr(ctx, a->rd);
+    TCGv base = get_gpr(ctx, a->rs1, EXT_NONE);
+    TCGv offs = get_gpr(ctx, a->rs2, EXT_NONE);
+    TCGv addr = tcg_temp_new();
+
+    /* Scale the (optionally zero-extended) index register by 1 << imm2. */
+    if (zero_extend_offset) {
+        tcg_gen_extract_tl(addr, offs, 0, 32);
+    } else {
+        tcg_gen_mov_tl(addr, offs);
+    }
+    tcg_gen_shli_tl(addr, addr, a->imm2);
+    tcg_gen_add_tl(addr, base, addr);
+
+    /* RV32: addresses are the low 32 bits only. */
+    if (get_xl(ctx) == MXL_RV32) {
+        tcg_gen_ext32u_tl(addr, addr);
+    }
+
+    tcg_gen_qemu_ld_tl(rd, addr, ctx->mem_idx, memop);
+    gen_set_gpr(ctx, a->rd, rd);
+
+    tcg_temp_free(addr);
+    return true;
+}
+
+/*
+ * Store with memop to indexed address.
+ * If !zero_extend_offset, then address is rs1 + (rs2 << imm2).
+ * If zero_extend_offset, then address is rs1 + (zext(rs2[31:0]) << imm2).
+ */
+static bool gen_store_idx(DisasContext *ctx, arg_th_memidx *a, MemOp memop,
+                          bool zero_extend_offset)
+{
+    TCGv data = get_gpr(ctx, a->rd, EXT_NONE);
+    TCGv base = get_gpr(ctx, a->rs1, EXT_NONE);
+    TCGv offs = get_gpr(ctx, a->rs2, EXT_NONE);
+    TCGv addr = tcg_temp_new();
+
+    /* Scale the (optionally zero-extended) index register by 1 << imm2. */
+    if (zero_extend_offset) {
+        tcg_gen_extract_tl(addr, offs, 0, 32);
+    } else {
+        tcg_gen_mov_tl(addr, offs);
+    }
+    tcg_gen_shli_tl(addr, addr, a->imm2);
+    tcg_gen_add_tl(addr, base, addr);
+
+    /* RV32: addresses are the low 32 bits only. */
+    if (get_xl(ctx) == MXL_RV32) {
+        tcg_gen_ext32u_tl(addr, addr);
+    }
+
+    tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop);
+
+    tcg_temp_free(addr);
+    return true;
+}
+
+/*
+ * Register-indexed loads: address is rs1 + (rs2 << imm2).
+ * Doubleword and unsigned-word loads exist on RV64 only.
+ */
+static bool trans_th_lrd(DisasContext *ctx, arg_th_memidx *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_load_idx(ctx, a, MO_TESQ, false);
+}
+
+static bool trans_th_lrw(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_TESL, false);
+}
+
+static bool trans_th_lrwu(DisasContext *ctx, arg_th_memidx *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_load_idx(ctx, a, MO_TEUL, false);
+}
+
+static bool trans_th_lrh(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_TESW, false);
+}
+
+static bool trans_th_lrhu(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_TEUW, false);
+}
+
+static bool trans_th_lrb(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_SB, false);
+}
+
+static bool trans_th_lrbu(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_UB, false);
+}
+
+static bool trans_th_srd(DisasContext *ctx, arg_th_memidx *a)
+{
+    /* Doubleword store: RV64 only (matches th.lrd). */
+    REQUIRE_64BIT(ctx);
+    return gen_store_idx(ctx, a, MO_TESQ, false);
+}
+
+static bool trans_th_srw(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_store_idx(ctx, a, MO_TESL, false);
+}
+
+static bool trans_th_srh(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_store_idx(ctx, a, MO_TESW, false);
+}
+
+static bool trans_th_srb(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_store_idx(ctx, a, MO_SB, false);
+}
+
+/* The "lur"/"sur" variants zero-extend the low 32 bits of rs2 as index. */
+static bool trans_th_lurd(DisasContext *ctx, arg_th_memidx *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_load_idx(ctx, a, MO_TESQ, true);
+}
+
+/*
+ * Zero-extended register-indexed loads: address is
+ * rs1 + (zext(rs2[31:0]) << imm2).
+ */
+static bool trans_th_lurw(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_TESL, true);
+}
+
+static bool trans_th_lurwu(DisasContext *ctx, arg_th_memidx *a)
+{
+    REQUIRE_64BIT(ctx);
+    return gen_load_idx(ctx, a, MO_TEUL, true);
+}
+
+static bool trans_th_lurh(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_TESW, true);
+}
+
+static bool trans_th_lurhu(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_TEUW, true);
+}
+
+static bool trans_th_lurb(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_SB, true);
+}
+
+static bool trans_th_lurbu(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_load_idx(ctx, a, MO_UB, true);
+}
+
+static bool trans_th_surd(DisasContext *ctx, arg_th_memidx *a)
+{
+    /* Doubleword store: RV64 only (matches th.lurd). */
+    REQUIRE_64BIT(ctx);
+    return gen_store_idx(ctx, a, MO_TESQ, true);
+}
+
+/*
+ * Zero-extended register-indexed stores: address is
+ * rs1 + (zext(rs2[31:0]) << imm2).
+ */
+static bool trans_th_surw(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_store_idx(ctx, a, MO_TESL, true);
+}
+
+static bool trans_th_surh(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_store_idx(ctx, a, MO_TESW, true);
+}
+
+static bool trans_th_surb(DisasContext *ctx, arg_th_memidx *a)
+{
+    return gen_store_idx(ctx, a, MO_SB, true);
+}
+
@@ -8,6 +8,7 @@ gen = [
decodetree.process('xtheadcmo.decode', extra_args: '--static-decode=decode_xtheadcmo'),
decodetree.process('xtheadcondmov.decode', extra_args: '--static-decode=decode_xtheadcondmov'),
decodetree.process('xtheadmac.decode', extra_args: '--static-decode=decode_xtheadmac'),
+ decodetree.process('xtheadmemidx.decode', extra_args: '--static-decode=decode_xtheadmemidx'),
decodetree.process('xtheadmempair.decode', extra_args: '--static-decode=decode_xtheadmempair'),
decodetree.process('xtheadsync.decode', extra_args: '--static-decode=decode_xtheadsync'),
decodetree.process('XVentanaCondOps.decode', extra_args: '--static-decode=decode_XVentanaCodeOps'),
@@ -138,6 +138,7 @@ MATERIALISE_EXT_PREDICATE(xtheadbs)
MATERIALISE_EXT_PREDICATE(xtheadcmo)
MATERIALISE_EXT_PREDICATE(xtheadcondmov);
MATERIALISE_EXT_PREDICATE(xtheadmac);
+MATERIALISE_EXT_PREDICATE(xtheadmemidx);
MATERIALISE_EXT_PREDICATE(xtheadmempair);
MATERIALISE_EXT_PREDICATE(xtheadsync)
MATERIALISE_EXT_PREDICATE(XVentanaCondOps)
@@ -732,6 +733,7 @@ static int ex_rvc_shifti(DisasContext *ctx, int imm)
#include "decode-xtheadcmo.c.inc"
#include "decode-xtheadcondmov.c.inc"
#include "decode-xtheadmac.c.inc"
+#include "decode-xtheadmemidx.c.inc"
#include "decode-xtheadmempair.c.inc"
#include "decode-xtheadsync.c.inc"
#include "decode-XVentanaCondOps.c.inc"
@@ -1060,6 +1062,7 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
{ has_xtheadcmo_p, decode_xtheadcmo },
{ has_xtheadcondmov_p, decode_xtheadcondmov },
{ has_xtheadmac_p, decode_xtheadmac },
+ { has_xtheadmemidx_p, decode_xtheadmemidx },
{ has_xtheadmempair_p, decode_xtheadmempair },
{ has_xtheadsync_p, decode_xtheadsync },
{ has_XVentanaCondOps_p, decode_XVentanaCodeOps },
new file mode 100644
@@ -0,0 +1,73 @@
+#
+# RISC-V instruction decode for the XTheadMemIdx extension
+#
+# Copyright (c) 2022 Christoph Muellner, christoph.muellner@vrull.eu
+#
+# SPDX-License-Identifier: LGPL-2.1-or-later
+#
+# The XTheadMemIdx extension provides GPR memory operations.
+#
+# It is documented in
+# https://github.com/T-head-Semi/thead-extension-spec/releases/download/2.0.0/xthead-2022-09-05-2.0.0.pdf
+
+# Fields
+# Note: imm5 is extracted unsigned here; the translator sign-extends it
+# at translation time, so the net effect matches a signed immediate field.
+%imm2 25:2
+%imm5 20:5
+%rs2 20:5
+%rs1 15:5
+%rd 7:5
+
+# Argument sets
+&th_meminc rd rs1 imm5 imm2
+&th_memidx rd rs1 rs2 imm2
+
+# Formats:
+@th_meminc ..... .. ..... ..... ... ..... ....... &th_meminc %rd %rs1 %imm5 %imm2
+@th_memidx ..... .. ..... ..... ... ..... ....... &th_memidx %rd %rs1 %rs2 %imm2
+
+# Load/store and increment rs1 by sext(imm5) << imm2:
+# "ia" increments after the access, "ib" before it.
+th_ldia 01111 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_ldib 01101 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lwia 01011 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lwib 01001 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lwuia 11011 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lwuib 11001 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lhia 00111 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lhib 00101 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lhuia 10111 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lhuib 10101 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lbia 00011 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lbib 00001 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lbuia 10011 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_lbuib 10001 .. ..... ..... 100 ..... 0001011 @th_meminc
+th_sdia 01111 .. ..... ..... 101 ..... 0001011 @th_meminc
+th_sdib 01101 .. ..... ..... 101 ..... 0001011 @th_meminc
+th_swia 01011 .. ..... ..... 101 ..... 0001011 @th_meminc
+th_swib 01001 .. ..... ..... 101 ..... 0001011 @th_meminc
+th_shia 00111 .. ..... ..... 101 ..... 0001011 @th_meminc
+th_shib 00101 .. ..... ..... 101 ..... 0001011 @th_meminc
+th_sbia 00011 .. ..... ..... 101 ..... 0001011 @th_meminc
+th_sbib 00001 .. ..... ..... 101 ..... 0001011 @th_meminc
+
+# Register-indexed load/store: address is rs1 + (rs2 << imm2).
+th_lrd 01100 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lrw 01000 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lrwu 11000 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lrh 00100 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lrhu 10100 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lrb 00000 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lrbu 10000 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_srd 01100 .. ..... ..... 101 ..... 0001011 @th_memidx
+th_srw 01000 .. ..... ..... 101 ..... 0001011 @th_memidx
+th_srh 00100 .. ..... ..... 101 ..... 0001011 @th_memidx
+th_srb 00000 .. ..... ..... 101 ..... 0001011 @th_memidx
+
+# Zero-extended-index load/store: address is rs1 + (zext(rs2[31:0]) << imm2).
+th_lurd 01110 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lurw 01010 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lurwu 11010 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lurh 00110 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lurhu 10110 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lurb 00010 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_lurbu 10010 .. ..... ..... 100 ..... 0001011 @th_memidx
+th_surd 01110 .. ..... ..... 101 ..... 0001011 @th_memidx
+th_surw 01010 .. ..... ..... 101 ..... 0001011 @th_memidx
+th_surh 00110 .. ..... ..... 101 ..... 0001011 @th_memidx
+th_surb 00010 .. ..... ..... 101 ..... 0001011 @th_memidx