@@ -658,16 +658,18 @@ static vaddr riscv_cpu_get_pc(CPUState *cs)
static void riscv_cpu_synchronize_from_tb(CPUState *cs,
const TranslationBlock *tb)
{
- RISCVCPU *cpu = RISCV_CPU(cs);
- CPURISCVState *env = &cpu->env;
- RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
+ if (!(tb_cflags(tb) & CF_PCREL)) {
+ RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
+ RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
- tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
+ tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
- if (xl == MXL_RV32) {
- env->pc = (int32_t) tb->pc;
- } else {
- env->pc = tb->pc;
+ if (xl == MXL_RV32) {
+ env->pc = (int32_t) tb->pc;
+ } else {
+ env->pc = tb->pc;
+ }
}
}
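
Note (not part of the patch): CF_PCREL exists so that generated code does not bake in
absolute guest addresses, letting a translated block be reused when the same code is
reached through a different virtual mapping. For such TBs, tb->pc is not a usable
source for env->pc, so the hook above leaves env->pc alone; the value is instead
recovered through riscv_restore_state_to_opc in the next hunk. A minimal standalone
sketch of that guard, with illustrative names and hypothetical values:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t env_pc;

    /* Only a non-pcrel TB carries an authoritative absolute pc. */
    static void sync_from_tb(bool tb_is_pcrel, uint64_t tb_pc)
    {
        if (!tb_is_pcrel) {
            env_pc = tb_pc;
        }
        /* pcrel: env_pc is left as-is and recovered from unwind data */
    }

    int main(void)
    {
        env_pc = 0x80000000;
        sync_from_tb(true, 0);            /* CF_PCREL TB: pc untouched */
        assert(env_pc == 0x80000000);
        sync_from_tb(false, 0x80000123);  /* conventional TB */
        assert(env_pc == 0x80000123);
        return 0;
    }
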
@@ -693,11 +695,18 @@ static void riscv_restore_state_to_opc(CPUState *cs,
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
+ target_ulong pc;
+
+ if (tb_cflags(tb) & CF_PCREL) {
+ pc = (env->pc & TARGET_PAGE_MASK) | data[0];
+ } else {
+ pc = data[0];
+ }
if (xl == MXL_RV32) {
- env->pc = (int32_t)data[0];
+ env->pc = (int32_t)pc;
} else {
- env->pc = data[0];
+ env->pc = pc;
}
env->bins = data[1];
}
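
Note (not part of the patch): under CF_PCREL the per-instruction unwind word data[0]
holds only the offset of the instruction within its guest page (see the
riscv_tr_insn_start hunk at the end of this patch, which records
pc_next & ~TARGET_PAGE_MASK), so the page base is taken from the current env->pc.
A standalone arithmetic sketch assuming a hypothetical 4 KiB page size; names are
illustrative, not QEMU API:

    #include <assert.h>
    #include <stdint.h>

    #define GUEST_PAGE_MASK (~(uint64_t)0xfff)  /* stand-in for TARGET_PAGE_MASK */

    /* Mirror of the restore logic above, on plain integers. */
    static uint64_t restore_pc(uint64_t env_pc, uint64_t data0, int pcrel)
    {
        return pcrel ? ((env_pc & GUEST_PAGE_MASK) | data0) : data0;
    }

    int main(void)
    {
        /* CF_PCREL: data[0] recorded only the in-page offset 0x024. */
        assert(restore_pc(0x20001000, 0x024, 1) == 0x20001024);
        /* !CF_PCREL: data[0] already holds the absolute pc. */
        assert(restore_pc(0x20001000, 0x80001024, 0) == 0x80001024);
        return 0;
    }
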
@@ -38,7 +38,9 @@ static bool trans_lui(DisasContext *ctx, arg_lui *a)
static bool trans_auipc(DisasContext *ctx, arg_auipc *a)
{
- gen_set_gpri(ctx, a->rd, a->imm + ctx->base.pc_next);
+ TCGv target_pc = dest_gpr(ctx, a->rd);
+ gen_get_target_pc(target_pc, ctx, a->imm + ctx->base.pc_next);
+ gen_set_gpr(ctx, a->rd, target_pc);
return true;
}
@@ -52,6 +54,7 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
{
TCGLabel *misaligned = NULL;
TCGv target_pc = tcg_temp_new();
+ TCGv succ_pc = dest_gpr(ctx, a->rd);
tcg_gen_addi_tl(target_pc, get_gpr(ctx, a->rs1, EXT_NONE), a->imm);
tcg_gen_andi_tl(target_pc, target_pc, (target_ulong)-2);
@@ -68,7 +71,9 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a)
tcg_gen_brcondi_tl(TCG_COND_NE, t0, 0x0, misaligned);
}
- gen_set_gpri(ctx, a->rd, ctx->pc_succ_insn);
+ gen_get_target_pc(succ_pc, ctx, ctx->pc_succ_insn);
+ gen_set_gpr(ctx, a->rd, succ_pc);
+
tcg_gen_mov_tl(cpu_pc, target_pc);
lookup_and_goto_ptr(ctx);
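
Note (not part of the patch): the link value now goes through gen_get_target_pc as
well, so under CF_PCREL it is derived from cpu_pc rather than emitted as an absolute
constant. Also worth noting: target_pc is computed into a temporary from rs1 before
the link register is written, which keeps "jalr rd, rs1, imm" correct when rd == rs1.
A plain-integer sketch of that ordering (illustrative names, not QEMU API):

    #include <assert.h>
    #include <stdint.h>

    static void jalr_model(uint64_t *regs, uint64_t *pc,
                           int rd, int rs1, int64_t imm, uint64_t succ_pc)
    {
        uint64_t target = (regs[rs1] + imm) & ~(uint64_t)1;  /* latch target first */

        if (rd != 0) {
            regs[rd] = succ_pc;   /* link written only after target is latched */
        }
        *pc = target;
    }

    int main(void)
    {
        uint64_t regs[32] = { 0 }, pc = 0x1000;

        regs[5] = 0x4000;
        jalr_model(regs, &pc, 5, 5, 8, 0x1004);   /* jalr x5, x5, 8 */
        assert(pc == 0x4008 && regs[5] == 0x1004);
        return 0;
    }
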
@@ -159,6 +164,7 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
TCGv src1 = get_gpr(ctx, a->rs1, EXT_SIGN);
TCGv src2 = get_gpr(ctx, a->rs2, EXT_SIGN);
target_ulong next_pc;
+ target_ulong orig_pc_save = ctx->pc_save;
if (get_xl(ctx) == MXL_RV128) {
TCGv src1h = get_gprh(ctx, a->rs1);
@@ -175,6 +181,7 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
gen_set_label(l); /* branch taken */
+ ctx->pc_save = orig_pc_save;
next_pc = ctx->base.pc_next + a->imm;
if (!has_ext(ctx, RVC) && (next_pc & 0x3)) {
/* misaligned */
@@ -182,8 +189,9 @@ static bool gen_branch(DisasContext *ctx, arg_b *a, TCGCond cond)
gen_get_target_pc(target_pc, ctx, next_pc);
gen_exception_inst_addr_mis(ctx, target_pc);
} else {
- gen_goto_tb(ctx, 0, ctx->base.pc_next + a->imm);
+ gen_goto_tb(ctx, 0, next_pc);
}
+ ctx->pc_save = -1;
ctx->base.is_jmp = DISAS_NORETURN;
return true;
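
Note (not part of the patch): ctx->pc_save tracks the guest pc value that cpu_pc is
known to hold at the current emission point. The not-taken gen_goto_tb advances it to
the fall-through address, but at run time the taken path jumps over that update, so
pc_save is rolled back to orig_pc_save before the taken-path code is generated, and
set to -1 (unknown) once both paths have been emitted. A standalone sketch of the
displacement a CF_PCREL addi would bake into the TB in each case (hypothetical
addresses):

    #include <assert.h>
    #include <stdint.h>

    /* Wrapping displacement that pcrel code folds into an addi on cpu_pc. */
    static uint64_t pc_disp(uint64_t dest, uint64_t pc_save)
    {
        return dest - pc_save;
    }

    int main(void)
    {
        uint64_t orig_pc_save = 0x1000;  /* cpu_pc on entry to the branch insn */
        uint64_t fallthrough  = 0x1008;  /* pc_save left by the not-taken path */
        uint64_t taken        = 0x0f00;  /* destination of the taken path      */

        /* The taken path still sees cpu_pc == orig_pc_save at run time, so the
         * displacement must be computed against it, not against the value the
         * not-taken goto_tb left in pc_save. */
        assert(orig_pc_save + pc_disp(taken, orig_pc_save) == taken);
        assert(orig_pc_save + pc_disp(taken, fallthrough)  != taken);
        return 0;
    }
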
@@ -59,6 +59,7 @@ typedef struct DisasContext {
DisasContextBase base;
/* pc_succ_insn points to the instruction following base.pc_next */
target_ulong pc_succ_insn;
+ target_ulong pc_save;
target_ulong priv_ver;
RISCVMXL misa_mxl_max;
RISCVMXL xl;
@@ -225,15 +226,24 @@ static void decode_save_opc(DisasContext *ctx)
static void gen_get_target_pc(TCGv target, DisasContext *ctx,
target_ulong dest)
{
- if (get_xl(ctx) == MXL_RV32) {
- dest = (int32_t)dest;
+ assert(ctx->pc_save != -1);
+ if (tb_cflags(ctx->base.tb) & CF_PCREL) {
+ tcg_gen_addi_tl(target, cpu_pc, dest - ctx->pc_save);
+ if (get_xl(ctx) == MXL_RV32) {
+ tcg_gen_ext32s_tl(target, target);
+ }
+ } else {
+ if (get_xl(ctx) == MXL_RV32) {
+ dest = (int32_t)dest;
+ }
+ tcg_gen_movi_tl(target, dest);
}
- tcg_gen_movi_tl(target, dest);
}
static void gen_set_pc_imm(DisasContext *ctx, target_ulong dest)
{
gen_get_target_pc(cpu_pc, ctx, dest);
+ ctx->pc_save = dest;
}
static void generate_exception(DisasContext *ctx, int excp)
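
Note (not part of the patch): this helper is the heart of the change. Instead of
loading an absolute destination with movi, the CF_PCREL path forms it as cpu_pc plus
a translation-time displacement (dest - ctx->pc_save), with a sign extension for
RV32, so the emitted code contains no absolute guest address. gen_set_pc_imm then
records dest in ctx->pc_save so later displacements are computed against the value
cpu_pc now holds. A plain-integer sketch with hypothetical addresses (names
illustrative, not QEMU API):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t target_pc(uint64_t cpu_pc, uint64_t dest,
                              uint64_t pc_save, int pcrel, int rv32)
    {
        uint64_t t = pcrel ? cpu_pc + (dest - pc_save) : dest;

        return rv32 ? (uint64_t)(int64_t)(int32_t)t : t;   /* models ext32s */
    }

    int main(void)
    {
        /* Translated with pc_save == 0x1000; this run entered the same code
         * with cpu_pc == 0x7f001000 at that point. The relative form still
         * lands 0x40 bytes further on. */
        assert(target_pc(0x7f001000, 0x1040, 0x1000, 1, 0) == 0x7f001040);
        /* Without CF_PCREL the absolute destination is emitted directly. */
        assert(target_pc(0x7f001000, 0x1040, 0x1000, 0, 0) == 0x1040);
        return 0;
    }
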
@@ -287,8 +297,21 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
* direct block chain benefits will be small.
*/
if (translator_use_goto_tb(&ctx->base, dest) && !ctx->itrigger) {
- tcg_gen_goto_tb(n);
- gen_set_pc_imm(ctx, dest);
+ /*
+ * For pcrel, the pc must always be up-to-date on entry to
+ * the linked TB, so that it can use simple additions for all
+ * further adjustments. For !pcrel, the linked TB is compiled
+ * to know its full virtual address, so we can delay the
+ * update to pc to the unlinked path. A long chain of links
+ * can thus avoid many updates to the PC.
+ */
+ if (tb_cflags(ctx->base.tb) & CF_PCREL) {
+ gen_set_pc_imm(ctx, dest);
+ tcg_gen_goto_tb(n);
+ } else {
+ tcg_gen_goto_tb(n);
+ gen_set_pc_imm(ctx, dest);
+ }
tcg_gen_exit_tb(ctx->base.tb, n);
} else {
gen_set_pc_imm(ctx, dest);
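
Note (not part of the patch): the ordering matters because a directly chained TB is
entered straight from the jump, without going back to the main loop. Under CF_PCREL
the destination TB only ever adjusts cpu_pc with relative additions, so cpu_pc must
already be exact on entry; without CF_PCREL the destination is compiled to know its
full virtual address, so the pc update can wait for the unlinked path. A toy model of
a chained pcrel TB (illustrative only, not QEMU API):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t cpu_pc;

    /* A chained pcrel TB covering two 4-byte insns: it only adds displacements
     * to cpu_pc, so it relies on cpu_pc being exact on entry. */
    static void linked_tb(void)
    {
        cpu_pc += 8;
    }

    static void goto_tb_exit_pcrel(uint64_t dest)
    {
        cpu_pc = dest;   /* update pc first ... */
        linked_tb();     /* ... then take the direct jump into the chained TB */
    }

    int main(void)
    {
        cpu_pc = 0x1000;
        goto_tb_exit_pcrel(0x2000);
        assert(cpu_pc == 0x2008);   /* holds only because pc was set first */
        return 0;
    }
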
@@ -555,8 +578,16 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm)
}
}
- gen_set_gpri(ctx, rd, ctx->pc_succ_insn);
- gen_goto_tb(ctx, 0, ctx->base.pc_next + imm); /* must use this for safety */
+ assert(ctx->pc_save != -1);
+ if (tb_cflags(ctx->base.tb) & CF_PCREL) {
+ TCGv succ_pc = dest_gpr(ctx, rd);
+ gen_get_target_pc(succ_pc, ctx, ctx->pc_succ_insn);
+ gen_set_gpr(ctx, rd, succ_pc);
+ } else {
+ gen_set_gpri(ctx, rd, ctx->pc_succ_insn);
+ }
+
+ gen_goto_tb(ctx, 0, next_pc); /* must use this for safety */
ctx->base.is_jmp = DISAS_NORETURN;
}
@@ -1150,6 +1181,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
RISCVCPU *cpu = RISCV_CPU(cs);
uint32_t tb_flags = ctx->base.tb->flags;
+ ctx->pc_save = ctx->base.pc_first;
ctx->pc_succ_insn = ctx->base.pc_first;
ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX);
ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS;
@@ -1195,8 +1227,13 @@ static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu)
static void riscv_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
DisasContext *ctx = container_of(dcbase, DisasContext, base);
+ target_ulong pc_next = ctx->base.pc_next;
+
+ if (tb_cflags(dcbase->tb) & CF_PCREL) {
+ pc_next &= ~TARGET_PAGE_MASK;
+ }
- tcg_gen_insn_start(ctx->base.pc_next, 0);
+ tcg_gen_insn_start(pc_next, 0);
ctx->insn_start = tcg_last_op();
}