@@ -563,7 +563,5 @@ static inline uint32_t tb_cflags(const TranslationBlock *tb)
 }
-/* current cflags for hashing/comparison */
-uint32_t curr_cflags(CPUState *cpu);
 /* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
@@ -17,6 +17,7 @@ struct TCGModuleOps {
void (*tb_invalidate_phys_range)(tb_page_addr_t start, tb_page_addr_t end);
void (*tb_check_watchpoint)(CPUState *cpu, uintptr_t retaddr);
bool (*cpu_restore_state)(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
+ uint32_t (*curr_cflags)(CPUState *cpu);
};
extern struct TCGModuleOps tcg;
@@ -145,7 +145,7 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
}
#endif /* CONFIG USER ONLY */
-uint32_t curr_cflags(CPUState *cpu)
+static uint32_t curr_cflags(CPUState *cpu)
{
uint32_t cflags = cpu->tcg_cflags;
@@ -1036,6 +1036,7 @@ static void tcg_module_ops_exec(void)
{
tcg.tcg_exec_realizefn = tcg_exec_realizefn;
tcg.tcg_exec_unrealizefn = tcg_exec_unrealizefn;
+ tcg.curr_cflags = curr_cflags;
}
type_init(tcg_module_ops_exec);
@@ -38,6 +38,11 @@ static bool cpu_restore_state_stub(CPUState *cpu, uintptr_t searched_pc, bool wi
return false;
}
+static uint32_t curr_cflags_stub(CPUState *cpu)
+{
+ return 0;
+}
+
struct TCGModuleOps tcg = {
.tlb_flush = update_cpu_stub,
.tlb_flush_page = tlb_flush_page_stub,
@@ -51,4 +56,5 @@ struct TCGModuleOps tcg = {
.tb_invalidate_phys_range = tb_invalidate_phys_range_stub,
.tb_check_watchpoint = tb_check_watchpoint_stub,
.cpu_restore_state = cpu_restore_state_stub,
+ .curr_cflags = curr_cflags_stub,
};
@@ -1761,7 +1761,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
if (current_tb_modified) {
page_collection_unlock(pages);
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags(cpu);
+ cpu->cflags_next_tb = 1 | tcg.curr_cflags(cpu);
mmap_unlock();
cpu_loop_exit_noexc(cpu);
}
@@ -1900,7 +1900,7 @@ static bool tb_invalidate_phys_page(tb_page_addr_t addr, uintptr_t pc)
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags(cpu);
+ cpu->cflags_next_tb = 1 | tcg.curr_cflags(cpu);
return true;
}
#endif
@@ -1976,7 +1976,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
* operations only (which execute after completion) so we don't
* double instrument the instruction.
*/
- cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
+ cpu->cflags_next_tb = tcg.curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
"cpu_io_recompile: rewound execution of TB to "
@@ -911,7 +911,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
*/
if (!cpu->can_do_io) {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | CF_LAST_IO | curr_cflags(cpu);
+ cpu->cflags_next_tb = 1 | CF_LAST_IO | tcg.curr_cflags(cpu);
cpu_loop_exit_restore(cpu, ra);
}
/*
@@ -944,7 +944,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
cpu_loop_exit_restore(cpu, ra);
} else {
/* Force execution of one insn next time. */
- cpu->cflags_next_tb = 1 | curr_cflags(cpu);
+ cpu->cflags_next_tb = 1 | tcg.curr_cflags(cpu);
mmap_unlock();
if (ra) {
tcg.cpu_restore_state(cpu, ra, true);
Signed-off-by: Gerd Hoffmann <kraxel@redhat.com> --- include/exec/exec-all.h | 2 -- include/tcg/tcg-module.h | 1 + accel/tcg/cpu-exec.c | 3 ++- accel/tcg/tcg-module.c | 6 ++++++ accel/tcg/translate-all.c | 6 +++--- softmmu/physmem.c | 4 ++-- 6 files changed, 14 insertions(+), 8 deletions(-)