Message ID | 20200605173422.1490-7-robert.foley@linaro.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Add Thread Sanitizer support to QEMU | expand |
Robert Foley <robert.foley@linaro.org> writes: > From: "Emilio G. Cota" <cota@braap.org> > > Signed-off-by: Emilio G. Cota <cota@braap.org> > Signed-off-by: Robert Foley <robert.foley@linaro.org> > [RF: Minor changes to fix some checkpatch errors] > --- > accel/tcg/translate-all.c | 10 +++++++++- > include/tcg/tcg.h | 3 ++- > tcg/tcg.c | 19 ++++++++++++++++--- > 3 files changed, 27 insertions(+), 5 deletions(-) > > diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c > index 42ce1dfcff..3708aab36b 100644 > --- a/accel/tcg/translate-all.c > +++ b/accel/tcg/translate-all.c > @@ -384,6 +384,11 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, > return 0; > } > > +static void tb_destroy(TranslationBlock *tb) > +{ > + qemu_spin_destroy(&tb->jmp_lock); > +} > + > bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) > { > TranslationBlock *tb; > @@ -413,6 +418,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) > /* one-shot translation, invalidate it immediately */ > tb_phys_invalidate(tb, -1); > tcg_tb_remove(tb); > + tb_destroy(tb); > } > r = true; > } > @@ -1230,7 +1236,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) > qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); > page_flush_tb(); > > - tcg_region_reset_all(); > + tcg_region_reset_all(tb_destroy); > /* XXX: flush processor icache at this point if cache flush is > expensive */ > atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); > @@ -1886,6 +1892,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, > > orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); > atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); > + tb_destroy(tb); > return existing_tb; > } > tcg_tb_insert(tb); > @@ -2235,6 +2242,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) > tb_phys_invalidate(tb->orig_tb, -1); > } > tcg_tb_remove(tb); > + tb_destroy(tb); > } > > /* TODO: If env->pc != 
tb->pc (i.e. the faulting instruction was not > diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h > index 380014ed80..c8313fdcf0 100644 > --- a/include/tcg/tcg.h > +++ b/include/tcg/tcg.h > @@ -818,8 +818,9 @@ void *tcg_malloc_internal(TCGContext *s, int size); > void tcg_pool_reset(TCGContext *s); > TranslationBlock *tcg_tb_alloc(TCGContext *s); > > +typedef void (*tb_destroy_func)(TranslationBlock *tb); > void tcg_region_init(void); > -void tcg_region_reset_all(void); > +void tcg_region_reset_all(tb_destroy_func tb_destroy); > > size_t tcg_code_size(void); > size_t tcg_code_capacity(void); > diff --git a/tcg/tcg.c b/tcg/tcg.c > index 1aa6cb47f2..7ae9dd7cf8 100644 > --- a/tcg/tcg.c > +++ b/tcg/tcg.c > @@ -502,7 +502,16 @@ size_t tcg_nb_tbs(void) > return nb_tbs; > } > > -static void tcg_region_tree_reset_all(void) > +static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data) > +{ > + TranslationBlock *tb = v; > + tb_destroy_func tb_destroy = data; > + > + tb_destroy(tb); > + return FALSE; > +} > + > +static void tcg_region_tree_reset_all(tb_destroy_func tb_destroy) > { > size_t i; > > @@ -510,6 +519,10 @@ static void tcg_region_tree_reset_all(void) > for (i = 0; i < region.n; i++) { > struct tcg_region_tree *rt = region_trees + i * tree_size; > > + if (tb_destroy != NULL) { > + g_tree_foreach(rt->tree, tcg_region_tree_traverse, tb_destroy); > + } > + Isn't tb_destroy always set? We could assert that is the case rather than make the cleaning up conditional. 
> /* Increment the refcount first so that destroy acts as a reset */ > g_tree_ref(rt->tree); > g_tree_destroy(rt->tree); > @@ -586,7 +599,7 @@ static inline bool tcg_region_initial_alloc__locked(TCGContext *s) > } > > /* Call from a safe-work context */ > -void tcg_region_reset_all(void) > +void tcg_region_reset_all(tb_destroy_func tb_destroy) > { > unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); > unsigned int i; > @@ -603,7 +616,7 @@ void tcg_region_reset_all(void) > } > qemu_mutex_unlock(&region.lock); > > - tcg_region_tree_reset_all(); > + tcg_region_tree_reset_all(tb_destroy); Could you name the variables of type tb_destroy_func differently? As the variable is only ever tb_destroy, the same as the function name, it gets confusing real quick when trying to grep for stuff. Maybe tbd_fn? That said, given the single usage, why a function pointer? Would we be just as well served by an exposed public function call from the appropriate places? Richard do you have a view here?
On Mon, 8 Jun 2020 at 10:44, Alex Bennée <alex.bennee@linaro.org> wrote: <snip> > > +static void tcg_region_tree_reset_all(tb_destroy_func tb_destroy) > > { > > size_t i; > > > > @@ -510,6 +519,10 @@ static void tcg_region_tree_reset_all(void) > > for (i = 0; i < region.n; i++) { > > struct tcg_region_tree *rt = region_trees + i * tree_size; > > > > + if (tb_destroy != NULL) { > > + g_tree_foreach(rt->tree, tcg_region_tree_traverse, tb_destroy); > > + } > > + > > Isn't tb_destroy always set? We could assert that is the case rather > than make the cleaning up conditional. I agree, tb_destroy seems to always be set, so the assert would be reasonable. > > > /* Increment the refcount first so that destroy acts as a reset */ > > g_tree_ref(rt->tree); > > g_tree_destroy(rt->tree); > > @@ -586,7 +599,7 @@ static inline bool tcg_region_initial_alloc__locked(TCGContext *s) > > } > > > > /* Call from a safe-work context */ > > -void tcg_region_reset_all(void) > > +void tcg_region_reset_all(tb_destroy_func tb_destroy) > > { > > unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); > > unsigned int i; > > @@ -603,7 +616,7 @@ void tcg_region_reset_all(void) > > } > > qemu_mutex_unlock(&region.lock); > > > > - tcg_region_tree_reset_all(); > > + tcg_region_tree_reset_all(tb_destroy); > > Could you name the variables of type tb_destroy_func differently as > although the variable is only ever tb_destroy the function it gets > confusing real quick when trying to grep for stuff. Maybe tbd_fn? > > That said given the single usage why a function pointer? Would we be > just as well served by an exposed public function call from the > appropriate places? Good point. Unless we see an imminent need to pass different values, then it seems reasonable to just use the public function call and remove the need for the function pointer. Thanks & Regards, -Rob > > Richard do you have a view here? > > -- > Alex Bennée
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 42ce1dfcff..3708aab36b 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -384,6 +384,11 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb, return 0; } +static void tb_destroy(TranslationBlock *tb) +{ + qemu_spin_destroy(&tb->jmp_lock); +} + bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) { TranslationBlock *tb; @@ -413,6 +418,7 @@ bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc, bool will_exit) /* one-shot translation, invalidate it immediately */ tb_phys_invalidate(tb, -1); tcg_tb_remove(tb); + tb_destroy(tb); } r = true; } @@ -1230,7 +1236,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE); page_flush_tb(); - tcg_region_reset_all(); + tcg_region_reset_all(tb_destroy); /* XXX: flush processor icache at this point if cache flush is expensive */ atomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1); @@ -1886,6 +1892,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu, orig_aligned -= ROUND_UP(sizeof(*tb), qemu_icache_linesize); atomic_set(&tcg_ctx->code_gen_ptr, (void *)orig_aligned); + tb_destroy(tb); return existing_tb; } tcg_tb_insert(tb); @@ -2235,6 +2242,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) tb_phys_invalidate(tb->orig_tb, -1); } tcg_tb_remove(tb); + tb_destroy(tb); } /* TODO: If env->pc != tb->pc (i.e. 
the faulting instruction was not diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 380014ed80..c8313fdcf0 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -818,8 +818,9 @@ void *tcg_malloc_internal(TCGContext *s, int size); void tcg_pool_reset(TCGContext *s); TranslationBlock *tcg_tb_alloc(TCGContext *s); +typedef void (*tb_destroy_func)(TranslationBlock *tb); void tcg_region_init(void); -void tcg_region_reset_all(void); +void tcg_region_reset_all(tb_destroy_func tb_destroy); size_t tcg_code_size(void); size_t tcg_code_capacity(void); diff --git a/tcg/tcg.c b/tcg/tcg.c index 1aa6cb47f2..7ae9dd7cf8 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -502,7 +502,16 @@ size_t tcg_nb_tbs(void) return nb_tbs; } -static void tcg_region_tree_reset_all(void) +static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data) +{ + TranslationBlock *tb = v; + tb_destroy_func tb_destroy = data; + + tb_destroy(tb); + return FALSE; +} + +static void tcg_region_tree_reset_all(tb_destroy_func tb_destroy) { size_t i; @@ -510,6 +519,10 @@ static void tcg_region_tree_reset_all(void) for (i = 0; i < region.n; i++) { struct tcg_region_tree *rt = region_trees + i * tree_size; + if (tb_destroy != NULL) { + g_tree_foreach(rt->tree, tcg_region_tree_traverse, tb_destroy); + } + /* Increment the refcount first so that destroy acts as a reset */ g_tree_ref(rt->tree); g_tree_destroy(rt->tree); @@ -586,7 +599,7 @@ static inline bool tcg_region_initial_alloc__locked(TCGContext *s) } /* Call from a safe-work context */ -void tcg_region_reset_all(void) +void tcg_region_reset_all(tb_destroy_func tb_destroy) { unsigned int n_ctxs = atomic_read(&n_tcg_ctxs); unsigned int i; @@ -603,7 +616,7 @@ void tcg_region_reset_all(void) } qemu_mutex_unlock(&region.lock); - tcg_region_tree_reset_all(); + tcg_region_tree_reset_all(tb_destroy); } #ifdef CONFIG_USER_ONLY