@@ -313,6 +313,8 @@ static TBStatistics *tb_get_stats(tb_page_addr_t phys_pc, target_ulong pc,
new_stats->cs_base = cs_base;
new_stats->flags = flags;
new_stats->stats_enabled = get_default_tbstats_flag();
+ new_stats->tbs = g_ptr_array_sized_new(4);
+ qemu_mutex_init(&new_stats->jit_stats_lock);
/*
* All initialisation must be complete before we insert into qht
@@ -326,6 +328,7 @@ static TBStatistics *tb_get_stats(tb_page_addr_t phys_pc, target_ulong pc,
* If there is already a TBStatistic for this TB from a previous flush
* then just make the new TB point to the older TBStatistic
*/
+ g_ptr_array_free(new_stats->tbs, true);
g_free(new_stats);
return existing_stats;
} else {
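
The allocation half of this change follows the usual lose-the-race pattern around qht: the per-TBStatistics resources (the tbs array and its mutex) are set up before publication, and the thread that loses the insert race tears its copy straight back down and adopts the already-published entry. A minimal sketch of that pattern, assuming the four-argument qht_insert() that hands back the existing entry; the table name, hash and wrapping function are stand-ins, not the patch's actual code:

    static TBStatistics *tb_stats_lookup_or_insert(struct qht *tb_stats_qht,
                                                   uint32_t hash,
                                                   TBStatistics *new_stats)
    {
        void *existing = NULL;

        new_stats->tbs = g_ptr_array_sized_new(4);
        qemu_mutex_init(&new_stats->jit_stats_lock);

        /* publish; failure means another thread beat us to it */
        if (qht_insert(tb_stats_qht, new_stats, hash, &existing)) {
            return new_stats;
        }

        /* we lost the race: tear our copy down and adopt theirs */
        g_ptr_array_free(new_stats->tbs, true);
        g_free(new_stats);
        return existing;
    }
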
@@ -344,9 +347,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb_page_addr_t phys_pc;
tcg_insn_unit *gen_code_buf;
int gen_code_size, search_size, max_insns;
-#ifdef CONFIG_PROFILER
TCGProfile *prof = &tcg_ctx->prof;
-#endif
int64_t ti;
void *host_pc;
@@ -582,6 +583,30 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
return tb;
}
+ /*
+ * Collect JIT stats when enabled. We batch them all up here to
+ * avoid spamming the cache with atomic accesses
+ */
+ if (tb_stats_enabled(tb, TB_JIT_STATS)) {
+ TBStatistics *ts = tb->tb_stats;
+ qemu_mutex_lock(&ts->jit_stats_lock);
+
+ ts->code.num_guest_inst += prof->translation.nb_guest_insns;
+ ts->code.num_tcg_ops += prof->translation.nb_ops_pre_opt;
+ ts->code.num_tcg_ops_opt += tcg_ctx->nb_ops;
+ ts->code.spills += prof->translation.nb_spills;
+ ts->code.out_len += tb->tc.size;
+
+ ts->translations.total++;
+ if (tb_page_addr1(tb) != -1) {
+ ts->translations.spanning++;
+ }
+
+ g_ptr_array_add(ts->tbs, tb);
+
+ qemu_mutex_unlock(&ts->jit_stats_lock);
+ }
+
/*
* Insert TB into the corresponding region tree before publishing it
* through QHT. Otherwise rewinding happened in the TB might fail to
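
Nothing in this hunk reads the counters back, but it does establish the locking discipline for consumers: anything reporting the JIT statistics should take jit_stats_lock, mirroring the writer above, before deriving per-translation averages. A hypothetical dump helper, not part of the patch, with qemu_log() used purely for illustration:

    static void tb_jit_stats_dump_one(TBStatistics *ts)
    {
        qemu_mutex_lock(&ts->jit_stats_lock);
        if (ts->translations.total) {
            qemu_log("translations %lu, avg guest insns %.1f, "
                     "avg tcg ops (opt) %.1f, cached tbs %u\n",
                     ts->translations.total,
                     (double)ts->code.num_guest_inst / ts->translations.total,
                     (double)ts->code.num_tcg_ops_opt / ts->translations.total,
                     ts->tbs->len);
        }
        qemu_mutex_unlock(&ts->jit_stats_lock);
    }
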
@@ -132,6 +132,9 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
tb->size = db->pc_next - db->pc_first;
tb->icount = db->num_insns;
+ /* Save number of guest instructions for TB_JIT_STATS */
+ tcg_ctx->prof.translation.nb_guest_insns = db->num_insns;
+
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
&& qemu_log_in_addr_range(db->pc_first)) {
@@ -13,6 +13,7 @@
#define TB_NOTHING (1 << 0)
#define TB_EXEC_STATS (1 << 1)
+#define TB_JIT_STATS (1 << 2)
/* TBStatistic collection controls */
void enable_collect_tb_stats(void);
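
TB_JIT_STATS is just another bit in the stats_enabled mask, so it composes with TB_EXEC_STATS. The check used in tb_gen_code() above presumably reduces to a mask test against the TB's TBStatistics; a minimal sketch of that shape (the real tb_stats_enabled() may also consult a global enable flag):

    static inline bool tb_stats_enabled(TranslationBlock *tb, uint32_t flag)
    {
        /* e.g. flag = TB_JIT_STATS, possibly alongside TB_EXEC_STATS
         * when the default mask enables both */
        return tb->tb_stats && (tb->tb_stats->stats_enabled & flag);
    }
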
@@ -60,6 +60,29 @@ struct TBStatistics {
unsigned long atomic;
} executions;
+ /* JIT Stats - protected by lock */
+ QemuMutex jit_stats_lock;
+
+ /* Sum of all operations for all translations */
+ struct {
+ unsigned num_guest_inst;
+ unsigned num_tcg_ops;
+ unsigned num_tcg_ops_opt;
+ unsigned spills;
+ unsigned out_len;
+ } code;
+
+ struct {
+ unsigned long total;
+ unsigned long uncached;
+ unsigned long spanning;
+ } translations;
+
+ /*
+ * All persistent (cached) TranslationBlocks using
+ * this TBStats structure. Has to be reset on a tb_flush.
+ */
+ GPtrArray *tbs;
};
bool tb_stats_cmp(const void *ap, const void *bp);
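
The tbs array holds raw pointers into the translation cache, which is why the comment insists on a reset at tb_flush: after a flush every cached TranslationBlock pointer is stale. A sketch of how one entry might be cleared at flush time; this is an assumption about the eventual flush hook, which would also need to walk whatever table holds the TBStatistics entries:

    static void tb_stats_reset_tbs(TBStatistics *ts)
    {
        qemu_mutex_lock(&ts->jit_stats_lock);
        if (ts->tbs) {
            /* drop the now-stale TranslationBlock pointers */
            g_ptr_array_set_size(ts->tbs, 0);
        }
        qemu_mutex_unlock(&ts->jit_stats_lock);
    }
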
@@ -529,7 +529,26 @@ static inline TCGRegSet output_pref(const TCGOp *op, unsigned i)
return i < ARRAY_SIZE(op->output_pref) ? op->output_pref[i] : 0;
}
+/*
+ * The TCGProfile structure holds data for analysing the quality of
+ * the code generation. The data is split between stuff that is valid
+ * for the lifetime of a single translation and things that are valid
+ * for the lifetime of the translator. As the former is reset for each
+ * new translation, it should be copied elsewhere if you want to
+ * keep it.
+ *
+ * The structure is safe to access within the context of translation
+ * but accessing the data from elsewhere should be done via safe
+ * work, so reads do not race with an in-progress translation.
+ */
typedef struct TCGProfile {
+
+ struct {
+ int nb_guest_insns;
+ int nb_spills;
+ int nb_ops_pre_opt;
+ } translation;
+
int64_t cpu_exec_time;
int64_t tb_count1;
int64_t tb_count;
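
The new translation sub-struct is per-translation scratch, which is what the comment is getting at: it is (re)initialised as each translation starts and harvested into TBStatistics at the end of tb_gen_code(), so nothing in it survives the next tcg_func_start(). A heavily abbreviated sketch of that hand-off, using the field names from this patch with guards and locking omitted:

    static void end_of_translation(TCGContext *s, TranslationBlock *tb)
    {
        TBStatistics *ts = tb->tb_stats;

        /* persist the per-translation scratch values now: the next
         * tcg_func_start()/translator_loop() resets or overwrites them */
        ts->code.num_guest_inst += s->prof.translation.nb_guest_insns;
        ts->code.spills += s->prof.translation.nb_spills;
    }
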
@@ -576,9 +595,7 @@ struct TCGContext {
tcg_insn_unit *code_buf; /* pointer for start of tb */
tcg_insn_unit *code_ptr; /* pointer for running end of tb */
-#ifdef CONFIG_PROFILER
TCGProfile prof;
-#endif
#ifdef CONFIG_DEBUG_TCG
int goto_tb_issue_mask;
@@ -640,6 +657,7 @@ struct TCGContext {
/* Exit to translator on overflow. */
sigjmp_buf jmp_trans;
+ TranslationBlock *current_tb;
};
static inline bool temp_readonly(TCGTemp *ts)
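
current_tb gives code deep inside the code generator a way back to the TranslationBlock being translated without threading an extra parameter through every call; it is set at the top of tcg_gen_code() below. A sketch of the kind of attribution this enables (note_backend_event() is hypothetical, not something this patch adds):

    static void note_backend_event(TCGContext *s)
    {
        TranslationBlock *tb = s->current_tb;

        if (tb && tb_stats_enabled(tb, TB_JIT_STATS)) {
            /* attribute the event to the TB currently being translated */
        }
    }
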
@@ -1516,6 +1516,7 @@ void tcg_func_start(TCGContext *s)
s->nb_labels = 0;
s->current_frame_offset = s->frame_start;
+ s->prof.translation.nb_spills = 0;
#ifdef CONFIG_DEBUG_TCG
s->goto_tb_issue_mask = 0;
#endif
@@ -4157,6 +4158,7 @@ static TCGReg tcg_reg_alloc(TCGContext *s, TCGRegSet required_regs,
}
/* We must spill something. */
+ s->prof.translation.nb_spills++;
for (j = f; j < 2; j++) {
TCGRegSet set = reg_ct[j];
@@ -5972,12 +5974,12 @@ int64_t tcg_cpu_exec_time(void)
int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
{
-#ifdef CONFIG_PROFILER
TCGProfile *prof = &s->prof;
-#endif
int i, num_insns;
TCGOp *op;
+ s->current_tb = tb;
+
#ifdef CONFIG_PROFILER
{
int n = 0;
@@ -6011,6 +6013,9 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb, uint64_t pc_start)
}
#endif
+ /* save pre-optimisation op count */
+ prof->translation.nb_ops_pre_opt = s->nb_ops;
+
#ifdef CONFIG_DEBUG_TCG
/* Ensure all labels referenced have been emitted. */
{
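
Capturing the op count here, before the optimisation passes, is what makes the num_tcg_ops/num_tcg_ops_opt pair in TBStatistics meaningful: the first accumulates this pre-optimisation value, the second the final s->nb_ops sampled back in tb_gen_code(). A hypothetical helper, not in the patch, turning the pair into an optimiser reduction ratio:

    static double tb_stats_opt_reduction(TBStatistics *ts)
    {
        if (!ts->code.num_tcg_ops) {
            return 0.0;
        }
        /* fraction of ops removed between generation and final emission */
        return 1.0 - (double)ts->code.num_tcg_ops_opt / ts->code.num_tcg_ops;
    }
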