@@ -280,6 +280,7 @@ struct tb_desc {
CPUArchState *env;
tb_page_addr_t phys_page1;
uint32_t flags;
+ uint32_t trace_vcpu_dstate;
};
static bool tb_cmp(const void *p, const void *d)
@@ -291,6 +292,7 @@ static bool tb_cmp(const void *p, const void *d)
tb->page_addr[0] == desc->phys_page1 &&
tb->cs_base == desc->cs_base &&
tb->flags == desc->flags &&
+ tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
!atomic_read(&tb->invalid)) {
/* check next page if needed */
if (tb->page_addr[1] == -1) {
@@ -319,10 +321,11 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
desc.env = (CPUArchState *)cpu->env_ptr;
desc.cs_base = cs_base;
desc.flags = flags;
+ desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
- h = tb_hash_func(phys_pc, pc, flags);
+ h = tb_hash_func(phys_pc, pc, flags, *cpu->trace_dstate);
return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}
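The effect of the two hunks above is that the vCPU's dynamic tracing state becomes part of the TB lookup key. A minimal stand-alone model of that key (not QEMU code; names and the omitted physical-page fields are simplifications for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative four-component TB key: two vCPUs whose dynamic tracing
     * state differs can never resolve to the same translation. */
    struct tb_key {
        uint64_t pc;
        uint64_t cs_base;
        uint32_t flags;
        uint32_t trace_vcpu_dstate;
    };

    static bool tb_key_equal(const struct tb_key *a, const struct tb_key *b)
    {
        return a->pc == b->pc &&
               a->cs_base == b->cs_base &&
               a->flags == b->flags &&
               a->trace_vcpu_dstate == b->trace_vcpu_dstate;
    }
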
@@ -342,7 +345,8 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
- tb->flags != flags)) {
+ tb->flags != flags ||
+ tb->trace_vcpu_dstate != *cpu->trace_dstate)) {
tb = tb_htable_lookup(cpu, pc, cs_base, flags);
if (!tb) {
@@ -54,6 +54,7 @@
#include "exec/tb-hash.h"
#include "translate-all.h"
#include "qemu/bitmap.h"
+#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "exec/log.h"
@@ -112,6 +113,11 @@ typedef struct PageDesc {
#define V_L2_BITS 10
#define V_L2_SIZE (1 << V_L2_BITS)
+/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
+QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
+ sizeof(((TranslationBlock *)0)->trace_vcpu_dstate)
+ * BITS_PER_BYTE);
+
uintptr_t qemu_host_page_size;
intptr_t qemu_host_page_mask;
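The compile-time check above pins down the sizing argument: each per-vCPU event occupies one bit of tb->trace_vcpu_dstate, so a 32-bit field supports at most 32 such events. A stand-alone sketch of the same guard, written with C11 _Static_assert (the constant value and struct name here are assumptions, not taken from QEMU):

    #include <stdint.h>

    #define BITS_PER_BYTE 8
    #define MAX_VCPU_EVENTS 32           /* assumed value for the sketch */

    struct tb_sketch {
        uint32_t trace_vcpu_dstate;      /* one bit per per-vCPU event */
    };

    /* Fail the build if the bitmap cannot hold all per-vCPU events. */
    _Static_assert(MAX_VCPU_EVENTS <=
                   sizeof(((struct tb_sketch *)0)->trace_vcpu_dstate)
                   * BITS_PER_BYTE,
                   "vCPU event bits must fit in trace_vcpu_dstate");
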
@@ -1102,7 +1108,7 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
/* remove the TB from the hash list */
phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
- h = tb_hash_func(phys_pc, tb->pc, tb->flags);
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
qht_remove(&tcg_ctx.tb_ctx.htable, tb, h);
/* remove the TB from the page list */
@@ -1247,7 +1253,7 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
}
/* add in the hash table */
- h = tb_hash_func(phys_pc, tb->pc, tb->flags);
+ h = tb_hash_func(phys_pc, tb->pc, tb->flags, tb->trace_vcpu_dstate);
qht_insert(&tcg_ctx.tb_ctx.htable, tb, h);
#ifdef DEBUG_TB_CHECK
@@ -1293,6 +1299,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
tb->cs_base = cs_base;
tb->flags = flags;
tb->cflags = cflags;
+ tb->trace_vcpu_dstate = *cpu->trace_dstate;
tb->invalid = false;
#ifdef CONFIG_PROFILER
@@ -310,6 +310,10 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
#define USE_DIRECT_JUMP
#endif
+/**
+ * TranslationBlock:
+ * @trace_vcpu_dstate: Per-vCPU dynamic tracing state used to generate this TB.
+ */
struct TranslationBlock {
target_ulong pc; /* simulated PC corresponding to this block (EIP + CS base) */
target_ulong cs_base; /* CS base for this block */
@@ -324,6 +328,8 @@ struct TranslationBlock {
#define CF_USE_ICOUNT 0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */
+ uint32_t trace_vcpu_dstate;
+
uint16_t invalid;
void *tc_ptr; /* pointer to the translated code */
@@ -49,7 +49,7 @@
* contiguous in memory.
*/
static inline
-uint32_t tb_hash_func5(uint64_t a0, uint64_t b0, uint32_t e)
+uint32_t tb_hash_func6(uint64_t a0, uint64_t b0, uint32_t e, uint32_t f)
{
uint32_t v1 = TB_HASH_XX_SEED + PRIME32_1 + PRIME32_2;
uint32_t v2 = TB_HASH_XX_SEED + PRIME32_2;
@@ -78,11 +78,14 @@ uint32_t tb_hash_func5(uint64_t a0, uint64_t b0, uint32_t e)
v4 *= PRIME32_1;
h32 = rol32(v1, 1) + rol32(v2, 7) + rol32(v3, 12) + rol32(v4, 18);
- h32 += 20;
+ h32 += 24;
h32 += e * PRIME32_3;
h32 = rol32(h32, 17) * PRIME32_4;
+ h32 += f * PRIME32_3;
+ h32 = rol32(h32, 17) * PRIME32_4;
+
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
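The bump from 20 to 24 in the finalization step follows from xxhash mixing the total input length, in bytes, into the result: tb_hash_func5 consumed two 64-bit values and one 32-bit value (20 bytes), while tb_hash_func6 adds a second 32-bit value. A tiny stand-alone check of that arithmetic:

    #include <stdint.h>

    /* With a0/b0 as 64-bit inputs and e/f as 32-bit inputs,
     * the total input length is now 24 bytes. */
    _Static_assert(2 * sizeof(uint64_t) + 2 * sizeof(uint32_t) == 24,
                   "tb_hash_func6 finalization length");
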
@@ -58,9 +58,10 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
#endif /* CONFIG_SOFTMMU */
static inline
-uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags)
+uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
+ uint32_t trace_vcpu_dstate)
{
- return tb_hash_func5(phys_pc, pc, flags);
+ return tb_hash_func6(phys_pc, pc, flags, trace_vcpu_dstate);
}
#endif
@@ -158,7 +158,8 @@ void *HELPER(lookup_tb_ptr)(CPUArchState *env, target_ulong addr)
if (unlikely(!(tb
&& tb->pc == addr
&& tb->cs_base == cs_base
- && tb->flags == flags))) {
+ && tb->flags == flags
+ && tb->trace_vcpu_dstate == *cpu->trace_dstate))) {
tb = tb_htable_lookup(cpu, addr, cs_base, flags);
if (!tb) {
return tcg_ctx.code_gen_epilogue;
@@ -103,7 +103,7 @@ static bool is_equal(const void *obj, const void *userp)
static inline uint32_t h(unsigned long v)
{
- return tb_hash_func5(v, 0, 0);
+ return tb_hash_func6(v, 0, 0, 0);
}
/*
@@ -62,6 +62,7 @@ static void trace_event_synchronize_vcpu_state_dynamic(
{
bitmap_copy(vcpu->trace_dstate, vcpu->trace_dstate_delayed,
trace_get_vcpu_event_count());
+ tb_flush_jmp_cache_all(vcpu);
}
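When the delayed bitmap is synchronized, the vCPU's TB jump cache is also flushed, so translations cached under the previous tracing state are dropped and the next execution goes through the full lookup, which now keys on trace_vcpu_dstate. A simplified stand-alone model of that flush (struct, field, and size names are assumptions for the sketch):

    #include <stdint.h>
    #include <string.h>

    #define JMP_CACHE_SIZE 64            /* assumed size for the sketch */

    struct vcpu_sketch {
        uint32_t trace_dstate;           /* current per-vCPU event bitmap */
        void *tb_jmp_cache[JMP_CACHE_SIZE];
    };

    static void sync_trace_dstate(struct vcpu_sketch *vcpu, uint32_t new_dstate)
    {
        vcpu->trace_dstate = new_dstate;
        /* Drop cached translations so lookups re-key on the new state. */
        memset(vcpu->tb_jmp_cache, 0, sizeof(vcpu->tb_jmp_cache));
    }
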
void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
@@ -165,6 +165,9 @@ void trace_event_set_state_dynamic(TraceEvent *ev, bool state);
* Set the dynamic tracing state of an event for the given vCPU.
*
* Pre-condition: trace_event_get_vcpu_state_static(ev) == true
+ *
+ * Note: Changes for execution-time events with the 'tcg' property will not be
+ * propagated until the next TB is executed (iff executing in TCG mode).
*/
void trace_event_set_vcpu_state_dynamic(CPUState *vcpu,
TraceEvent *ev, bool state);
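A hypothetical call site for the declaration above (the wrapper name is an assumption); per the note, the new state only takes effect once this vCPU executes its next TB when running under TCG:

    /* Assumes the QEMU headers declaring CPUState, TraceEvent and
     * trace_event_set_vcpu_state_dynamic() are available. */
    static void example_enable_vcpu_event(CPUState *vcpu, TraceEvent *ev)
    {
        /* Takes effect at the next TB executed by this vCPU (TCG mode). */
        trace_event_set_vcpu_state_dynamic(vcpu, ev, true);
    }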