tb-jmp-cache.h contains a few small functions that exist only to hide a
CF_PCREL check; however, the caller often already performs such a check.
This patch moves the CF_PCREL checks from the callee to the caller, and
removes these functions, which then only hide an access of the
jmp-cache.

Signed-off-by: Anton Johansson <anjo@rev.ng>
---
 accel/tcg/cpu-exec.c     | 56 +++++++++++++++++++++++++++++-----------
 accel/tcg/tb-jmp-cache.h | 36 --------------------------
 2 files changed, 41 insertions(+), 51 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -256,21 +256,46 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
 
     hash = tb_jmp_cache_hash_func(pc);
     jc = cpu->tb_jmp_cache;
-    tb = tb_jmp_cache_get_tb(jc, cflags, hash);
-
-    if (likely(tb &&
-               tb_jmp_cache_get_pc(jc, hash, tb) == pc &&
-               tb->cs_base == cs_base &&
-               tb->flags == flags &&
-               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
-               tb_cflags(tb) == cflags)) {
-        return tb;
-    }
-    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
-    if (tb == NULL) {
-        return NULL;
+
+    if (cflags & CF_PCREL) {
+        /* Use acquire to ensure current load of pc from jc. */
+        tb = qatomic_load_acquire(&jc->array[hash].tb);
+
+        if (likely(tb &&
+                   jc->array[hash].pc == pc &&
+                   tb->cs_base == cs_base &&
+                   tb->flags == flags &&
+                   tb->trace_vcpu_dstate == *cpu->trace_dstate &&
+                   tb_cflags(tb) == cflags)) {
+            return tb;
+        }
+        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+        if (tb == NULL) {
+            return NULL;
+        }
+        jc->array[hash].pc = pc;
+        /* Use store_release on tb to ensure pc is written first. */
+        qatomic_store_release(&jc->array[hash].tb, tb);
+    } else {
+        /* Use rcu_read to ensure current load of pc from *tb. */
+        tb = qatomic_rcu_read(&jc->array[hash].tb);
+
+        if (likely(tb &&
+                   tb_pc(tb) == pc &&
+                   tb->cs_base == cs_base &&
+                   tb->flags == flags &&
+                   tb->trace_vcpu_dstate == *cpu->trace_dstate &&
+                   tb_cflags(tb) == cflags)) {
+            return tb;
+        }
+        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+        if (tb == NULL) {
+            return NULL;
+        }
+        /* Use the pc value already stored in tb->pc. */
+        qatomic_set(&jc->array[hash].tb, tb);
     }
-    tb_jmp_cache_set(jc, hash, tb, pc);
+
     return tb;
 }
 
@@ -959,7 +984,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
              * for the fast lookup
              */
             h = tb_jmp_cache_hash_func(pc);
-            tb_jmp_cache_set(cpu->tb_jmp_cache, h, tb, pc);
+            /* Use the pc value already stored in tb->pc. */
+            qatomic_set(&cpu->tb_jmp_cache->array[h].tb, tb);
         }
 
 #ifndef CONFIG_USER_ONLY
diff --git a/accel/tcg/tb-jmp-cache.h b/accel/tcg/tb-jmp-cache.h
--- a/accel/tcg/tb-jmp-cache.h
+++ b/accel/tcg/tb-jmp-cache.h
@@ -25,40 +25,4 @@ struct CPUJumpCache {
     } array[TB_JMP_CACHE_SIZE];
 };
 
-static inline TranslationBlock *
-tb_jmp_cache_get_tb(CPUJumpCache *jc, uint32_t cflags, uint32_t hash)
-{
-    if (cflags & CF_PCREL) {
-        /* Use acquire to ensure current load of pc from jc. */
-        return qatomic_load_acquire(&jc->array[hash].tb);
-    } else {
-        /* Use rcu_read to ensure current load of pc from *tb. */
-        return qatomic_rcu_read(&jc->array[hash].tb);
-    }
-}
-
-static inline target_ulong
-tb_jmp_cache_get_pc(CPUJumpCache *jc, uint32_t hash, TranslationBlock *tb)
-{
-    if (tb_cflags(tb) & CF_PCREL) {
-        return jc->array[hash].pc;
-    } else {
-        return tb_pc(tb);
-    }
-}
-
-static inline void
-tb_jmp_cache_set(CPUJumpCache *jc, uint32_t hash,
-                 TranslationBlock *tb, target_ulong pc)
-{
-    if (tb_cflags(tb) & CF_PCREL) {
-        jc->array[hash].pc = pc;
-        /* Use store_release on tb to ensure pc is written first. */
-        qatomic_store_release(&jc->array[hash].tb, tb);
-    } else{
-        /* Use the pc value already stored in tb->pc. */
-        qatomic_set(&jc->array[hash].tb, tb);
-    }
-}
-
 #endif /* ACCEL_TCG_TB_JMP_CACHE_H */
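
For readers unfamiliar with the ordering used on the CF_PCREL path of
tb_lookup() above, the following is a minimal standalone sketch of the
same release/acquire publication pattern. It is not QEMU code: struct
entry, entry_publish() and entry_lookup() are hypothetical stand-ins for
the jc->array[hash] slot and its accessors, with C11 atomics standing in
for qatomic_store_release()/qatomic_load_acquire().

/* Standalone sketch (not QEMU code): release/acquire pairing for the
 * CF_PCREL case. The writer stores pc first, then releases tb; a reader
 * that acquires a non-NULL tb is guaranteed to see the matching pc. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct entry {
    _Atomic(void *) tb;   /* stands in for TranslationBlock *tb */
    uintptr_t pc;         /* plain field; ordered through tb */
};

static void entry_publish(struct entry *e, void *tb, uintptr_t pc)
{
    e->pc = pc;
    /* Release: makes the pc store visible before the tb store. */
    atomic_store_explicit(&e->tb, tb, memory_order_release);
}

static void *entry_lookup(struct entry *e, uintptr_t pc)
{
    /* Acquire: pairs with the release store in entry_publish(). */
    void *tb = atomic_load_explicit(&e->tb, memory_order_acquire);
    return (tb != NULL && e->pc == pc) ? tb : NULL;
}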
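The non-CF_PCREL path needs no such pairing because pc is read from the
immutable TranslationBlock itself: the address dependency from the tb
load to the tb->pc read is what qatomic_rcu_read() preserves, and
qatomic_set() is a plain relaxed store. A corresponding sketch follows
(again not QEMU code; slot, slot_set() and slot_lookup() are
hypothetical), assuming the block was fully initialised and published
with proper barriers elsewhere, as a TB returned by tb_htable_lookup()
already has been.

/* Standalone sketch (not QEMU code): dependency-ordered lookup when pc
 * lives inside the published block itself. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct tb_like {
    uintptr_t pc;   /* written once, before the block is published */
};

static _Atomic(struct tb_like *) slot;

static void slot_set(struct tb_like *tb)
{
    /* Relaxed store, mirroring qatomic_set(): safe only because tb was
     * already initialised and published with barriers elsewhere. */
    atomic_store_explicit(&slot, tb, memory_order_relaxed);
}

static struct tb_like *slot_lookup(uintptr_t pc)
{
    /* Consume-like load, mirroring qatomic_rcu_read(): the tb->pc read
     * carries an address dependency on the loaded pointer. */
    struct tb_like *tb = atomic_load_explicit(&slot, memory_order_consume);
    return (tb != NULL && tb->pc == pc) ? tb : NULL;
}

The design point of the patch is visible in the contrast between the two
sketches: once the CF_PCREL check lives in the caller, each path can use
exactly the ordering it needs, instead of a helper re-deriving the case
from tb_cflags(tb) on every call.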