@@ -99,7 +99,7 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
     desc->window_max_entries = max_entries;
 }
 
-static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
+static void tb_jmp_cache_clear_page(CPUState *cpu, vaddr page_addr)
 {
     CPUJumpCache *jc = cpu->tb_jmp_cache;
     int i, i0;
@@ -35,16 +35,16 @@
 #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
 #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)
 
-static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
 {
-    target_ulong tmp;
+    vaddr tmp;
     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
     return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
 }
 
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
 {
-    target_ulong tmp;
+    vaddr tmp;
     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
     return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
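
For illustration, here is a standalone sketch of what the softmmu hash computes. The helper names and the constant values (a TARGET_PAGE_BITS of 12, a TB_JMP_CACHE_BITS of 12) are assumptions chosen for the demo, not taken from this patch; the arithmetic mirrors the two functions above.

/*
 * Standalone demo of the softmmu jump-cache hash. The constant values
 * below are assumptions for illustration; the real ones come from the
 * QEMU headers.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vaddr;               /* vaddr is a 64-bit type */

#define TARGET_PAGE_BITS   12         /* assumed 4 KiB guest pages */
#define TB_JMP_CACHE_BITS  12         /* assumed 4096-entry cache */
#define TB_JMP_CACHE_SIZE  (1 << TB_JMP_CACHE_BITS)
#define TB_JMP_PAGE_BITS   (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE   (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK   (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK   (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

/* Same arithmetic as the patched tb_jmp_cache_hash_page(). */
static unsigned int hash_page(vaddr pc)
{
    vaddr tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

/* Same arithmetic as the patched tb_jmp_cache_hash_func(). */
static unsigned int hash_func(vaddr pc)
{
    vaddr tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

int main(void)
{
    vaddr a = 0x40001000, b = 0x40001abc;   /* same guest page */
    vaddr c = 0x40002000;                   /* neighbouring page */

    /* a and b share a page group; c normally lands in another one. */
    printf("page(a)=%u page(b)=%u page(c)=%u\n",
           hash_page(a), hash_page(b), hash_page(c));
    printf("func(a)=%u func(b)=%u\n", hash_func(a), hash_func(b));
    return 0;
}

Because only pc bits at or above TARGET_PAGE_BITS feed the page-group part of the index, every pc on a given guest page hashes into one contiguous run of TB_JMP_PAGE_SIZE slots, which is the run that tb_jmp_cache_clear_page() walks.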
@@ -53,7 +53,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 #else
 
 /* In user-mode we can get better hashing because we do not have a TLB */
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
 {
     return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
 }
@@ -61,7 +61,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 #endif /* CONFIG_SOFTMMU */
 
 static inline
-uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc,
+uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
                       uint32_t flags, uint64_t flags2, uint32_t cf_mask)
 {
     return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);
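
As a sanity check on the type change itself: for a 32-bit guest, a target_ulong pc handed to a 64-bit parameter is zero-extended, so widening the parameter to vaddr feeds the same bits to the hash. A minimal sketch of that assumption follows; the typedefs are local stand-ins, not QEMU's definitions.

#include <assert.h>
#include <stdint.h>

typedef uint32_t target_ulong;    /* stand-in for the 32-bit guest case */
typedef uint64_t vaddr;           /* stand-in for QEMU's 64-bit vaddr */

/* Old-style parameter: the value widens at the call site. */
static uint64_t hash_input_old(target_ulong pc)
{
    return pc;                    /* implicit zero-extension */
}

/* New-style parameter: already 64 bits wide. */
static uint64_t hash_input_new(vaddr pc)
{
    return pc;
}

int main(void)
{
    target_ulong pc = 0xdeadbeefu;
    /* The same 64-bit value reaches the hash either way. */
    assert(hash_input_old(pc) == hash_input_new(pc));
    assert(hash_input_new(pc) == UINT64_C(0xdeadbeef));
    return 0;
}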
@@ -21,7 +21,7 @@ struct CPUJumpCache {
     struct rcu_head rcu;
     struct {
         TranslationBlock *tb;
-        target_ulong pc;
+        vaddr pc;
     } array[TB_JMP_CACHE_SIZE];
 };
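
To show how the vaddr-keyed array gets used, here is a simplified, self-contained model of a lookup plus the per-page flush done by tb_jmp_cache_clear_page() above. The struct, helper names and constants are local stand-ins, and the RCU/atomic accesses of the real code are omitted.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t vaddr;

/* Minimal stand-in; the real TranslationBlock is far larger. */
typedef struct TranslationBlock {
    vaddr pc;
} TranslationBlock;

#define TB_JMP_CACHE_BITS 12                 /* assumed size */
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
#define TB_JMP_PAGE_BITS  (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE  (1 << TB_JMP_PAGE_BITS)

struct JumpCache {
    struct {
        TranslationBlock *tb;
        vaddr pc;                            /* new field type from the patch */
    } array[TB_JMP_CACHE_SIZE];
};

/* A hit requires both a cached tb and a matching recorded pc. */
static TranslationBlock *jc_lookup(struct JumpCache *jc, vaddr pc,
                                   unsigned int hash)
{
    TranslationBlock *tb = jc->array[hash].tb;
    return (tb != NULL && jc->array[hash].pc == pc) ? tb : NULL;
}

/*
 * Flush one guest page: the page hash is a multiple of TB_JMP_PAGE_SIZE,
 * so the page's entries occupy one contiguous run of slots.
 */
static void jc_clear_page(struct JumpCache *jc, unsigned int page_hash)
{
    for (unsigned int i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        jc->array[page_hash + i].tb = NULL;
    }
}

int main(void)
{
    static struct JumpCache jc;              /* zero-initialized */
    TranslationBlock tb = { .pc = 0x40001234 };
    unsigned int hash = 0x7c1;               /* any in-range index */

    jc.array[hash].tb = &tb;
    jc.array[hash].pc = tb.pc;
    assert(jc_lookup(&jc, tb.pc, hash) == &tb);

    /* Clearing the page group containing this slot evicts the entry. */
    jc_clear_page(&jc, hash & ~(TB_JMP_PAGE_SIZE - 1u));
    assert(jc_lookup(&jc, tb.pc, hash) == NULL);
    return 0;
}

The per-page flush only works because the softmmu hash above groups all of a page's entries into one contiguous block of the array; the wider vaddr pc field changes the key type without changing that layout.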