@@ -26,14 +26,14 @@ void tcg_flush_jmp_cache(CPUState *cpu)
{
}
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
g_assert_not_reached();
}
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
/* Handled by hardware accelerator. */
@@ -1499,7 +1499,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
}
}
-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
int fault_size, MMUAccessType access_type,
int mmu_idx, bool nonfault,
void **phost, CPUTLBEntryFull **pfull,
@@ -1508,7 +1508,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
uintptr_t index = tlb_index(env, mmu_idx, addr);
CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
uint64_t tlb_addr = tlb_read_idx(entry, access_type);
- target_ulong page_addr = addr & TARGET_PAGE_MASK;
+ vaddr page_addr = addr & TARGET_PAGE_MASK;
int flags = TLB_FLAGS_MASK;
if (!tlb_hit_page(tlb_addr, page_addr)) {
@@ -1551,7 +1551,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
return flags;
}
-int probe_access_full(CPUArchState *env, target_ulong addr, int size,
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, CPUTLBEntryFull **pfull,
uintptr_t retaddr)
@@ -1568,7 +1568,7 @@ int probe_access_full(CPUArchState *env, target_ulong addr, int size,
return flags;
}
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr)
{
@@ -1589,7 +1589,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
return flags;
}
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
CPUTLBEntryFull *full;
@@ -1648,7 +1648,7 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
* NOTE: This function will trigger an exception if the page is
* not executable.
*/
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
CPUTLBEntryFull *full;
@@ -721,7 +721,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
return current_tb_invalidated ? 2 : 1;
}
-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
int fault_size, MMUAccessType access_type,
bool nonfault, uintptr_t ra)
{
@@ -759,7 +759,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra)
{
@@ -771,7 +771,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
return flags;
}
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
int flags;
@@ -783,7 +783,7 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size,
return size ? g2h(env_cpu(env), addr) : NULL;
}
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp)
{
int flags;
@@ -413,16 +413,16 @@ static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
* Finally, return the host address for a page that is backed by RAM,
* or NULL if the page requires I/O.
*/
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
-static inline void *probe_write(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
}
-static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
+static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
int mmu_idx, uintptr_t retaddr)
{
return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
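
For reference only (not part of this change), a target helper built on the probe_write() wrapper above could look roughly like the sketch below, assuming the usual "exec/exec-all.h" and "exec/cpu_ldst.h" includes. The helper name, the single-page assumption and the byte-store fallback are illustrative assumptions, not code from this series:

    /*
     * Illustrative sketch: zero @len bytes of guest memory, assuming the
     * range does not cross a page boundary.  probe_write() unwinds to the
     * guest fault via @ra if the page is not writable, and returns a host
     * pointer only when the page is plain RAM.
     */
    void helper_zero_range(CPUArchState *env, vaddr addr, uint32_t len)
    {
        uintptr_t ra = GETPC();
        int mmu_idx = cpu_mmu_index(env, false);
        void *host = probe_write(env, addr, len, mmu_idx, ra);

        if (host) {
            /* RAM-backed page: touch the host mapping directly. */
            memset(host, 0, len);
        } else {
            /* I/O-like page: fall back to byte stores through the MMU. */
            for (uint32_t i = 0; i < len; i++) {
                cpu_stb_mmuidx_ra(env, addr + i, 0, mmu_idx, ra);
            }
        }
    }
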
@@ -447,7 +447,7 @@ static inline void *probe_read(CPUArchState *env, target_ulong addr, int size,
* Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
* For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
*/
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t retaddr);
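
Purely as an illustration of the nonfault path described above (again, not part of the patch), a caller might probe a load without risking a guest exception roughly as follows; the function name and the way the result is consumed are assumptions of the sketch:

    /*
     * Illustrative sketch: report whether a load of @size bytes at @addr
     * would succeed.  With nonfault=true no exception is raised; an access
     * that would fault comes back with TLB_INVALID_MASK set instead.
     */
    static bool load_would_succeed(CPUArchState *env, vaddr addr,
                                   int size, int mmu_idx)
    {
        void *host;
        int flags = probe_access_flags(env, addr, size, MMU_DATA_LOAD,
                                       mmu_idx, true, &host, 0);

        if (flags & TLB_INVALID_MASK) {
            return false;
        }
        /*
         * The access is valid.  @host is only usable for direct reads when
         * the page is plain RAM, i.e. when TLB_MMIO is not set in @flags.
         */
        return true;
    }
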
@@ -460,7 +460,7 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
* and must be consumed or copied immediately, before any further
* access or changes to TLB @mmu_idx.
*/
-int probe_access_full(CPUArchState *env, target_ulong addr, int size,
+int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost,
CPUTLBEntryFull **pfull, uintptr_t retaddr);
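
To make the lifetime rule above concrete (illustration only, not from this series), a caller would copy what it needs out of the CPUTLBEntryFull before doing anything else that can touch the TLB. The helper below is hypothetical, though the attrs field it copies is a real member of CPUTLBEntryFull:

    /*
     * Illustrative sketch: fetch the memory attributes of the page backing
     * a store.  The CPUTLBEntryFull pointer can be invalidated by any later
     * TLB activity, so the field is copied by value before returning.
     */
    static MemTxAttrs store_page_attrs(CPUArchState *env, vaddr addr,
                                       int mmu_idx, uintptr_t ra)
    {
        CPUTLBEntryFull *full;
        void *host;

        /* nonfault=false: an unmapped page raises the guest fault here. */
        probe_access_full(env, addr, 1, MMU_DATA_STORE, mmu_idx,
                          false, &host, &full, ra);
        return full->attrs;
    }
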
@@ -581,7 +581,7 @@ struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
*
* Note: this function can trigger an exception.
*/
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
void **hostp);
/**
@@ -596,7 +596,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
* Note: this function can trigger an exception.
*/
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
- target_ulong addr)
+ vaddr addr)
{
return get_page_addr_code_hostp(env, addr, NULL);
}
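
Finally, as a usage note for the two declarations above (not part of this series), a front end that wants to read code bytes directly can go through the hostp out-parameter and fall back to the MMU path when the page is not RAM-backed. The helper name and the cpu_ldub_code() fallback are assumptions of the sketch, and the probe itself can still raise the exception mentioned in the comments:

    /*
     * Illustrative sketch: fetch one instruction byte at @pc.  The probe can
     * raise a guest exception if the page is not executable; a return of -1
     * from get_page_addr_code_hostp() means the page is not backed by RAM
     * (e.g. executing from MMIO), so the byte is loaded via the slow path.
     */
    static uint8_t fetch_code_byte(CPUArchState *env, vaddr pc)
    {
        void *host;

        if (get_page_addr_code_hostp(env, pc, &host) == (tb_page_addr_t)-1) {
            return cpu_ldub_code(env, pc);
        }
        return *(uint8_t *)host;
    }
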