@@ -73,7 +73,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
DATA_TYPE ret;

#if DATA_SIZE == 16
@@ -90,7 +91,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
DATA_TYPE ret;

ret = qatomic_xchg__nocheck(haddr, val);
@@ -104,7 +106,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
- haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, val); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -135,7 +137,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
- haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
cmp = qatomic_read__nocheck(haddr); \
do { \
@@ -176,7 +178,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE cmpv, ABI_TYPE newv,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
DATA_TYPE ret;

#if DATA_SIZE == 16
@@ -193,7 +196,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
- DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
ABI_TYPE ret;

ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -207,7 +211,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
DATA_TYPE *haddr, ret; \
- haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
ret = qatomic_##X(haddr, BSWAP(val)); \
ATOMIC_MMU_CLEANUP; \
atomic_trace_rmw_post(env, addr, oi); \
@@ -235,7 +239,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
- haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+ haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
smp_mb(); \
ldn = qatomic_read__nocheck(haddr); \
do { \
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1936,7 +1936,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* Probe for an atomic operation. Do not allow unaligned operations,
* or io operations to proceed. Return the host address.
*/
-static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
uintptr_t mmu_idx = get_mmuidx(oi);
@@ -1956,7 +1956,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
/* Enforce guest required alignment. */
if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
/* ??? Maybe indicate atomic op to cpu_unaligned_access */
- cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+ cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
@@ -1969,18 +1969,18 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
goto stop_the_world;
}

- index = tlb_index(env_cpu(env), mmu_idx, addr);
- tlbe = tlb_entry(env_cpu(env), mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ tlbe = tlb_entry(cpu, mmu_idx, addr);

/* Check TLB entry and enforce page permissions. */
tlb_addr = tlb_addr_write(tlbe);
if (!tlb_hit(tlb_addr, addr)) {
- if (!victim_tlb_hit(env_cpu(env), mmu_idx, index, MMU_DATA_STORE,
+ if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
addr & TARGET_PAGE_MASK)) {
- tlb_fill(env_cpu(env), addr, size,
+ tlb_fill(cpu, addr, size,
MMU_DATA_STORE, mmu_idx, retaddr);
- index = tlb_index(env_cpu(env), mmu_idx, addr);
- tlbe = tlb_entry(env_cpu(env), mmu_idx, addr);
+ index = tlb_index(cpu, mmu_idx, addr);
+ tlbe = tlb_entry(cpu, mmu_idx, addr);
}
tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
}
@@ -1992,7 +1992,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
* but addr_read will only be -1 if PAGE_READ was unset.
*/
if (unlikely(tlbe->addr_read == -1)) {
- tlb_fill(env_cpu(env), addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+ tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
/*
* Since we don't support reads and writes to different
* addresses, and we do have the proper page loaded for
@@ -2012,10 +2012,10 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
}

hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
- full = &env_tlb(env)->d[mmu_idx].fulltlb[index];
+ full = &cpu_tlb(cpu)->d[mmu_idx].fulltlb[index];

if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
- notdirty_write(env_cpu(env), addr, size, full, retaddr);
+ notdirty_write(cpu, addr, size, full, retaddr);
}

if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
@@ -2028,7 +2028,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
wp_flags |= BP_MEM_READ;
}
if (wp_flags) {
- cpu_check_watchpoint(env_cpu(env), addr, size,
+ cpu_check_watchpoint(cpu, addr, size,
full->attrs, wp_flags, retaddr);
}
}
@@ -2036,7 +2036,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
return hostaddr;

stop_the_world:
- cpu_loop_exit_atomic(env_cpu(env), retaddr);
+ cpu_loop_exit_atomic(cpu, retaddr);
}

/*
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1386,7 +1386,7 @@ uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
/*
* Do not allow unaligned operations to proceed. Return the host address.
*/
-static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
int size, uintptr_t retaddr)
{
MemOp mop = get_memop(oi);
@@ -1395,15 +1395,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
/* Enforce guest required alignment. */
if (unlikely(addr & ((1 << a_bits) - 1))) {
- cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
+ cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
}

/* Enforce qemu required alignment. */
if (unlikely(addr & (size - 1))) {
- cpu_loop_exit_atomic(env_cpu(env), retaddr);
+ cpu_loop_exit_atomic(cpu, retaddr);
}

- ret = g2h(env_cpu(env), addr);
+ ret = g2h(cpu, addr);
set_helper_retaddr(retaddr);
return ret;
}
The goal is to (in the future) allow per-target compilation of the
functions in atomic_template.h, while atomic_mmu_lookup() and cputlb.c
are compiled just once per mode (user or system) rather than once per
target.

Signed-off-by: Anton Johansson <anjo@rev.ng>
---
 accel/tcg/atomic_template.h | 20 ++++++++++++--------
 accel/tcg/cputlb.c          | 26 +++++++++++++-------------
 accel/tcg/user-exec.c       |  8 ++++----
 3 files changed, 29 insertions(+), 25 deletions(-)

--
2.41.0
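[Editor's illustration, not part of the patch.] For readers less familiar
with the CPUState/CPUArchState split, the sketch below shows the calling
convention this change moves towards. The definitions of CPUState,
CPUArchState, env_cpu() and atomic_mmu_lookup() here are simplified
stand-ins, not QEMU's real ones; the point is only that once
atomic_mmu_lookup() takes a CPUState, just the per-target callers need to
know the CPUArchState layout, and they convert with env_cpu() at the call
site.

#include <stdint.h>

typedef struct CPUState {
    int cpu_index;              /* target-agnostic state only */
} CPUState;

typedef struct CPUArchState {
    CPUState *parent_cpu;       /* stand-in back-pointer; the real env_cpu()
                                   computes this with pointer arithmetic */
    /* ... per-target registers would live here ... */
} CPUArchState;

/* Recover the generic CPUState from a per-target env pointer. */
static inline CPUState *env_cpu(CPUArchState *env)
{
    return env->parent_cpu;
}

/*
 * Because this signature mentions only CPUState, the body could live in a
 * file compiled once per user-/system-mode instead of once per target.
 * The real lookup walks the TLB (system mode) or uses g2h() (user mode);
 * here it is just a placeholder.
 */
static void *atomic_mmu_lookup(CPUState *cpu, uint64_t addr, int size)
{
    (void)cpu;
    (void)size;
    return (void *)(uintptr_t)addr;
}

/*
 * Per-target code (compare the atomic_template.h hunks above) still takes
 * CPUArchState and converts at the call boundary with env_cpu().
 */
void *example_atomic_helper(CPUArchState *env, uint64_t addr)
{
    return atomic_mmu_lookup(env_cpu(env), addr, 4);
}

Under these simplified assumptions, atomic_template.h keeps passing
env_cpu(env), while cputlb.c and user-exec.c only ever see CPUState.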