@@ -148,14 +148,6 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
 }
 
 #else
-/*
- * In system mode we want L1_MAP to be based on ram offsets.
- */
-#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
-# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
-#else
-# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
-#endif
 
 /* Size of the L2 (and L3, etc) page tables. */
 #define V_L2_BITS 10
@@ -186,17 +178,28 @@ struct PageDesc {
 
 void page_table_config_init(void)
 {
+    int target_phys_addr_bits = target_phys_addr_space_bits();
+    uint32_t l1_map_addr_space_bits;
     uint32_t v_l1_bits;
 
+    /*
+     * In system mode we want L1_MAP to be based on ram offsets.
+     */
+    if (HOST_LONG_BITS < target_phys_addr_bits) {
+        l1_map_addr_space_bits = HOST_LONG_BITS;
+    } else {
+        l1_map_addr_space_bits = target_phys_addr_bits;
+    }
+
     assert(TARGET_PAGE_BITS);
     /* The bits remaining after N lower levels of page tables. */
-    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
+    v_l1_bits = (l1_map_addr_space_bits - TARGET_PAGE_BITS) % V_L2_BITS;
     if (v_l1_bits < V_L1_MIN_BITS) {
         v_l1_bits += V_L2_BITS;
     }
 
     v_l1_size = 1 << v_l1_bits;
-    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
+    v_l1_shift = l1_map_addr_space_bits - TARGET_PAGE_BITS - v_l1_bits;
     v_l2_levels = v_l1_shift / V_L2_BITS - 1;
 
     assert(v_l1_bits <= V_L1_MAX_BITS);
@@ -1045,14 +1048,15 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
     TranslationBlock *tb;
     PageForEachNext n;
     tb_page_addr_t last;
+    const bool has_precise_smc = target_has_precise_smc();
 
     /*
      * Without precise smc semantics, or when outside of a TB,
      * we can skip to invalidate.
      */
-#ifndef TARGET_HAS_PRECISE_SMC
-    pc = 0;
-#endif
+    if (!has_precise_smc) {
+        pc = 0;
+    }
     if (!pc) {
         tb_invalidate_phys_page(addr);
         return false;
@@ -1102,10 +1106,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 {
     TranslationBlock *tb;
     PageForEachNext n;
-#ifdef TARGET_HAS_PRECISE_SMC
+    const bool has_precise_smc = target_has_precise_smc();
     bool current_tb_modified = false;
-    TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
-#endif /* TARGET_HAS_PRECISE_SMC */
+    TranslationBlock *current_tb = NULL;
+
+    if (has_precise_smc && retaddr) {
+        current_tb = tcg_tb_lookup(retaddr);
+    }
 
     /* Range may not cross a page. */
     tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
@@ -1127,8 +1134,7 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
             tb_last = tb_start + (tb_last & ~TARGET_PAGE_MASK);
         }
         if (!(tb_last < start || tb_start > last)) {
-#ifdef TARGET_HAS_PRECISE_SMC
-            if (current_tb == tb &&
+            if (has_precise_smc && current_tb == tb &&
                 (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                 /*
                  * If we are modifying the current TB, we must stop
@@ -1140,7 +1146,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                 current_tb_modified = true;
                 cpu_restore_state_from_tb(current_cpu, current_tb, retaddr);
             }
-#endif /* TARGET_HAS_PRECISE_SMC */
             tb_phys_invalidate__locked(tb);
         }
     }
@@ -1150,15 +1155,13 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
         tlb_unprotect_code(start);
     }
 
-#ifdef TARGET_HAS_PRECISE_SMC
-    if (current_tb_modified) {
+    if (has_precise_smc && current_tb_modified) {
         page_collection_unlock(pages);
         /* Force execution of one insn next time. */
         current_cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
         mmap_unlock();
         cpu_loop_exit_noexc(current_cpu);
     }
-#endif
 }
 
 /*
Uses target_has_precise_smc() and target_phys_addr_space_bits() to turn
ifdefs into runtime branches.

Signed-off-by: Anton Johansson <anjo@rev.ng>
---
 accel/tcg/tb-maint.c | 47 +++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 22 deletions(-)
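For reviewers unfamiliar with the new helpers, here is a minimal standalone
sketch of the pattern this patch relies on: compile-time target macros become
fields of a per-target descriptor queried at runtime. Only
target_has_precise_smc() and target_phys_addr_space_bits() come from the patch
itself; the TargetInfoSketch struct, its fields, and the example values are
hypothetical stand-ins for however the real helpers are backed.

/* runtime-target-sketch.c: illustrative only, not QEMU code. */
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical per-target descriptor: values that used to be compile-time
 * constants (TARGET_PHYS_ADDR_SPACE_BITS, TARGET_HAS_PRECISE_SMC) become
 * data selected once at startup.
 */
typedef struct TargetInfoSketch {
    const char *name;
    int phys_addr_space_bits;
    bool has_precise_smc;
} TargetInfoSketch;

static const TargetInfoSketch *current_target;

/* Runtime counterparts of the removed #ifdef'd macros. */
static int target_phys_addr_space_bits(void)
{
    return current_target->phys_addr_space_bits;
}

static bool target_has_precise_smc(void)
{
    return current_target->has_precise_smc;
}

int main(void)
{
    /* Example values only; a real build would pick the target at startup. */
    static const TargetInfoSketch x86_64_sketch = {
        .name = "x86_64",
        .phys_addr_space_bits = 52,
        .has_precise_smc = true,
    };
    current_target = &x86_64_sketch;

    /*
     * The old "#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS" becomes
     * an ordinary branch, as in page_table_config_init() above.
     */
    int host_long_bits = (int)(sizeof(long) * 8);
    int l1_map_addr_space_bits = host_long_bits < target_phys_addr_space_bits()
        ? host_long_bits
        : target_phys_addr_space_bits();

    printf("%s: l1_map_addr_space_bits=%d, precise smc=%d\n",
           current_target->name, l1_map_addr_space_bits,
           target_has_precise_smc());
    return 0;
}

The trade is a load and a branch where the compiler previously saw a constant,
but the same object file can then serve targets that differ in these
properties.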