--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -73,7 +73,7 @@
#define INIT_DIR_SIZE (PAGE_SIZE * (EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR) + EARLY_SEGMENT_EXTRA_PAGES))
/* the initial ID map may need two extra pages if it needs to be extended */
-#if VA_BITS < 48
+#if VA_BITS_MIN < 48
#define INIT_IDMAP_DIR_SIZE ((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE)
#else
#define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -266,40 +266,40 @@ SYM_FUNC_START_LOCAL(create_idmap)
* space for ordinary kernel and user space mappings.
*
* There are three cases to consider here:
- * - 39 <= VA_BITS < 48, and the ID map needs up to 48 VA bits to cover
- * the placement of the image. In this case, we configure one extra
- * level of translation on the fly for the ID map only. (This case
- * also covers 42-bit VA/52-bit PA on 64k pages).
+ * - 39 <= VA_BITS_MIN < 48, and the ID map needs up to 48 VA bits to
+ * cover the placement of the image. In this case, we configure one
+ * extra level of translation on the fly for the ID map only. (This
+ * case also covers 42-bit VA/52-bit PA on 64k pages).
*
- * - VA_BITS == 48, and the ID map needs more than 48 VA bits. This can
- * only happen when using 64k pages, in which case we need to extend
- * the root level table rather than add a level. Note that we can
- * treat this case as 'always extended' as long as we take care not
- * to program an unsupported T0SZ value into the TCR register.
+ * - VA_BITS_MIN == 48, and the ID map needs more than 48 VA bits. This
+ * can only happen when using 64k pages, in which case we need to
+ * extend the root level table rather than add a level. Note that we
+ * can treat this case as 'always extended' as long as we take care
+ * not to program an unsupported T0SZ value into the TCR register.
*
* - Combinations that would require two additional levels of
* translation are not supported, e.g., VA_BITS==36 on 16k pages, or
* VA_BITS==39/4k pages with 5-level paging, where the input address
* requires more than 47 or 48 bits, respectively.
*/
-#if (VA_BITS < 48)
-#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+#if VA_BITS_MIN < 48
+#define EXTRA_SHIFT VA_BITS_MIN
/*
- * If VA_BITS < 48, we have to configure an additional table level.
- * First, we have to verify our assumption that the current value of
- * VA_BITS was chosen such that all translation levels are fully
- * utilised, and that lowering T0SZ will always result in an additional
- * translation level to be configured.
+ * If VA_BITS_MIN < 48, we may have to configure an additional table
+ * level. First, we have to verify our assumption that the current
+ * value of VA_BITS_MIN was chosen such that all translation levels are
+ * fully utilised, and that lowering T0SZ will always result in an
+ * additional translation level to be configured.
*/
-#if VA_BITS != EXTRA_SHIFT
-#error "Mismatch between VA_BITS and page size/number of translation levels"
+#if ((VA_BITS_MIN - PAGE_SHIFT) % (PAGE_SHIFT - 3)) != 0
+#error "Mismatch between VA_BITS_MIN and page size/number of translation levels"
#endif
#else
#define EXTRA_SHIFT
/*
- * If VA_BITS == 48, we don't have to configure an additional
- * translation level, but the top-level table has more entries.
+ * If VA_BITS_MIN == 48, we don't have to configure an additional
+ * translation level, but the top-level table may have more entries.
*/
#endif
adrp x0, init_idmap_pg_dir
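For readers less familiar with the page-table geometry, the arithmetic behind the three cases in the comment above can be reproduced with a minimal standalone sketch (ordinary user-space C, not kernel code); the 48- and 52-bit ID map widths are the ones named in the comment, everything else is illustrative:

#include <stdio.h>

/* translation levels needed to resolve va_bits with 8-byte descriptors */
static unsigned int levels(unsigned int va_bits, unsigned int page_shift)
{
        unsigned int bpl = page_shift - 3;      /* bits resolved per level */

        return (va_bits - page_shift + bpl - 1) / bpl;
}

/* number of entries in the top-level table for the given geometry */
static unsigned int root_entries(unsigned int va_bits, unsigned int page_shift)
{
        unsigned int bpl = page_shift - 3;

        return 1u << (va_bits - page_shift - (levels(va_bits, page_shift) - 1) * bpl);
}

int main(void)
{
        const struct { unsigned int va_min, idmap_va, page_shift; } cfg[] = {
                { 39, 48, 12 },         /* 4k pages: one extra level for the ID map   */
                { 42, 52, 16 },         /* 64k pages, 52-bit PA: also one extra level */
                { 48, 52, 16 },         /* 64k pages: same depth, wider root table    */
        };

        for (unsigned int i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++)
                printf("VA_BITS_MIN=%u: %u levels, %u root entries; ID map (%u bits): %u levels, %u root entries\n",
                       cfg[i].va_min,
                       levels(cfg[i].va_min, cfg[i].page_shift),
                       root_entries(cfg[i].va_min, cfg[i].page_shift),
                       cfg[i].idmap_va,
                       levels(cfg[i].idmap_va, cfg[i].page_shift),
                       root_entries(cfg[i].idmap_va, cfg[i].page_shift));
        return 0;
}

The first two configurations gain one level when the ID map is widened; the third keeps three levels but grows the root table from 64 to 1024 entries, matching the "always extended" case.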
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -702,9 +702,9 @@ static void __init create_idmap(void)
u64 pgd_phys;
/* check if we need an additional level of translation */
- if (VA_BITS < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {
+ if (vabits_actual < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {
pgd_phys = early_pgtable_alloc(PAGE_SHIFT);
- set_pgd(&idmap_pg_dir[start >> VA_BITS],
+ set_pgd(&idmap_pg_dir[start >> VA_BITS_MIN],
__pgd(pgd_phys | P4D_TYPE_TABLE));
pgd = __va(pgd_phys);
}
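To make the runtime condition above concrete, here is a hedged standalone sketch (plain user-space C, not kernel code); the values of VA_BITS_MIN, vabits_actual, idmap_t0sz and the load address are assumptions chosen to mimic the 16k-pages LPA2 fallback described in the commit message:

#include <stdio.h>

int main(void)
{
        const unsigned int va_bits_min = 47;            /* 16k pages, LPA2 fallback  */
        const unsigned int vabits_actual = 47;          /* LPA2 not implemented      */
        const unsigned int idmap_t0sz = 64 - 48;        /* ID map spans 48 PA bits   */
        const unsigned long long start = 0x900000000000ULL; /* example load address */

        if (vabits_actual < 48 && idmap_t0sz < 64 - va_bits_min)
                printf("extra level needed, installed at root index %llu\n",
                       start >> va_bits_min);
        else
                printf("no extra level needed\n");
        return 0;
}

With these assumed values the condition is true and the extra table is hooked in at index 1 of idmap_pg_dir; with LPA2 enabled at runtime, vabits_actual is 52 and the extra level is never added.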
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -430,10 +430,11 @@ SYM_FUNC_START(__cpu_setup)
tcr_clear_errata_bits tcr, x9, x5
-#if VA_BITS < 48
+#if VA_BITS_MIN < 48
idmap_get_t0sz x9
tcr_set_t0sz tcr, x9
-#elif VA_BITS > VA_BITS_MIN
+#endif
+#if VA_BITS > VA_BITS_MIN
mov x9, #64 - VA_BITS
alternative_if ARM64_HAS_LVA
tcr_set_t1sz tcr, x9
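The reason the #elif becomes a separate #if can be shown with a small sketch (standalone C, not kernel code) that evaluates both preprocessor conditions for three representative geometries; the last one, 16k pages with LPA2 where VA_BITS_MIN drops to 47 while VA_BITS stays at 52, is the new case in which both blocks must apply:

#include <stdio.h>

static void cpu_setup_blocks(const char *name, unsigned int va_bits,
                             unsigned int va_bits_min)
{
        printf("%-18s idmap T0SZ block: %-3s T1SZ block: %s\n", name,
               va_bits_min < 48 ? "yes" : "no",
               va_bits > va_bits_min ? "yes" : "no");
}

int main(void)
{
        cpu_setup_blocks("4k/39-bit VA", 39, 39);       /* first block only  */
        cpu_setup_blocks("64k/52-bit VA", 52, 48);      /* second block only */
        cpu_setup_blocks("16k/52-bit LPA2", 52, 47);    /* both blocks       */
        return 0;
}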
On 16k pages with LPA2, we will fall back to 47 bits of VA space in case
LPA2 is not implemented. Since we support loading the kernel anywhere in
the 48-bit addressable PA space, this means we may have to extend the ID
map like we normally do in such cases, even when VA_BITS >= 48. Since
VA_BITS_MIN will equal 47 in that case, use that symbolic constant instead
to determine whether ID map extension is required.

Also, use vabits_actual to determine whether create_idmap() needs to add
an extra level as well, as this is never needed if LPA2 is enabled at
runtime.

Note that VA_BITS, VA_BITS_MIN and vabits_actual all collapse into the
same compile time constant on configurations that currently support ID map
extension, so this change should have no effect there whatsoever.

Note that the use of PGDIR_SHIFT in the calculation of EXTRA_SHIFT is no
longer appropriate, as it is derived from VA_BITS rather than VA_BITS_MIN.
So rephrase the check of whether VA_BITS_MIN is consistent with the number
of translation levels.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/kernel-pgtable.h |  2 +-
 arch/arm64/kernel/head.S                | 40 ++++++++++----------
 arch/arm64/mm/mmu.c                     |  4 +-
 arch/arm64/mm/proc.S                    |  5 ++-
 4 files changed, 26 insertions(+), 25 deletions(-)
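As a cross-check of the rephrased consistency check, the following standalone sketch (plain C, not part of the patch) evaluates ((VA_BITS_MIN - PAGE_SHIFT) % (PAGE_SHIFT - 3)) for the usual arm64 configurations with VA_BITS_MIN < 48; all of them divide evenly, so none of them trips the #error:

#include <stdio.h>

static void check(unsigned int va_bits_min, unsigned int page_shift)
{
        unsigned int rem = (va_bits_min - page_shift) % (page_shift - 3);

        printf("VA_BITS_MIN=%2u PAGE_SHIFT=%2u -> remainder %u (%s)\n",
               va_bits_min, page_shift, rem, rem ? "#error" : "ok");
}

int main(void)
{
        check(39, 12);  /* 4k pages                 */
        check(36, 14);  /* 16k pages                */
        check(47, 14);  /* 16k pages, LPA2 fallback */
        check(42, 16);  /* 64k pages                */
        return 0;
}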