
[v2,06/19] arm64: head: remove order argument from early mapping routine

Message ID 20221124123932.2648991-7-ardb@kernel.org
State New, archived
Series arm64: Enable LPA2 support for 4k and 16k pages

Commit Message

Ard Biesheuvel Nov. 24, 2022, 12:39 p.m. UTC
When creating mappings in the upper region of the address space, it is
important to know the order of the table being created, i.e., the number
of bits that are translated at the level in question. Bits beyond that
number do not contribute to the translation, and need to be masked out
of the table index.

Now that we no longer use the asm page table creation code for kernel
mappings in the upper region, those bits are guaranteed to be zero
anyway, so we don't have to account for them in the masking.

This means we can simply use the maximum order for all tables,
including the root level table. Doing so will also allow us to
transparently use the same routines for creating the initial ID map
covering 4 levels when the VA space is configured for 5.

Note that the root level tables are always statically allocated as full
pages regardless of how many VA bits they translate.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/kernel/head.S | 26 +++++++++-----------
 1 file changed, 11 insertions(+), 15 deletions(-)
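
For illustration, here is a minimal C sketch of the index extraction that
compute_indices performs, contrasting the old order-based masking with the
fixed PAGE_SHIFT - 3 field used after this patch. It assumes a 64k-page,
48-bit VA, 3-level configuration, where the root table order (6) is smaller
than a full table's (13); the helper names and example addresses are
hypothetical, not taken from the kernel sources.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	16	/* assumed: 64k pages */
#define VA_BITS		48
#define PGDIR_SHIFT	42	/* 64k pages, 3 translation levels */

/* New behaviour: every level extracts the maximum PAGE_SHIFT - 3 bits. */
static uint64_t index_fixed(uint64_t va, unsigned int shift)
{
	return (va >> shift) & (((uint64_t)1 << (PAGE_SHIFT - 3)) - 1);
}

/* Old behaviour: the root level extracted only 'order' bits. */
static uint64_t index_order(uint64_t va, unsigned int shift,
			    unsigned int order)
{
	return (va >> shift) & (((uint64_t)1 << order) - 1);
}

int main(void)
{
	uint64_t lower = 0x0000700000000000ULL;	/* TTBR0-range VA: top bits 0 */
	uint64_t upper = 0xffff800008000000ULL;	/* TTBR1-range VA: top bits 1 */
	unsigned int order = VA_BITS - PGDIR_SHIFT;	/* 6 */

	/* Lower region: the wide field sees only zeroes above bit 47,
	 * so both computations give the same root index (28). */
	printf("lower: %llu == %llu\n",
	       (unsigned long long)index_fixed(lower, PGDIR_SHIFT),
	       (unsigned long long)index_order(lower, PGDIR_SHIFT, order));

	/* Upper region: the sign-extension ones in bits [63:48] leak into
	 * the wide field (8160 vs 32), which is why the order used to
	 * matter; such VAs are no longer passed to these macros. */
	printf("upper: %llu != %llu\n",
	       (unsigned long long)index_fixed(upper, PGDIR_SHIFT),
	       (unsigned long long)index_order(upper, PGDIR_SHIFT, order));
	return 0;
}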

Patch

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 3b3c5e8e84af..a37525a5ee34 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -158,7 +158,6 @@  SYM_CODE_END(preserve_boot_args)
  *	vstart:	virtual address of start of range
  *	vend:	virtual address of end of range - we map [vstart, vend]
  *	shift:	shift used to transform virtual address into index
- *	order:  #imm 2log(number of entries in page table)
  *	istart:	index in table corresponding to vstart
  *	iend:	index in table corresponding to vend
  *	count:	On entry: how many extra entries were required in previous level, scales
@@ -168,10 +167,10 @@  SYM_CODE_END(preserve_boot_args)
  * Preserves:	vstart, vend
  * Returns:	istart, iend, count
  */
-	.macro compute_indices, vstart, vend, shift, order, istart, iend, count
-	ubfx	\istart, \vstart, \shift, \order
-	ubfx	\iend, \vend, \shift, \order
-	add	\iend, \iend, \count, lsl \order
+	.macro compute_indices, vstart, vend, shift, istart, iend, count
+	ubfx	\istart, \vstart, \shift, #PAGE_SHIFT - 3
+	ubfx	\iend, \vend, \shift, #PAGE_SHIFT - 3
+	add	\iend, \iend, \count, lsl #PAGE_SHIFT - 3
 	sub	\count, \iend, \istart
 	.endm
 
@@ -186,7 +185,6 @@  SYM_CODE_END(preserve_boot_args)
  *	vend:	virtual address of end of range - we map [vstart, vend - 1]
  *	flags:	flags to use to map last level entries
  *	phys:	physical address corresponding to vstart - physical memory is contiguous
- *	order:  #imm 2log(number of entries in PGD table)
  *
  * If extra_shift is set, an extra level will be populated if the end address does
  * not fit in 'extra_shift' bits. This assumes vend is in the TTBR0 range.
@@ -195,7 +193,7 @@  SYM_CODE_END(preserve_boot_args)
  * Preserves:	vstart, flags
  * Corrupts:	tbl, rtbl, vend, istart, iend, tmp, count, sv
  */
-	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, order, istart, iend, tmp, count, sv, extra_shift
+	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, istart, iend, tmp, count, sv, extra_shift
 	sub \vend, \vend, #1
 	add \rtbl, \tbl, #PAGE_SIZE
 	mov \count, #0
@@ -203,32 +201,32 @@  SYM_CODE_END(preserve_boot_args)
 	.ifnb	\extra_shift
 	tst	\vend, #~((1 << (\extra_shift)) - 1)
 	b.eq	.L_\@
-	compute_indices \vstart, \vend, #\extra_shift, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	compute_indices \vstart, \vend, #\extra_shift, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
 	mov \tbl, \sv
 	.endif
 .L_\@:
-	compute_indices \vstart, \vend, #PGDIR_SHIFT, #\order, \istart, \iend, \count
+	compute_indices \vstart, \vend, #PGDIR_SHIFT, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
 	mov \tbl, \sv
 
 #if INIT_IDMAP_TABLE_LEVELS > 3
-	compute_indices \vstart, \vend, #PUD_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	compute_indices \vstart, \vend, #PUD_SHIFT, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
 	mov \tbl, \sv
 #endif
 
 #if INIT_IDMAP_TABLE_LEVELS > 2
-	compute_indices \vstart, \vend, #INIT_IDMAP_TABLE_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	compute_indices \vstart, \vend, #INIT_IDMAP_TABLE_SHIFT, \istart, \iend, \count
 	mov \sv, \rtbl
 	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
 	mov \tbl, \sv
 #endif
 
-	compute_indices \vstart, \vend, #INIT_IDMAP_BLOCK_SHIFT, #(PAGE_SHIFT - 3), \istart, \iend, \count
+	compute_indices \vstart, \vend, #INIT_IDMAP_BLOCK_SHIFT, \istart, \iend, \count
 	bic \rtbl, \phys, #INIT_IDMAP_BLOCK_SIZE - 1
 	populate_entries \tbl, \rtbl, \istart, \iend, \flags, #INIT_IDMAP_BLOCK_SIZE, \tmp
 	.endm
@@ -294,7 +292,6 @@  SYM_FUNC_START_LOCAL(create_idmap)
 	 *   requires more than 47 or 48 bits, respectively.
 	 */
 #if (VA_BITS < 48)
-#define IDMAP_PGD_ORDER	(VA_BITS - PGDIR_SHIFT)
 #define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
 
 	/*
@@ -308,7 +305,6 @@  SYM_FUNC_START_LOCAL(create_idmap)
 #error "Mismatch between VA_BITS and page size/number of translation levels"
 #endif
 #else
-#define IDMAP_PGD_ORDER	(PHYS_MASK_SHIFT - PGDIR_SHIFT)
 #define EXTRA_SHIFT
 	/*
 	 * If VA_BITS == 48, we don't have to configure an additional
@@ -320,7 +316,7 @@  SYM_FUNC_START_LOCAL(create_idmap)
 	adrp	x6, _end + MAX_FDT_SIZE + INIT_IDMAP_BLOCK_SIZE
 	mov	x7, INIT_IDMAP_RX_MMUFLAGS
 
-	map_memory x0, x1, x3, x6, x7, x3, IDMAP_PGD_ORDER, x10, x11, x12, x13, x14, EXTRA_SHIFT
+	map_memory x0, x1, x3, x6, x7, x3, x10, x11, x12, x13, x14, EXTRA_SHIFT
 
 	/* Remap BSS and the kernel page tables r/w in the ID map */
 	adrp	x1, _text