[PATCHv2,10/10] arm64/head: convert idmap_pg_dir and init_pg_dir to __create_pgd_mapping()

Message ID 20210425141304.32721-11-kernelfans@gmail.com (mailing list archive)
State New, archived
Series use __create_pgd_mapping() to implement idmap and unify codes

Commit Message

Pingfan Liu April 25, 2021, 2:13 p.m. UTC
Now everything is ready for calling __create_pgd_mapping() from head.S.
Switch to these C routines and remove the asm counterparts.

This patch has been successfully tested with the following configurations:
    PAGE_SIZE  VA_BITS  PA_BITS  PGTABLE_LEVELS
    4K         48       48       4
    4K         39       48       3
    16K        48       48       4
    16K        47       48       3
    64K        52       52       3
    64K        42       52       2
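
For reference, the asm below performs the C-level equivalent of the
following sketch. It is illustrative only: set_cur_mempool() and
head_pgtable_alloc() come from earlier patches in this series, and the
argument order is inferred from the register usage in the patch.

    /*
     * Illustrative sketch only; helper prototypes are assumed from
     * earlier patches in this series. Runs with the MMU off, so
     * physical and virtual addresses coincide.
     */
    phys_addr_t pgd;

    /* point the boot allocator at the static idmap pool */
    set_cur_mempool(__pa_symbol(idmap_pg_dir),
		    __pa_symbol(idmap_pg_end) - __pa_symbol(idmap_pg_dir));
    pgd = head_pgtable_alloc(0, NULL);	/* first page: idmap_pg_dir */
    create_idmap((pgd_t *)pgd, __pa_symbol(__idmap_text_start),
		 __pa_symbol(__idmap_text_end) - __pa_symbol(__idmap_text_start),
		 PAGE_KERNEL_EXEC, head_pgtable_alloc, NULL,
		 NO_FIXMAP | BOOT_HEAD);

    /* same pattern for the kernel image, mapped at its KASLR'ed VA */
    set_cur_mempool(__pa_symbol(init_pg_dir),
		    __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
    pgd = head_pgtable_alloc(0, NULL);	/* first page: init_pg_dir */
    create_init_pgd_mapping((pgd_t *)pgd, __pa_symbol(_text),
			    KIMAGE_VADDR + kaslr_offset,  /* offset held in x23 */
			    __pa_symbol(_end) - __pa_symbol(_text),
			    PAGE_KERNEL_EXEC, head_pgtable_alloc, NULL,
			    NO_FIXMAP | BOOT_HEAD);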

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Atish Patra <atish.patra@wdc.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Mark Brown <broonie@kernel.org>
To: linux-arm-kernel@lists.infradead.org

---
RFC -> V2:
    correct the asm calling convention.
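    Per AAPCS64, the first eight integer arguments are passed in
    x0-x7, which yields the register/parameter mapping below
    (inferred from the asm in this patch, as a reader aid):

	/*
	 * create_idmap:            x0=pgdir, x1=phys, x2=size, x3=prot,
	 *                          x4=allocator, x5=info, x6=flags
	 * create_init_pgd_mapping: x0=pgdir, x1=phys, x2=virt, x3=size,
	 *                          x4=prot, x5=allocator, x6=info, x7=flags
	 */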
---
 arch/arm64/include/asm/pgalloc.h |   5 +
 arch/arm64/kernel/head.S         | 193 ++++++++-----------------------
 arch/arm64/mm/mmu.c              |  13 +++
 3 files changed, 66 insertions(+), 145 deletions(-)
Patch

diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index c3875af99432..b1182b656b00 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -8,6 +8,9 @@ 
 #ifndef __ASM_PGALLOC_H
 #define __ASM_PGALLOC_H
 
+#include <vdso/bits.h>
+
+#ifndef __ASSEMBLY__
 #include <asm/pgtable-hwdef.h>
 #include <asm/processor.h>
 #include <asm/cacheflush.h>
@@ -102,6 +105,8 @@  extern void create_idmap(pgd_t *pgdir, phys_addr_t phys,
 		void *info,
 		int flags);
 
+#endif
+
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
 #define NO_FIXMAP	BIT(2)
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e19649dbbafb..ddb9601d61c2 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -28,6 +28,8 @@ 
 #include <asm/memory.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/page.h>
+#include <asm/pgtable-prot.h>
+#include <asm/pgalloc.h>
 #include <asm/scs.h>
 #include <asm/smp.h>
 #include <asm/sysreg.h>
@@ -92,6 +94,8 @@  SYM_CODE_START(primary_entry)
 	bl	init_kernel_el			// w0=cpu_boot_mode
 	adrp	x23, __PHYS_OFFSET
 	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
+	adrp	x4, init_thread_union
+	add	sp, x4, #THREAD_SIZE
 	bl	set_cpu_boot_mode_flag
 	bl	__create_page_tables
 	/*
@@ -121,135 +125,6 @@  SYM_CODE_START_LOCAL(preserve_boot_args)
 	b	__inval_dcache_area		// tail call
 SYM_CODE_END(preserve_boot_args)
 
-/*
- * Macro to create a table entry to the next page.
- *
- *	tbl:	page table address
- *	virt:	virtual address
- *	shift:	#imm page table shift
- *	ptrs:	#imm pointers per table page
- *
- * Preserves:	virt
- * Corrupts:	ptrs, tmp1, tmp2
- * Returns:	tbl -> next level table page address
- */
-	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
-	add	\tmp1, \tbl, #PAGE_SIZE
-	phys_to_pte \tmp2, \tmp1
-	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
-	lsr	\tmp1, \virt, #\shift
-	sub	\ptrs, \ptrs, #1
-	and	\tmp1, \tmp1, \ptrs		// table index
-	str	\tmp2, [\tbl, \tmp1, lsl #3]
-	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
-	.endm
-
-/*
- * Macro to populate page table entries, these entries can be pointers to the next level
- * or last level entries pointing to physical memory.
- *
- *	tbl:	page table address
- *	rtbl:	pointer to page table or physical memory
- *	index:	start index to write
- *	eindex:	end index to write - [index, eindex] written to
- *	flags:	flags for pagetable entry to or in
- *	inc:	increment to rtbl between each entry
- *	tmp1:	temporary variable
- *
- * Preserves:	tbl, eindex, flags, inc
- * Corrupts:	index, tmp1
- * Returns:	rtbl
- */
-	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
-.Lpe\@:	phys_to_pte \tmp1, \rtbl
-	orr	\tmp1, \tmp1, \flags	// tmp1 = table entry
-	str	\tmp1, [\tbl, \index, lsl #3]
-	add	\rtbl, \rtbl, \inc	// rtbl = pa next level
-	add	\index, \index, #1
-	cmp	\index, \eindex
-	b.ls	.Lpe\@
-	.endm
-
-/*
- * Compute indices of table entries from virtual address range. If multiple entries
- * were needed in the previous page table level then the next page table level is assumed
- * to be composed of multiple pages. (This effectively scales the end index).
- *
- *	vstart:	virtual address of start of range
- *	vend:	virtual address of end of range
- *	shift:	shift used to transform virtual address into index
- *	ptrs:	number of entries in page table
- *	istart:	index in table corresponding to vstart
- *	iend:	index in table corresponding to vend
- *	count:	On entry: how many extra entries were required in previous level, scales
- *			  our end index.
- *		On exit: returns how many extra entries required for next page table level
- *
- * Preserves:	vstart, vend, shift, ptrs
- * Returns:	istart, iend, count
- */
-	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
-	lsr	\iend, \vend, \shift
-	mov	\istart, \ptrs
-	sub	\istart, \istart, #1
-	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
-	mov	\istart, \ptrs
-	mul	\istart, \istart, \count
-	add	\iend, \iend, \istart	// iend += (count - 1) * ptrs
-					// our entries span multiple tables
-
-	lsr	\istart, \vstart, \shift
-	mov	\count, \ptrs
-	sub	\count, \count, #1
-	and	\istart, \istart, \count
-
-	sub	\count, \iend, \istart
-	.endm
-
-/*
- * Map memory for specified virtual address range. Each level of page table needed supports
- * multiple entries. If a level requires n entries the next page table level is assumed to be
- * formed from n pages.
- *
- *	tbl:	location of page table
- *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
- *	vstart:	start address to map
- *	vend:	end address to map - we map [vstart, vend]
- *	flags:	flags to use to map last level entries
- *	phys:	physical address corresponding to vstart - physical memory is contiguous
- *	pgds:	the number of pgd entries
- *
- * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
- * Preserves:	vstart, vend, flags
- * Corrupts:	tbl, rtbl, istart, iend, tmp, count, sv
- */
-	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
-	add \rtbl, \tbl, #PAGE_SIZE
-	mov \sv, \rtbl
-	mov \count, #0
-	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
-	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
-	mov \tbl, \sv
-	mov \sv, \rtbl
-
-#if SWAPPER_PGTABLE_LEVELS > 3
-	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
-	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
-	mov \tbl, \sv
-	mov \sv, \rtbl
-#endif
-
-#if SWAPPER_PGTABLE_LEVELS > 2
-	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
-	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
-	mov \tbl, \sv
-#endif
-
-	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
-	bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
-	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
-	.endm
-
 /*
  * Setup the initial page tables. We only setup the barest amount which is
  * required to get the kernel running. The following sections are required:
@@ -345,8 +220,6 @@  SYM_FUNC_START_LOCAL(__create_page_tables)
 	mov	x5, #1
 	str	x5, [x4]                // require expanded page table
 
-	mov	x4, EXTRA_PTRS
-	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
 #else
 	/*
 	 * If VA_BITS == 48, we don't have to configure an additional
@@ -356,25 +229,55 @@  SYM_FUNC_START_LOCAL(__create_page_tables)
 	str_l	x4, idmap_ptrs_per_pgd, x5
 #endif
 1:
-	ldr_l	x4, idmap_ptrs_per_pgd
-	mov	x5, x3				// __pa(__idmap_text_start)
-	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
-
-	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
+	stp     x0, x1, [sp, #-64]!
+	stp     x2, x3, [sp, #48]
+	stp     x4, x5, [sp, #32]
+	stp     x6, x7, [sp, #16]
+
+	adrp    x0, idmap_pg_dir
+	adrp    x1, idmap_pg_end
+	sub     x1, x1, x0
+	bl      set_cur_mempool
+	mov	x0, #0			// shift = 0
+	mov	x1, #0			// info = NULL
+	bl	head_pgtable_alloc	// x0 contains idmap_pg_dir
+
+	adrp    x1, __idmap_text_start
+	adr_l   x2, __idmap_text_end
+	sub     x2, x2, x1
+	ldr     x3, =PAGE_KERNEL_EXEC
+	adr_l   x4, head_pgtable_alloc
+	mov     x5, #0
+	mov     x6, #(NO_FIXMAP | BOOT_HEAD)
+	bl      create_idmap
 
 	/*
 	 * Map the kernel image (starting with PHYS_OFFSET).
 	 */
 	adrp	x0, init_pg_dir
-	mov_q	x5, KIMAGE_VADDR		// compile time __va(_text)
-	add	x5, x5, x23			// add KASLR displacement
-	mov	x4, PTRS_PER_PGD
-	adrp	x6, _end			// runtime __pa(_end)
-	adrp	x3, _text			// runtime __pa(_text)
-	sub	x6, x6, x3			// _end - _text
-	add	x6, x6, x5			// runtime __va(_end)
-
-	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0
+	bl	set_cur_mempool
+	mov	x0, #0			// shift = 0
+	mov	x1, #0			// info = NULL
+	bl	head_pgtable_alloc		// x0 is init_pg_dir
+
+	adrp	x1, _text			// runtime __pa(_text)
+	mov_q	x2, KIMAGE_VADDR		// compile time __va(_text)
+	add	x2, x2, x23			// add KASLR displacement
+	adrp	x3, _end			// runtime __pa(_end)
+	sub	x3, x3, x1			// _end - _text
+
+	ldr	x4, =PAGE_KERNEL_EXEC
+	adr_l	x5, head_pgtable_alloc
+	mov	x6, #0
+	mov	x7, #(NO_FIXMAP | BOOT_HEAD)
+
+	bl	create_init_pgd_mapping
+	ldp	x6, x7, [sp, #16]
+	ldp	x4, x5, [sp, #32]
+	ldp	x2, x3, [sp, #48]
+	ldp	x0, x1, [sp], #64
 
 	/*
 	 * Since the page tables have been populated with non-cacheable
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index fa1d1d4fee8f..1ae72a3f2d27 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -167,6 +167,19 @@  static phys_addr_t pgd_pgtable_alloc(unsigned long shift, void *unused)
 
 #include "./mmu_include.c"
 
+void create_init_pgd_mapping(pgd_t *pgdir,
+		phys_addr_t phys,
+		unsigned long virt,
+		phys_addr_t size,
+		pgprot_t prot,
+		pgtable_alloc allocator,
+		void *info,
+		int flags)
+{
+	__create_pgd_mapping(pgdir, PTRS_PER_PGD, phys, virt, size,
+		prot, allocator, info, flags);
+}
+
 int idmap_extend_pgtable;
 
 /*
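
For context, head_pgtable_alloc() and set_cur_mempool(), referenced
above, come from earlier patches in this series and are not shown
here. A minimal sketch of how such a boot-time bump allocator could
look (variable names and zeroing behaviour are assumptions, not the
series' actual implementation):

	static phys_addr_t pool_start, pool_size, pool_next;

	/* register a static page-table pool, e.g. idmap_pg_dir..idmap_pg_end */
	void set_cur_mempool(phys_addr_t start, phys_addr_t size)
	{
		pool_start = start;
		pool_size = size;
		pool_next = start;
	}

	/* hand out one zeroed page per call; signature matches pgtable_alloc */
	phys_addr_t head_pgtable_alloc(unsigned long shift, void *unused)
	{
		phys_addr_t pa = pool_next;

		BUG_ON(pa + PAGE_SIZE > pool_start + pool_size);
		pool_next += PAGE_SIZE;
		/* MMU is off at this stage, so pa can be dereferenced directly */
		memset((void *)pa, 0, PAGE_SIZE);
		return pa;
	}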