@@ -108,15 +108,11 @@
/*
* Initial memory map attributes.
+ * When using ARM64_SWAPPER_USES_SECTION_MAPS, init_pmd()->pmd_set_huge()
+ * sets up the section mappings.
*/
-#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-
-#if ARM64_SWAPPER_USES_SECTION_MAPS
-#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
-#else
-#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
-#endif
+#define SWAPPER_PAGE_FLAGS ((PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) | \
+ PTE_ATTRINDX(MT_NORMAL))
/*
* To make optimal use of block mappings when laying out the linear
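[Note: a single PTE-level constant suffices here because the C mapping code derives the
section (block) attributes itself when it decides to use a block entry. A minimal sketch of
the prot conversion applied on the pmd_set_huge() path in arch/arm64/mm/mmu.c, shown for
reference only:

	/* Sketch of the PTE-to-PMD-section prot conversion done on the
	 * pmd_set_huge() path in arch/arm64/mm/mmu.c; reference only. */
	static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot)
	{
		return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT);
	}
]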
@@ -8,6 +8,8 @@
#ifndef __ASM_PGALLOC_H
#define __ASM_PGALLOC_H
+#ifndef __ASSEMBLY__
+
#include <asm/pgtable-hwdef.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
@@ -89,3 +91,10 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
+
+#define NO_BLOCK_MAPPINGS BIT(0)
+#define NO_CONT_MAPPINGS BIT(1)
+#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */
+#define BOOT_HEAD BIT(3)
+
+#endif
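[Note: these bits steer __create_pgd_mapping() and its helpers; the first three veto block,
contiguous, and executable mappings respectively, and BOOT_HEAD is introduced by this series
for the early head.S callers. A sketch of the kind of granularity check the mmu.c code
performs with them -- the helper below is illustrative, not code from this patch:

	/* Illustrative: how init_pmd()-style code consumes NO_BLOCK_MAPPINGS. */
	static bool can_use_pmd_block(unsigned long addr, unsigned long next,
				      phys_addr_t phys, int flags)
	{
		/* a PMD block needs SECTION_SIZE-aligned VA/PA and no veto flag */
		return ((addr | next | phys) & ~SECTION_MASK) == 0 &&
		       (flags & NO_BLOCK_MAPPINGS) == 0;
	}
]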
@@ -28,6 +28,7 @@
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
+#include <asm/pgalloc.h>
#include <asm/scs.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
@@ -93,6 +94,8 @@ SYM_CODE_START(primary_entry)
adrp x23, __PHYS_OFFSET
and x23, x23, MIN_KIMG_ALIGN - 1 // KASLR offset, defaults to 0
bl set_cpu_boot_mode_flag
+	adrp	x4, init_thread_union
+	add	sp, x4, #THREAD_SIZE		// __create_page_tables now calls C code, so give it a stack
bl __create_page_tables
/*
* The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -144,112 +147,6 @@ SYM_CODE_END(preserve_boot_args)
add \tbl, \tbl, #PAGE_SIZE // next level table page
.endm
-/*
- * Macro to populate page table entries, these entries can be pointers to the next level
- * or last level entries pointing to physical memory.
- *
- * tbl: page table address
- * rtbl: pointer to page table or physical memory
- * index: start index to write
- * eindex: end index to write - [index, eindex] written to
- * flags: flags for pagetable entry to or in
- * inc: increment to rtbl between each entry
- * tmp1: temporary variable
- *
- * Preserves: tbl, eindex, flags, inc
- * Corrupts: index, tmp1
- * Returns: rtbl
- */
- .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
-.Lpe\@: phys_to_pte \tmp1, \rtbl
- orr \tmp1, \tmp1, \flags // tmp1 = table entry
- str \tmp1, [\tbl, \index, lsl #3]
- add \rtbl, \rtbl, \inc // rtbl = pa next level
- add \index, \index, #1
- cmp \index, \eindex
- b.ls .Lpe\@
- .endm
-
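[Note: to make the removed logic easy to compare with the C replacement, here is
populate_entries rendered as a C sketch (reference only; the asm additionally runs the PA
through phys_to_pte to handle 52-bit PAs):

	/* C rendering of the removed populate_entries macro: fill entries
	 * [index, eindex] of tbl, advancing the output PA by inc per entry. */
	static void populate_entries(u64 *tbl, u64 *rtbl, u64 index, u64 eindex,
				     u64 flags, u64 inc)
	{
		for (; index <= eindex; index++) {
			tbl[index] = *rtbl | flags;	/* table or block entry */
			*rtbl += inc;			/* pa for the next entry */
		}
	}
]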
-/*
- * Compute indices of table entries from virtual address range. If multiple entries
- * were needed in the previous page table level then the next page table level is assumed
- * to be composed of multiple pages. (This effectively scales the end index).
- *
- * vstart: virtual address of start of range
- * vend: virtual address of end of range
- * shift: shift used to transform virtual address into index
- * ptrs: number of entries in page table
- * istart: index in table corresponding to vstart
- * iend: index in table corresponding to vend
- * count: On entry: how many extra entries were required in previous level, scales
- * our end index.
- * On exit: returns how many extra entries required for next page table level
- *
- * Preserves: vstart, vend, shift, ptrs
- * Returns: istart, iend, count
- */
- .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
- lsr \iend, \vend, \shift
- mov \istart, \ptrs
- sub \istart, \istart, #1
- and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
- mov \istart, \ptrs
- mul \istart, \istart, \count
- add \iend, \iend, \istart // iend += (count - 1) * ptrs
- // our entries span multiple tables
-
- lsr \istart, \vstart, \shift
- mov \count, \ptrs
- sub \count, \count, #1
- and \istart, \istart, \count
-
- sub \count, \iend, \istart
- .endm
-
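[Note: the index arithmetic above translates to the following C sketch, reference only:

	/* C rendering of the removed compute_indices macro. */
	static void compute_indices(u64 vstart, u64 vend, unsigned int shift,
				    u64 ptrs, u64 *istart, u64 *iend, u64 *count)
	{
		/* end index, scaled by the extra tables the previous level used */
		*iend = ((vend >> shift) & (ptrs - 1)) + ptrs * *count;
		/* start index within this level's table */
		*istart = (vstart >> shift) & (ptrs - 1);
		/* extra entries (hence next-level tables) this level requires */
		*count = *iend - *istart;
	}
]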
-/*
- * Map memory for specified virtual address range. Each level of page table needed supports
- * multiple entries. If a level requires n entries the next page table level is assumed to be
- * formed from n pages.
- *
- * tbl: location of page table
- * rtbl: address to be used for first level page table entry (typically tbl + PAGE_SIZE)
- * vstart: start address to map
- * vend: end address to map - we map [vstart, vend]
- * flags: flags to use to map last level entries
- * phys: physical address corresponding to vstart - physical memory is contiguous
- * pgds: the number of pgd entries
- *
- * Temporaries: istart, iend, tmp, count, sv - these need to be different registers
- * Preserves: vstart, vend, flags
- * Corrupts: tbl, rtbl, istart, iend, tmp, count, sv
- */
- .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
- add \rtbl, \tbl, #PAGE_SIZE
- mov \sv, \rtbl
- mov \count, #0
- compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
- mov \sv, \rtbl
-
-#if SWAPPER_PGTABLE_LEVELS > 3
- compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
- mov \sv, \rtbl
-#endif
-
-#if SWAPPER_PGTABLE_LEVELS > 2
- compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
- populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
- mov \tbl, \sv
-#endif
-
- compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
- bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
- populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
- .endm
-
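[Note: an outline of the removed map_memory macro in C, using the two sketches above. It
assumes the MMU is off so addresses are used 1:1, as in head.S; the intermediate PUD/PMD
levels, elided here, repeat the same pattern:

	static void map_memory(u64 *tbl, u64 vstart, u64 vend, u64 flags,
			       u64 phys, u64 pgds)
	{
		u64 rtbl = (u64)tbl + PAGE_SIZE;	/* next free table page */
		u64 istart, iend, sv, count = 0;

		sv = rtbl;
		compute_indices(vstart, vend, PGDIR_SHIFT, pgds,
				&istart, &iend, &count);
		populate_entries(tbl, &rtbl, istart, iend, PMD_TYPE_TABLE, PAGE_SIZE);
		tbl = (u64 *)sv;
		/* ... PUD/PMD levels when SWAPPER_PGTABLE_LEVELS > 3 / > 2 ... */

		compute_indices(vstart, vend, SWAPPER_BLOCK_SHIFT, PTRS_PER_PTE,
				&istart, &iend, &count);
		phys &= ~(SWAPPER_BLOCK_SIZE - 1);	/* block-align the base */
		populate_entries(tbl, &phys, istart, iend, flags, SWAPPER_BLOCK_SIZE);
	}
]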
/*
* Setup the initial page tables. We only setup the barest amount which is
* required to get the kernel running. The following sections are required:
@@ -284,8 +181,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
subs x1, x1, #64
b.ne 1b
- mov x7, SWAPPER_MM_MMUFLAGS
-
/*
* Create the identity mapping.
*/
@@ -357,21 +252,54 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
mov x5, x3 // __pa(__idmap_text_start)
adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
- map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
+	/*
+	 * x0 points to either idmap_pg_dir or idmap_pg_dir + PAGE_SIZE,
+	 * depending on whether the idmap needed an extra translation level.
+	 */
+ stp x0, x1, [sp, #-64]!
+ stp x2, x3, [sp, #48]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #16]
+
+	adrp	x1, idmap_pg_end
+	sub	x1, x1, x0			// pool size = idmap_pg_end - pool base
+ bl set_cur_headpool
+	mov	x0, #0				// allocator shift argument
+	bl	head_pgtable_alloc		// returns the page-table root in x0
+
+ adrp x1, __idmap_text_start
+ adrp x2, __idmap_text_start // va = pa for idmap
+	adr_l	x3, __idmap_text_end
+	sub	x3, x3, x1			// size of the idmap text range
+ ldr x4, =SWAPPER_PAGE_FLAGS
+ adr_l x5, head_pgtable_alloc
+ mov x6, #BOOT_HEAD
+ bl __create_pgd_mapping
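[Note: set_cur_headpool and head_pgtable_alloc come from an earlier patch in this series and
are not shown here. Their shapes below are a hypothetical reconstruction, inferred from the
call sites above and from the pgtable_alloc callback type in the mmu.c hunk:

	/* Hypothetical prototypes, reconstructed from the call sites above. */
	void set_cur_headpool(phys_addr_t pool_start, u64 pool_size);	/* x0, x1 */
	phys_addr_t head_pgtable_alloc(int shift);	/* matches pgtable_alloc(int) */
]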
/*
* Map the kernel image (starting with PHYS_OFFSET).
*/
adrp x0, init_pg_dir
- mov_q x5, KIMAGE_VADDR // compile time __va(_text)
- add x5, x5, x23 // add KASLR displacement
- mov x4, PTRS_PER_PGD
- adrp x6, _end // runtime __pa(_end)
- adrp x3, _text // runtime __pa(_text)
- sub x6, x6, x3 // _end - _text
- add x6, x6, x5 // runtime __va(_end)
-
- map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14
+	adrp	x1, init_pg_end
+	sub	x1, x1, x0			// pool size = init_pg_end - init_pg_dir
+ bl set_cur_headpool
+	mov	x0, #0				// allocator shift argument
+	bl	head_pgtable_alloc		// returns init_pg_dir in x0
+
+ adrp x1, _text // runtime __pa(_text)
+ mov_q x2, KIMAGE_VADDR // compile time __va(_text)
+ add x2, x2, x23 // add KASLR displacement
+ adrp x3, _end // runtime __pa(_end)
+ sub x3, x3, x1 // _end - _text
+
+ ldr x4, =SWAPPER_PAGE_FLAGS
+ adr_l x5, head_pgtable_alloc
+ mov x6, #BOOT_HEAD
+ bl __create_pgd_mapping
+
+ ldp x6, x7, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x2, x3, [sp, #48]
+ ldp x0, x1, [sp], #64
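[Note: in C terms, the register setup above amounts to the call below (a sketch;
kaslr_offset is an illustrative name for the displacement head.S carries in x23):

	/* Sketch of the equivalent C call, per the AAPCS64 argument registers. */
	__create_pgd_mapping(init_pg_dir,			/* x0 */
			     __pa_symbol(_text),		/* x1: runtime __pa(_text) */
			     KIMAGE_VADDR + kaslr_offset,	/* x2: runtime __va(_text) */
			     (phys_addr_t)(_end - _text),	/* x3: image size */
			     __pgprot(SWAPPER_PAGE_FLAGS),	/* x4 */
			     head_pgtable_alloc,		/* x5 */
			     BOOT_HEAD);			/* x6 */
]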
/*
* Since the page tables have been populated with non-cacheable
@@ -37,11 +37,6 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
-#define NO_BLOCK_MAPPINGS BIT(0)
-#define NO_CONT_MAPPINGS BIT(1)
-#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */
-#define BOOT_HEAD BIT(3)
-
u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
@@ -420,7 +415,7 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
pud_clear_fixmap();
}
-static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
unsigned long virt, phys_addr_t size,
pgprot_t prot,
phys_addr_t (*pgtable_alloc)(int),
Now everything is ready for calling __create_pgd_mapping() from head.S.
Switch to the C routine and remove the asm counterparts.

This patch has been successfully tested with the following configurations:

  PAGE_SIZE  VA  PA  PGTABLE_LEVEL
  4K         48  48  4
  4K         39  48  3
  16K        48  48  4
  16K        47  48  3
  64K        52  52  3
  64K        48  52  3
  64K        42  52  2

Signed-off-by: Pingfan Liu <kernelfans@gmail.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Steven Price <steven.price@arm.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Atish Patra <atish.patra@wdc.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Mark Brown <broonie@kernel.org>
To: linux-arm-kernel@lists.infradead.org
---
 arch/arm64/include/asm/kernel-pgtable.h |  12 +-
 arch/arm64/include/asm/pgalloc.h        |   9 ++
 arch/arm64/kernel/head.S                | 164 +++++++----------------
 arch/arm64/mm/mmu.c                     |   7 +-
 4 files changed, 60 insertions(+), 132 deletions(-)