[v2,4/5] RISC-V: Remove redundant trampoline page table

Message ID 20190321094710.16552-5-anup.patel@wdc.com (mailing list archive)
State New, archived
Series Boot RISC-V kernel from any 4KB aligned address

Commit Message

Anup Patel March 21, 2019, 9:47 a.m. UTC
The trampoline page table is redundant because:

1. There is no mapping in the trampoline page table which is not
   also covered by the swapper page table.
2. The relocate() code in head.S first loads the trampoline page
   table and then loads the swapper page table. The same result can
   be achieved by loading the swapper page table straight away (as
   sketched below).
3. The trampoline page table lives in the init section, so relocate()
   breaks once the trampoline page table has been freed by the kernel.
   This also means runtime HART hotplug will not work correctly,
   because relocate() is broken after the kernel has booted.

Due to the above, this patch removes the trampoline page table and
related code from kernel/head.S and mm/init.c.
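
For reference, the relocate() sequence after this patch computes satp
from swapper_pg_dir and loads it in a single step (lifted from the
head.S hunk below):

	/* Compute satp for kernel page directory, but don't load it yet */
	la a2, swapper_pg_dir
	srl a2, a2, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1

	/*
	 * Load kernel page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA
	 */
	sfence.vma
	csrw sptbr, a2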

Signed-off-by: Anup Patel <anup.patel@wdc.com>
---
 arch/riscv/kernel/head.S | 13 ++-----
 arch/riscv/mm/init.c     | 79 ++++++++--------------------------------
 2 files changed, 19 insertions(+), 73 deletions(-)

Comments

Christoph Hellwig March 22, 2019, 1:33 p.m. UTC | #1
>  
> -	/* Compute satp for kernel page tables, but don't load it yet */
> +	/* Compute satp for kernel page directory, but don't load it yet */


>  	/*
> -	 * Load trampoline page directory, which will cause us to trap to
> +	 * Load kernel page directory, which will cause us to trap to
>  	 * stvec if VA != PA, or simply fall through if VA == PA
>  	 */

If we want to nitpick comments I think this should talk about the
page table root or something like that.

Otherwise the idea looks good, but I really think we should do this
before all the changes to the setup_vm code.
Anup Patel March 25, 2019, 4:17 a.m. UTC | #2
On Fri, Mar 22, 2019 at 7:03 PM Christoph Hellwig <hch@infradead.org> wrote:
>
> >
> > -     /* Compute satp for kernel page tables, but don't load it yet */
> > +     /* Compute satp for kernel page directory, but don't load it yet */
>
>
> >       /*
> > -      * Load trampoline page directory, which will cause us to trap to
> > +      * Load kernel page directory, which will cause us to trap to
> >        * stvec if VA != PA, or simply fall through if VA == PA
> >        */
>
> If we want to nitpick comments I think this should talk about the
> page table root or something like that.

Okay, I will update comments.

>
> Otherwise the idea looks good, but I really think we should do this
> before all the changes to the setup_vm code.

Sure, I will move this patch before the setup_vm changes so that the
setup_vm code is further simplified.

Regards,
Anup

Patch

diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 12a3ec5eb8ab..94f424e2038d 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -93,21 +93,19 @@  relocate:
 	add a0, a0, a1
 	csrw stvec, a0
 
-	/* Compute satp for kernel page tables, but don't load it yet */
+	/* Compute satp for kernel page directory, but don't load it yet */
 	la a2, swapper_pg_dir
 	srl a2, a2, PAGE_SHIFT
 	li a1, SATP_MODE
 	or a2, a2, a1
 
 	/*
-	 * Load trampoline page directory, which will cause us to trap to
+	 * Load kernel page directory, which will cause us to trap to
 	 * stvec if VA != PA, or simply fall through if VA == PA
 	 */
-	la a0, trampoline_pg_dir
-	srl a0, a0, PAGE_SHIFT
-	or a0, a0, a1
 	sfence.vma
-	csrw sptbr, a0
+	csrw sptbr, a2
+
 .align 2
 1:
 	/* Set trap vector to spin forever to help debug */
@@ -120,9 +118,6 @@  relocate:
 	la gp, __global_pointer$
 .option pop
 
-	/* Switch to kernel page tables */
-	csrw sptbr, a2
-
 	ret
 
 .Lsecondary_start:
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index c389fbfeccd8..2e2f2567964c 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -132,7 +132,6 @@  void __init setup_bootmem(void)
 #define MAX_EARLY_MAPPING_SIZE	SZ_128M
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
-pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
 #ifndef __PAGETABLE_PMD_FOLDED
 #if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
@@ -140,21 +139,14 @@  pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 #else
 #define NUM_SWAPPER_PMDS	(MAX_EARLY_MAPPING_SIZE/PGDIR_SIZE)
 #endif
-#define NUM_TRAMPOLINE_PMDS	1UL
 pmd_t swapper_pmd[PTRS_PER_PMD*NUM_SWAPPER_PMDS] __page_aligned_bss;
-pmd_t trampoline_pmd[PTRS_PER_PMD*NUM_TRAMPOLINE_PMDS]
-	__initdata __aligned(PAGE_SIZE);
 pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #define NUM_SWAPPER_PTES	(MAX_EARLY_MAPPING_SIZE/PMD_SIZE)
 #else
 #define NUM_SWAPPER_PTES	(MAX_EARLY_MAPPING_SIZE/PGDIR_SIZE)
 #endif
 
-#define NUM_TRAMPOLINE_PTES	1UL
-
 pte_t swapper_pte[PTRS_PER_PTE*NUM_SWAPPER_PTES] __page_aligned_bss;
-pte_t trampoline_pte[PTRS_PER_PTE*NUM_TRAMPOLINE_PTES]
-	__initdata __aligned(PAGE_SIZE);
 pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
 
 uintptr_t map_size;
@@ -214,19 +206,7 @@  static pte_t *__init final_get_pte_virt(phys_addr_t pa)
 	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
 }
 
-static phys_addr_t __init early_alloc_trampoline_pte(uintptr_t va,
-						     uintptr_t load_pa)
-{
-	pte_t *base = __load_va(trampoline_pte, load_pa);
-	uintptr_t pte_num = ((va - PAGE_OFFSET) >> PMD_SHIFT);
-
-	BUG_ON(pte_num >= NUM_TRAMPOLINE_PTES);
-
-	return (uintptr_t)&base[pte_num * PTRS_PER_PTE];
-}
-
-static phys_addr_t __init early_alloc_swapper_pte(uintptr_t va,
-						  uintptr_t load_pa)
+static phys_addr_t __init early_alloc_pte(uintptr_t va, uintptr_t load_pa)
 {
 	pte_t *base = __load_va(swapper_pte, load_pa);
 	uintptr_t pte_num = ((va - PAGE_OFFSET) >> PMD_SHIFT);
@@ -266,19 +246,7 @@  static pmd_t *__init final_get_pmd_virt(phys_addr_t pa)
 	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
 }
 
-static phys_addr_t __init early_alloc_trampoline_pmd(uintptr_t va,
-						     uintptr_t load_pa)
-{
-	pmd_t *base = __load_va(trampoline_pmd, load_pa);
-	uintptr_t pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
-
-	BUG_ON(pmd_num >= NUM_TRAMPOLINE_PMDS);
-
-	return (uintptr_t)&base[pmd_num * PTRS_PER_PMD];
-}
-
-static phys_addr_t __init early_alloc_swapper_pmd(uintptr_t va,
-						  uintptr_t load_pa)
+static phys_addr_t __init early_alloc_pmd(uintptr_t va, uintptr_t load_pa)
 {
 	pmd_t *base = __load_va(swapper_pmd, load_pa);
 	uintptr_t pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
@@ -418,7 +386,7 @@  asmlinkage void __init setup_vm(uintptr_t load_pa, uintptr_t dtb_pa)
 	uintptr_t load_sz = __load_pa(&_end, load_pa) - load_pa;
 	pgprot_t tableprot = __pgprot(_PAGE_TABLE);
 	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_EXEC);
-	struct mapping_ops tramp_ops, swap_ops;
+	struct mapping_ops ops;
 
 	va_pa_offset = PAGE_OFFSET - load_pa;
 	pfn_base = PFN_DOWN(load_pa);
@@ -429,51 +397,34 @@  asmlinkage void __init setup_vm(uintptr_t load_pa, uintptr_t dtb_pa)
 	BUG_ON((load_pa % map_size) != 0);
 	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);
 
-	/* Setup trampoline mapping ops */
-	tramp_ops.get_pte_virt = __load_va(early_get_pte_virt, load_pa);
-	tramp_ops.alloc_pte = __load_va(early_alloc_trampoline_pte, load_pa);
-	tramp_ops.get_pmd_virt = NULL;
-	tramp_ops.alloc_pmd = NULL;
-
-	/* Setup swapper mapping ops */
-	swap_ops.get_pte_virt = __load_va(early_get_pte_virt, load_pa);
-	swap_ops.alloc_pte = __load_va(early_alloc_swapper_pte, load_pa);
-	swap_ops.get_pmd_virt = NULL;
-	swap_ops.alloc_pmd = NULL;
+	/* Setup mapping ops */
+	ops.get_pte_virt = __load_va(early_get_pte_virt, load_pa);
+	ops.alloc_pte = __load_va(early_alloc_pte, load_pa);
+	ops.get_pmd_virt = NULL;
+	ops.alloc_pmd = NULL;
 
 #ifndef __PAGETABLE_PMD_FOLDED
-	/* Update trampoline mapping ops for PMD */
-	tramp_ops.get_pmd_virt = __load_va(early_get_pmd_virt, load_pa);
-	tramp_ops.alloc_pmd = __load_va(early_alloc_trampoline_pmd, load_pa);
-
-	/* Update swapper mapping ops for PMD */
-	swap_ops.get_pmd_virt = __load_va(early_get_pmd_virt, load_pa);
-	swap_ops.alloc_pmd = __load_va(early_alloc_swapper_pmd, load_pa);
+	/* Update mapping ops for PMD */
+	ops.get_pmd_virt = __load_va(early_get_pmd_virt, load_pa);
+	ops.alloc_pmd = __load_va(early_alloc_pmd, load_pa);
 
 	/* Setup swapper PGD and PMD for fixmap */
 	map_pa = __load_pa(fixmap_pmd, load_pa);
 	create_pgd_mapping(__load_va(swapper_pg_dir, load_pa),
 			   FIXADDR_START, map_pa, PGDIR_SIZE, tableprot,
-			   load_pa, &swap_ops);
+			   load_pa, &ops);
 	map_pa = __load_pa(fixmap_pte, load_pa);
 	create_pmd_mapping(__load_va(fixmap_pmd, load_pa),
 			   FIXADDR_START, map_pa, PMD_SIZE, tableprot,
-			   load_pa, &swap_ops);
+			   load_pa, &ops);
 #else
 	/* Setup swapper PGD for fixmap */
 	map_pa = __load_pa(fixmap_pte, load_pa);
 	create_pgd_mapping(__load_va(swapper_pg_dir, load_pa),
 			   FIXADDR_START, map_pa, PGDIR_SIZE, tableprot,
-			   load_pa, &swap_ops);
+			   load_pa, &ops);
 #endif
 
-	/* Setup trampoling PGD covering first few MBs of kernel */
-	end_va = PAGE_OFFSET + PAGE_SIZE*PTRS_PER_PTE;
-	for (va = PAGE_OFFSET; va < end_va; va += map_size)
-		create_pgd_mapping(__load_va(trampoline_pg_dir, load_pa),
-				   va, load_pa + (va - PAGE_OFFSET),
-				   map_size, prot, load_pa, &tramp_ops);
-
 	/*
 	 * Setup swapper PGD covering entire kernel which will allows
 	 * us to reach paging_init(). We map all memory banks later in
@@ -483,7 +434,7 @@  asmlinkage void __init setup_vm(uintptr_t load_pa, uintptr_t dtb_pa)
 	for (va = PAGE_OFFSET; va < end_va; va += map_size)
 		create_pgd_mapping(__load_va(swapper_pg_dir, load_pa),
 				   va, load_pa + (va - PAGE_OFFSET),
-				   map_size, prot, load_pa, &swap_ops);
+				   map_size, prot, load_pa, &ops);
 
 	/* Create fixed mapping for early parsing of FDT */
 	end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;