
[v4,04/26] arm64: head: drop idmap_ptrs_per_pgd

Message ID: 20220613144550.3760857-5-ardb@kernel.org
State: New, archived
Series: arm64: refactor boot flow and add support for WXN

Commit Message

Ard Biesheuvel June 13, 2022, 2:45 p.m. UTC
The assignment of idmap_ptrs_per_pgd lacks any cache invalidation, even
though it is updated with the MMU and caches disabled. However, we never
bother to read the value again except in the very next instruction, and
so we can just drop the variable entirely.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/mmu_context.h | 1 -
 arch/arm64/kernel/head.S             | 7 +++----
 arch/arm64/mm/mmu.c                  | 1 -
 3 files changed, 3 insertions(+), 6 deletions(-)
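For context: a store performed while the MMU and caches are off is not
guaranteed to be observed by a later cached read unless the line is
invalidated to the PoC first. A minimal sketch of the maintenance that
keeping the variable would have required (illustrative only, modelled on
the update_early_cpu_boot_status pattern in head.S; register choices are
assumed):

	adr_l	x5, idmap_ptrs_per_pgd
	str	x4, [x5]		// store with MMU/caches disabled
	dmb	sy			// order the store before the maintenance
	dc	ivac, x5		// invalidate potentially stale cache line
	dsb	sy			// complete the invalidation

Dropping the variable sidesteps all of this by keeping the value in x4.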

Comments

Anshuman Khandual June 15, 2022, 4:07 a.m. UTC | #1
On 6/13/22 20:15, Ard Biesheuvel wrote:
> The assignment of idmap_ptrs_per_pgd lacks any cache invalidation, even
> though it is updated with the MMU and caches disabled. However, we never

Right, seems like an omission.

> bother to read the value again except in the very next instruction, and
> so we can just drop the variable entirely.

Right.

> 
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
>  arch/arm64/include/asm/mmu_context.h | 1 -
>  arch/arm64/kernel/head.S             | 7 +++----
>  arch/arm64/mm/mmu.c                  | 1 -
>  3 files changed, 3 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
> index 6ac0086ebb1a..7b387c3b312a 100644
> --- a/arch/arm64/include/asm/mmu_context.h
> +++ b/arch/arm64/include/asm/mmu_context.h
> @@ -61,7 +61,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
>   * physical memory, in which case it will be smaller.
>   */
>  extern int idmap_t0sz;
> -extern u64 idmap_ptrs_per_pgd;
>  
>  /*
>   * Ensure TCR.T0SZ is set to the provided value.
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 7f361bc72d12..53126a35d73c 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -300,6 +300,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
>  	 * range in that case, and configure an additional translation level
>  	 * if needed.
>  	 */
> +	mov	x4, #PTRS_PER_PGD
>  	idmap_get_t0sz x5
>  	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
>  	b.ge	1f			// .. then skip VA range extension
> @@ -319,18 +320,16 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
>  #error "Mismatch between VA_BITS and page size/number of translation levels"
>  #endif
>  
> -	mov	x4, EXTRA_PTRS
> -	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
> +	mov	x2, EXTRA_PTRS
> +	create_table_entry x0, x3, EXTRA_SHIFT, x2, x5, x6

AFAICS should be safe to use 'x2' here instead of 'x4'.

>  #else
>  	/*
>  	 * If VA_BITS == 48, we don't have to configure an additional
>  	 * translation level, but the top-level table has more entries.
>  	 */
>  	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
> -	str_l	x4, idmap_ptrs_per_pgd, x5
>  #endif
>  1:
> -	ldr_l	x4, idmap_ptrs_per_pgd

'x4' will contain the default PTRS_PER_PGD in the (VA_BITS == EXTRA_SHIFT)
case, otherwise it will have #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT), but
without going via the erstwhile 'idmap_ptrs_per_pgd' variable.
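
Putting the three paths together, an annotated view of the resulting code
(the #if guard is reconstructed from head.S, so treat this as a sketch):

	mov	x4, #PTRS_PER_PGD		// default top-level entry count
	idmap_get_t0sz x5
	cmp	x5, TCR_T0SZ(VA_BITS_MIN)	// default T0SZ small enough?
	b.ge	1f				// yes: x4 keeps PTRS_PER_PGD
#if (VA_BITS < 48)				// here VA_BITS == EXTRA_SHIFT
	mov	x2, EXTRA_PTRS			// extra level sized via x2,
	create_table_entry x0, x3, EXTRA_SHIFT, x2, x5, x6	// x4 untouched
#else
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)	// wider top level
#endif
1:	// x4 now holds the top-level entry count, with no memory round trip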

>  	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
>  
>  	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
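
For reference, x4 feeds the 'pgds' parameter of map_memory, i.e. the number
of entries in the top-level table; assuming the macro signature in head.S at
this point in the series is:

	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv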
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 103bf4ae408d..0f95c91e5a8e 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -44,7 +44,6 @@
>  #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
>  
>  int idmap_t0sz __ro_after_init;
> -u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
>  
>  #if VA_BITS > 48
>  u64 vabits_actual __ro_after_init = VA_BITS_MIN;

Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>

Patch

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 6ac0086ebb1a..7b387c3b312a 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -61,7 +61,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
  * physical memory, in which case it will be smaller.
  */
 extern int idmap_t0sz;
-extern u64 idmap_ptrs_per_pgd;
 
 /*
  * Ensure TCR.T0SZ is set to the provided value.
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7f361bc72d12..53126a35d73c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -300,6 +300,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 * range in that case, and configure an additional translation level
 	 * if needed.
 	 */
+	mov	x4, #PTRS_PER_PGD
 	idmap_get_t0sz x5
 	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
 	b.ge	1f			// .. then skip VA range extension
@@ -319,18 +320,16 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 #error "Mismatch between VA_BITS and page size/number of translation levels"
 #endif
 
-	mov	x4, EXTRA_PTRS
-	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
+	mov	x2, EXTRA_PTRS
+	create_table_entry x0, x3, EXTRA_SHIFT, x2, x5, x6
 #else
 	/*
 	 * If VA_BITS == 48, we don't have to configure an additional
 	 * translation level, but the top-level table has more entries.
 	 */
 	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
-	str_l	x4, idmap_ptrs_per_pgd, x5
 #endif
 1:
-	ldr_l	x4, idmap_ptrs_per_pgd
 	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
 
 	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 103bf4ae408d..0f95c91e5a8e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -44,7 +44,6 @@
 #define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
 
 int idmap_t0sz __ro_after_init;
-u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
 #if VA_BITS > 48
 u64 vabits_actual __ro_after_init = VA_BITS_MIN;