
arm64: head: Switch endianness before populating the ID map

Message ID: 20230125185910.962733-1-ardb@kernel.org (mailing list archive)
State: New, archived
Series: arm64: head: Switch endianness before populating the ID map

Commit Message

Ard Biesheuvel Jan. 25, 2023, 6:59 p.m. UTC
Ensure that the endianness used for populating the ID map matches the
endianness that the running kernel will be using, as this is no longer
guaranteed now that create_idmap() is invoked before init_kernel_el().

Note that doing so is only safe if the MMU is off, as switching the
endianness with the MMU on causes the active ID map to become
invalid. So also clear the C and M bits when toggling the EE bit in
SCTLR, and mark the MMU as disabled at boot.

Note that the same issue has resulted in preserve_boot_args() recording
the contents of registers X0 ... X3 in the wrong byte order, although
this is arguably a very minor concern.

Fixes: 32b135a7fafe ("arm64: head: avoid cache invalidation when entering with the MMU on")
Reported-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/sysreg.h |  3 ++-
 arch/arm64/kernel/head.S        | 23 ++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 2 deletions(-)
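
As background on the failure mode (an illustration, not part of the patch):
the ID map entries are 64-bit descriptors written with ordinary stores, so a
descriptor stored under one data endianness is seen byte-reversed by a table
walk performed under the other. A standalone C sketch, using
__builtin_bswap64 to model the reversal and a made-up descriptor value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Made-up block descriptor: output address plus attribute bits. */
            uint64_t desc = 0x40000000ULL | 0x3ULL;

            /*
             * If the store and the later table walk use opposite data
             * endianness, the walker effectively sees the bytes reversed.
             */
            uint64_t walked = __builtin_bswap64(desc);

            printf("stored by create_idmap():  0x%016llx\n",
                   (unsigned long long)desc);
            printf("seen by a mismatched walk: 0x%016llx\n",
                   (unsigned long long)walked);
            return 0;
    }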

Comments

Ard Biesheuvel Jan. 25, 2023, 7 p.m. UTC | #1
On Wed, 25 Jan 2023 at 19:59, Ard Biesheuvel <ardb@kernel.org> wrote:
>
> Ensure that the endianness used for populating the ID map matches the
> endianness that the running kernel will be using, as this is no longer
> guaranteed now that create_idmap() is invoked before init_kernel_el().
>
> Note that doing so is only safe if the MMU is off, as switching the
> endianness with the MMU on causes the active ID map to become
> invalid. So also clear the C and M bits when toggling the EE bit in

Not the C bit - I ripped that out at the last minute.

> SCTLR, and mark the MMU as disabled at boot.
>
> [...]
Nathan Chancellor Jan. 25, 2023, 9:34 p.m. UTC | #2
On Wed, Jan 25, 2023 at 07:59:10PM +0100, Ard Biesheuvel wrote:
> Ensure that the endianness used for populating the ID map matches the
> endianness that the running kernel will be using, as this is no longer
> guaranteed now that create_idmap() is invoked before init_kernel_el().
> 
> Note that doing so is only safe if the MMU is off, as switching the
> endianness with the MMU on causes the active ID map to become
> invalid. So also clear the C and M bits when toggling the EE bit in
> SCTLR, and mark the MMU as disabled at boot.
> 
> Note that the same issue has resulted in preserve_boot_args() recording
> the contents of registers X0 ... X3 in the wrong byte order, although
> this is arguably a very minor concern.
> 
> Fixes: 32b135a7fafe ("arm64: head: avoid cache invalidation when entering with the MMU on")
> Reported-by: Nathan Chancellor <nathan@kernel.org>
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>

Tested-by: Nathan Chancellor <nathan@kernel.org>

Thanks a lot for the quick fix!

>
> [...]
Catalin Marinas Jan. 26, 2023, 6:48 p.m. UTC | #3
On Wed, 25 Jan 2023 19:59:10 +0100, Ard Biesheuvel wrote:
> Ensure that the endianness used for populating the ID map matches the
> endianness that the running kernel will be using, as this is no longer
> guaranteed now that create_idmap() is invoked before init_kernel_el().
> 
> Note that doing so is only safe if the MMU is off, as switching the
> endianness with the MMU on causes the active ID map to become
> invalid. So also clear the C and M bits when toggling the EE bit in
> SCTLR, and mark the MMU as disabled at boot.
> 
> [...]

Applied to arm64 (for-next/efi-boot-mmu-on), thanks!

[1/1] arm64: head: Switch endianness before populating the ID map
      https://git.kernel.org/arm64/c/2ced0f30a426
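
The CPU_LE()/CPU_BE() pair added in the head.S hunk below branches to the
fixup path at 1f whenever the data endianness currently programmed in
SCTLR_ELx.EE disagrees with the endianness the kernel was built for. In rough
C terms (a sketch; endianness_mismatch is an illustrative name, not a kernel
function):

    #define SCTLR_ELx_EE_SHIFT      25

    /*
     * Sketch of the test the CPU_LE()/CPU_BE() tbnz/tbz pair encodes:
     * a nonzero result corresponds to taking the branch to label 1.
     */
    static int endianness_mismatch(unsigned long sctlr, int kernel_is_big_endian)
    {
            int ee = (sctlr >> SCTLR_ELx_EE_SHIFT) & 1;

            return ee != kernel_is_big_endian;
    }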

Patch

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1312fb48f18b5a51..2facd7933948677a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -575,6 +575,7 @@ 
 #define SCTLR_ELx_DSSBS	(BIT(44))
 #define SCTLR_ELx_ATA	(BIT(43))
 
+#define SCTLR_ELx_EE_SHIFT	25
 #define SCTLR_ELx_ENIA_SHIFT	31
 
 #define SCTLR_ELx_ITFSB	 (BIT(37))
@@ -583,7 +584,7 @@ 
 #define SCTLR_ELx_LSMAOE (BIT(29))
 #define SCTLR_ELx_nTLSMD (BIT(28))
 #define SCTLR_ELx_ENDA	 (BIT(27))
-#define SCTLR_ELx_EE     (BIT(25))
+#define SCTLR_ELx_EE     (BIT(SCTLR_ELx_EE_SHIFT))
 #define SCTLR_ELx_EIS	 (BIT(22))
 #define SCTLR_ELx_IESB	 (BIT(21))
 #define SCTLR_ELx_TSCXT	 (BIT(20))
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index dc56e1d8f36eb387..107a2b87577c9b0e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -129,10 +129,31 @@ SYM_CODE_START_LOCAL(record_mmu_state)
 	mrs	x19, sctlr_el1
 	b.ne	0f
 	mrs	x19, sctlr_el2
-0:	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
+0:
+CPU_LE( tbnz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
+CPU_BE( tbz	x19, #SCTLR_ELx_EE_SHIFT, 1f	)
+	tst	x19, #SCTLR_ELx_C		// Z := (C == 0)
 	and	x19, x19, #SCTLR_ELx_M		// isolate M bit
 	csel	x19, xzr, x19, eq		// clear x19 if Z
 	ret
+
+	/*
+	 * Set the correct endianness early so all memory accesses issued
+	 * before init_kernel_el() occur in the correct byte order. Note that
+	 * this means the MMU must be disabled, or the active ID map will end
+	 * up getting interpreted with the wrong byte order.
+	 */
+1:	eor	x19, x19, #SCTLR_ELx_EE
+	bic	x19, x19, #SCTLR_ELx_M
+	b.ne	2f
+	pre_disable_mmu_workaround
+	msr	sctlr_el2, x19
+	b	3f
+2:	pre_disable_mmu_workaround
+	msr	sctlr_el1, x19
+3:	isb
+	mov	x19, xzr
+	ret
 SYM_CODE_END(record_mmu_state)
 
 /*
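
For completeness, the fixup path at label 1 can be paraphrased in C as below.
This is a sketch only: write_sctlr_el1(), write_sctlr_el2() and isb() are
stand-ins for the msr/isb instructions, and the booted_at_el2 argument stands
in for the condition flags that b.ne 2f reuses from the CurrentEL comparison
earlier in record_mmu_state (eor and bic leave the flags untouched).

    #include <stdint.h>

    #define SCTLR_ELx_EE    (1ULL << 25)
    #define SCTLR_ELx_M     (1ULL << 0)

    /* Illustrative stand-ins for the msr and isb instructions. */
    static void write_sctlr_el1(uint64_t v) { (void)v; }
    static void write_sctlr_el2(uint64_t v) { (void)v; }
    static void isb(void) { }

    /*
     * Paraphrase of the path at label 1: flip EE so the data byte order
     * matches the kernel's, clear M so the now-stale ID map can no longer
     * be used, install the new value at the exception level we booted at,
     * and return 0 so the caller records the MMU as disabled.
     */
    static uint64_t switch_endianness(uint64_t sctlr, int booted_at_el2)
    {
            sctlr ^= SCTLR_ELx_EE;          /* eor x19, x19, #SCTLR_ELx_EE */
            sctlr &= ~SCTLR_ELx_M;          /* bic x19, x19, #SCTLR_ELx_M  */

            if (booted_at_el2)
                    write_sctlr_el2(sctlr); /* msr sctlr_el2, x19 */
            else
                    write_sctlr_el1(sctlr); /* msr sctlr_el1, x19 */

            isb();                          /* synchronize the SCTLR update */
            return 0;                       /* mov x19, xzr */
    }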