diff mbox series

[boot-wrapper,v2,8/8] aarch64: Introduce EL2 boot code for Armv8-R AArch64

Message ID 20210521104807.138269-9-jaxson.han@arm.com (mailing list archive)
State New, archived
Headers show
Series Add Armv8-R AArch64 support | expand

Commit Message

Jaxson Han May 21, 2021, 10:48 a.m. UTC
The Armv8-R AArch64 profile does not support the EL3 exception level.
The Armv8-R AArch64 profile allows for an (optional) VMSAv8-64 MMU
at EL1, which makes it possible to run off-the-shelf Linux. However,
EL2 only supports a PMSA, which is not supported by Linux, so we need
to drop into EL1 before entering the kernel.

We add a new err_invalid_arch symbol as a dead loop. If we detect that
the current Armv8-R AArch64 CPU supports only PMSA, meaning we cannot
boot Linux, then we jump to err_invalid_arch.

During Armv8-R AArch64 init, to make sure nothing unexpectedly traps
into EL2, we auto-detect and configure FIEN and EnSCXT in HCR_EL2.

The boot sequence is:
If CurrentEL == EL3, then goto EL3 initialisation and drop to lower EL
  before entering the kernel.
If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf (Armv8-R aarch64),
  if id_aa64mmfr0_el1.MSA_frac == 0x2,
    then goto Armv8-R AArch64 initialisation and drop to EL1 before
    entering the kernel.
  else, which means VMSA unsupported and cannot boot Linux,
    goto err_invalid_arch (dead loop).
Else, no initialisation and keep the current EL before entering the
  kernel.

Signed-off-by: Jaxson Han <jaxson.han@arm.com>
---
 arch/aarch64/boot.S | 87 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 85 insertions(+), 2 deletions(-)

Comments

Andre Przywara May 24, 2021, 10:19 a.m. UTC | #1
On Fri, 21 May 2021 18:48:07 +0800
Jaxson Han <jaxson.han@arm.com> wrote:

Hi,

> The Armv8-R AArch64 profile does not support the EL3 exception level.
> The Armv8-R AArch64 profile allows for an (optional) VMSAv8-64 MMU
> at EL1, which allows to run off-the-shelf Linux. However EL2 only
> supports a PMSA, which is not supported by Linux, so we need to drop
> into EL1 before entering the kernel.
> 
> We add a new err_invalid_arch symbol as a dead loop. If we detect the
> current Armv8-R aarch64 only supports with PMSA, meaning we cannot boot
> Linux anymore, then we jump to err_invalid_arch.
> 
> During Armv8-R aarch64 init, to make sure nothing unexpected traps into
> EL2, we auto-detect and config FIEN and EnSCXT in HCR_EL2.
> 
> The boot sequence is:
> If CurrentEL == EL3, then goto EL3 initialisation and drop to lower EL
>   before entering the kernel.
> If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf (Armv8-R aarch64),
>   if id_aa64mmfr0_el1.MSA_frac == 0x2,
>     then goto Armv8-R AArch64 initialisation and drop to EL1 before
>     entering the kernel.
>   else, which means VMSA unsupported and cannot boot Linux,
>     goto err_invalid_arch (dead loop).
> Else, no initialisation and keep the current EL before entering the
>   kernel.

This looks good in general, thanks for the commit message and the
comments inline. Some smaller things below:

> 
> Signed-off-by: Jaxson Han <jaxson.han@arm.com>
> ---
>  arch/aarch64/boot.S | 87 +++++++++++++++++++++++++++++++++++++++++++--
>  1 file changed, 85 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S
> index 14fd9cf..0339e19 100644
> --- a/arch/aarch64/boot.S
> +++ b/arch/aarch64/boot.S
> @@ -25,16 +25,24 @@ _start:
>  	 * Boot sequence
>  	 * If CurrentEL == EL3, then goto EL3 initialisation and drop to
>  	 *   lower EL before entering the kernel.
> +	 * If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf, then
> +	 *   If id_aa64mmfr0_el1.MSA_frac == 0x2, then goto
> +	 *     Armv8-R AArch64 initialisation and drop to EL1 before
> +	 *     entering the kernel.
> +	 *   Else, which means VMSA unsupported and cannot boot Linux,
> +	 *     goto err_invalid_arch (dead loop).
>  	 * Else, no initialisation and keep the current EL before
>  	 *   entering the kernel.
>  	 */
>  	mrs	x0, CurrentEL
> -	cmp	x0, #CURRENTEL_EL3
> -	beq	el3_init
> +	cmp	x0, #CURRENTEL_EL2
> +	bgt	el3_init
> +	beq	el2_init
>  
>  	/*
>  	 * We stay in the current EL for entering the kernel
>  	 */
> +keep_el:
>  	mov	w0, #1
>  	ldr	x1, =flag_keep_el
>  	str	w0, [x1]
> @@ -127,6 +135,80 @@ el3_init:
>  	str	w0, [x1]
>  	b	el_max_init
>  
> +	/*
> +	 * EL2 Armv8-R AArch64 initialisation
> +	 */
> +el2_init:
> +	/* Detect Armv8-R AArch64 */
> +	mrs	x1, id_aa64mmfr0_el1
> +	/*
> +	 * Check MSA, bits [51:48]:
> +	 * 0xf means Armv8-R AArch64.
> +	 * If not 0xf, goto keep_el.

Can you please change this last line to:
"If not 0xf, proceed in ARMv8-A EL2."

> +	 */
> +	ubfx	x0, x1, #48, #4			// MSA
> +	cmp	x0, 0xf
> +	bne	keep_el
> +	/*
> +	 * Check MSA_frac, bits [55:52]:
> +	 * 0x2 means EL1&0 translation regime also supports VMSAv8-64.
> +	 */
> +	ubfx	x0, x1, #52, #4			// MSA_frac
> +	cmp	x0, 0x2
> +	/* If not 0x2, no VMSA, so cannot boot Linux and dead loop. */
> +	bne	err_invalid_arch

The architecture guarantees that those CPUID fields never lose features
when the value in a field increases. So there might be some 0x3 feature
addition in the future, which wouldn't be covered right now.
So can you please change this branch to "b.lt" instead?

> +
> +	mrs	x0, midr_el1
> +	msr	vpidr_el2, x0
> +
> +	mrs	x0, mpidr_el1
> +	msr	vmpidr_el2, x0
> +
> +	mov	x0, #(1 << 31)			// VTCR_MSA: VMSAv8-64 support
> +	msr	vtcr_el2, x0
> +
> +	/* Init HCR_EL2 */
> +	mov	x0, #(1 << 31)			// RES1

Please add to the comment that it's RES1 in ARMv8-R64 only.

> +
> +	mrs	x1, id_aa64pfr0_el1
> +	ubfx	x2, x1, #56, 4

Can you please add the CSV2 name of the field as a comment here?

> +	cmp	x2, 0x2
> +	bne	1f

To stay compatible with future additions to the CSV2 field, please use
b.lt here.

> +	/*
> +	 * Disable trap when accessing SCTXNUM_EL0 or SCTXNUM_EL1
> +	 * if FEAT_CSV2.
> +	 */
> +	orr	x0, x0, #(1 << 53)		// EnSCXT
> +
> +1:	ubfx	x2, x1, #28, 4
> +	cmp	x2, 0x2
> +	bne	1f

As above: b.lt.

Rest looks fine to me, compared the bits and values to the manuals.

Thanks,
Andre

> +	/* Disable trap when accessing ERXPFGCDN_EL1 if FEAT_RASv1p1. */
> +	orr	x0, x0, #(1 << 47)		// FIEN
> +
> +	/* Enable pointer authentication if present */
> +1:	mrs	x1, id_aa64isar1_el1
> +	/*
> +	 * If ID_AA64ISAR1_EL1.{GPI, GPA, API, APA} == {0000, 0000, 0000, 0000}
> +	 *   then HCR_EL2.APK and HCR_EL2.API are RES 0.
> +	 * Else
> +	 *   set HCR_EL2.APK and HCR_EL2.API.
> +	 */
> +	ldr	x2, =(((0xff) << 24) | (0xff << 4))
> +	and	x1, x1, x2
> +	cbz	x1, 1f
> +
> +	orr	x0, x0, #(1 << 40)		// APK
> +	orr	x0, x0, #(1 << 41)		// API
> +
> +1:	msr	hcr_el2, x0
> +	isb
> +
> +	mov	w0, #SPSR_KERNEL_EL1
> +	ldr	x1, =spsr_to_elx
> +	str	w0, [x1]
> +	// fall through
> +
>  el_max_init:
>  	ldr	x0, =CNTFRQ
>  	msr	cntfrq_el0, x0
> @@ -136,6 +218,7 @@ el_max_init:
>  	b	start_el_max
>  
>  err_invalid_id:
> +err_invalid_arch:
>  	b	.
>  
>  	/*
Jaxson Han May 25, 2021, 1:59 a.m. UTC | #2
Hi Andre,

> -----Original Message-----
> From: Andre Przywara <andre.przywara@arm.com>
> Sent: Monday, May 24, 2021 6:20 PM
> To: Jaxson Han <Jaxson.Han@arm.com>
> Cc: Mark Rutland <Mark.Rutland@arm.com>; linux-arm-
> kernel@lists.infradead.org; Wei Chen <Wei.Chen@arm.com>
> Subject: Re: [boot-wrapper PATCH v2 8/8] aarch64: Introduce EL2 boot code
> for Armv8-R AArch64
> 
> On Fri, 21 May 2021 18:48:07 +0800
> Jaxson Han <jaxson.han@arm.com> wrote:
> 
> Hi,
> 
> > The Armv8-R AArch64 profile does not support the EL3 exception level.
> > The Armv8-R AArch64 profile allows for an (optional) VMSAv8-64 MMU at
> > EL1, which allows to run off-the-shelf Linux. However EL2 only
> > supports a PMSA, which is not supported by Linux, so we need to drop
> > into EL1 before entering the kernel.
> >
> > We add a new err_invalid_arch symbol as a dead loop. If we detect the
> > current Armv8-R aarch64 only supports with PMSA, meaning we cannot
> > boot Linux anymore, then we jump to err_invalid_arch.
> >
> > During Armv8-R aarch64 init, to make sure nothing unexpected traps
> > into EL2, we auto-detect and config FIEN and EnSCXT in HCR_EL2.
> >
> > The boot sequence is:
> > If CurrentEL == EL3, then goto EL3 initialisation and drop to lower EL
> >   before entering the kernel.
> > If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf (Armv8-R aarch64),
> >   if id_aa64mmfr0_el1.MSA_frac == 0x2,
> >     then goto Armv8-R AArch64 initialisation and drop to EL1 before
> >     entering the kernel.
> >   else, which means VMSA unsupported and cannot boot Linux,
> >     goto err_invalid_arch (dead loop).
> > Else, no initialisation and keep the current EL before entering the
> >   kernel.
> 
> This looks good in general, thanks for the commit message and the
> comments inline. Some smaller things below:
> 
> >
> > Signed-off-by: Jaxson Han <jaxson.han@arm.com>
> > ---
> >  arch/aarch64/boot.S | 87
> > +++++++++++++++++++++++++++++++++++++++++++--
> >  1 file changed, 85 insertions(+), 2 deletions(-)
> >
> > diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S index
> > 14fd9cf..0339e19 100644
> > --- a/arch/aarch64/boot.S
> > +++ b/arch/aarch64/boot.S
> > @@ -25,16 +25,24 @@ _start:
> >  	 * Boot sequence
> >  	 * If CurrentEL == EL3, then goto EL3 initialisation and drop to
> >  	 *   lower EL before entering the kernel.
> > +	 * If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf, then
> > +	 *   If id_aa64mmfr0_el1.MSA_frac == 0x2, then goto
> > +	 *     Armv8-R AArch64 initialisation and drop to EL1 before
> > +	 *     entering the kernel.
> > +	 *   Else, which means VMSA unsupported and cannot boot Linux,
> > +	 *     goto err_invalid_arch (dead loop).
> >  	 * Else, no initialisation and keep the current EL before
> >  	 *   entering the kernel.
> >  	 */
> >  	mrs	x0, CurrentEL
> > -	cmp	x0, #CURRENTEL_EL3
> > -	beq	el3_init
> > +	cmp	x0, #CURRENTEL_EL2
> > +	bgt	el3_init
> > +	beq	el2_init
> >
> >  	/*
> >  	 * We stay in the current EL for entering the kernel
> >  	 */
> > +keep_el:
> >  	mov	w0, #1
> >  	ldr	x1, =flag_keep_el
> >  	str	w0, [x1]
> > @@ -127,6 +135,80 @@ el3_init:
> >  	str	w0, [x1]
> >  	b	el_max_init
> >
> > +	/*
> > +	 * EL2 Armv8-R AArch64 initialisation
> > +	 */
> > +el2_init:
> > +	/* Detect Armv8-R AArch64 */
> > +	mrs	x1, id_aa64mmfr0_el1
> > +	/*
> > +	 * Check MSA, bits [51:48]:
> > +	 * 0xf means Armv8-R AArch64.
> > +	 * If not 0xf, goto keep_el.
> 
> Can you please change this last line to:
> "If not 0xf, proceed in ARMv8-A EL2."
> 
> > +	 */
> > +	ubfx	x0, x1, #48, #4			// MSA
> > +	cmp	x0, 0xf
> > +	bne	keep_el
> > +	/*
> > +	 * Check MSA_frac, bits [55:52]:
> > +	 * 0x2 means EL1&0 translation regime also supports VMSAv8-64.
> > +	 */
> > +	ubfx	x0, x1, #52, #4			// MSA_frac
> > +	cmp	x0, 0x2
> > +	/* If not 0x2, no VMSA, so cannot boot Linux and dead loop. */
> > +	bne	err_invalid_arch
> 
> The architecture guarantees that those CPUID fields never lose features
> when the value in a field increases. So there might be some 0x3 feature
> addition in the future, which wouldn't be covered right now.
> So can you please change this branch to "b.lt" instead?

Yes, I will change it.

> 
> > +
> > +	mrs	x0, midr_el1
> > +	msr	vpidr_el2, x0
> > +
> > +	mrs	x0, mpidr_el1
> > +	msr	vmpidr_el2, x0
> > +
> > +	mov	x0, #(1 << 31)			// VTCR_MSA: VMSAv8-64
> support
> > +	msr	vtcr_el2, x0
> > +
> > +	/* Init HCR_EL2 */
> > +	mov	x0, #(1 << 31)			// RES1
> 
> Please add to the comment that it's RES1 in ARMv8-R64 only.

Yes, no problem.

> 
> > +
> > +	mrs	x1, id_aa64pfr0_el1
> > +	ubfx	x2, x1, #56, 4
> 
> Can you please add the CSV2 name of the field as a comment here?

Yes I will

> 
> > +	cmp	x2, 0x2
> > +	bne	1f
> 
> To stay compatible with future additions to the CSV2 field, please use b.lt
> here.

No problem

> 
> > +	/*
> > +	 * Disable trap when accessing SCTXNUM_EL0 or SCTXNUM_EL1
> > +	 * if FEAT_CSV2.
> > +	 */
> > +	orr	x0, x0, #(1 << 53)		// EnSCXT
> > +
> > +1:	ubfx	x2, x1, #28, 4
> > +	cmp	x2, 0x2
> > +	bne	1f
> 
> As above: b.lt.

Yes I will

Thanks,
Jaxson

> 
> Rest looks fine to me, compared the bits and values to the manuals.
> 
> Thanks,
> Andre
> 
> > +	/* Disable trap when accessing ERXPFGCDN_EL1 if FEAT_RASv1p1. */
> > +	orr	x0, x0, #(1 << 47)		// FIEN
> > +
> > +	/* Enable pointer authentication if present */
> > +1:	mrs	x1, id_aa64isar1_el1
> > +	/*
> > +	 * If ID_AA64ISAR1_EL1.{GPI, GPA, API, APA} == {0000, 0000, 0000,
> 0000}
> > +	 *   then HCR_EL2.APK and HCR_EL2.API are RES 0.
> > +	 * Else
> > +	 *   set HCR_EL2.APK and HCR_EL2.API.
> > +	 */
> > +	ldr	x2, =(((0xff) << 24) | (0xff << 4))
> > +	and	x1, x1, x2
> > +	cbz	x1, 1f
> > +
> > +	orr	x0, x0, #(1 << 40)		// APK
> > +	orr	x0, x0, #(1 << 41)		// API
> > +
> > +1:	msr	hcr_el2, x0
> > +	isb
> > +
> > +	mov	w0, #SPSR_KERNEL_EL1
> > +	ldr	x1, =spsr_to_elx
> > +	str	w0, [x1]
> > +	// fall through
> > +
> >  el_max_init:
> >  	ldr	x0, =CNTFRQ
> >  	msr	cntfrq_el0, x0
> > @@ -136,6 +218,7 @@ el_max_init:
> >  	b	start_el_max
> >
> >  err_invalid_id:
> > +err_invalid_arch:
> >  	b	.
> >
> >  	/*
diff mbox series

Patch

diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S
index 14fd9cf..0339e19 100644
--- a/arch/aarch64/boot.S
+++ b/arch/aarch64/boot.S
@@ -25,16 +25,24 @@  _start:
 	 * Boot sequence
 	 * If CurrentEL == EL3, then goto EL3 initialisation and drop to
 	 *   lower EL before entering the kernel.
+	 * If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf, then
+	 *   If id_aa64mmfr0_el1.MSA_frac == 0x2, then goto
+	 *     Armv8-R AArch64 initialisation and drop to EL1 before
+	 *     entering the kernel.
+	 *   Else, which means VMSA unsupported and cannot boot Linux,
+	 *     goto err_invalid_arch (dead loop).
 	 * Else, no initialisation and keep the current EL before
 	 *   entering the kernel.
 	 */
 	mrs	x0, CurrentEL
-	cmp	x0, #CURRENTEL_EL3
-	beq	el3_init
+	cmp	x0, #CURRENTEL_EL2
+	bgt	el3_init
+	beq	el2_init
 
 	/*
 	 * We stay in the current EL for entering the kernel
 	 */
+keep_el:
 	mov	w0, #1
 	ldr	x1, =flag_keep_el
 	str	w0, [x1]
@@ -127,6 +135,80 @@  el3_init:
 	str	w0, [x1]
 	b	el_max_init
 
+	/*
+	 * EL2 Armv8-R AArch64 initialisation
+	 */
+el2_init:
+	/* Detect Armv8-R AArch64 */
+	mrs	x1, id_aa64mmfr0_el1
+	/*
+	 * Check MSA, bits [51:48]:
+	 * 0xf means Armv8-R AArch64.
+	 * If not 0xf, goto keep_el.
+	 */
+	ubfx	x0, x1, #48, #4			// MSA
+	cmp	x0, 0xf
+	bne	keep_el
+	/*
+	 * Check MSA_frac, bits [55:52]:
+	 * 0x2 means EL1&0 translation regime also supports VMSAv8-64.
+	 */
+	ubfx	x0, x1, #52, #4			// MSA_frac
+	cmp	x0, 0x2
+	/* If not 0x2, no VMSA, so cannot boot Linux and dead loop. */
+	bne	err_invalid_arch
+
+	mrs	x0, midr_el1
+	msr	vpidr_el2, x0
+
+	mrs	x0, mpidr_el1
+	msr	vmpidr_el2, x0
+
+	mov	x0, #(1 << 31)			// VTCR_MSA: VMSAv8-64 support
+	msr	vtcr_el2, x0
+
+	/* Init HCR_EL2 */
+	mov	x0, #(1 << 31)			// RES1
+
+	mrs	x1, id_aa64pfr0_el1
+	ubfx	x2, x1, #56, 4
+	cmp	x2, 0x2
+	bne	1f
+	/*
+	 * Disable trap when accessing SCTXNUM_EL0 or SCTXNUM_EL1
+	 * if FEAT_CSV2.
+	 */
+	orr	x0, x0, #(1 << 53)		// EnSCXT
+
+1:	ubfx	x2, x1, #28, 4
+	cmp	x2, 0x2
+	bne	1f
+	/* Disable trap when accessing ERXPFGCDN_EL1 if FEAT_RASv1p1. */
+	orr	x0, x0, #(1 << 47)		// FIEN
+
+	/* Enable pointer authentication if present */
+1:	mrs	x1, id_aa64isar1_el1
+	/*
+	 * If ID_AA64ISAR1_EL1.{GPI, GPA, API, APA} == {0000, 0000, 0000, 0000}
+	 *   then HCR_EL2.APK and HCR_EL2.API are RES 0.
+	 * Else
+	 *   set HCR_EL2.APK and HCR_EL2.API.
+	 */
+	ldr	x2, =(((0xff) << 24) | (0xff << 4))
+	and	x1, x1, x2
+	cbz	x1, 1f
+
+	orr	x0, x0, #(1 << 40)		// APK
+	orr	x0, x0, #(1 << 41)		// API
+
+1:	msr	hcr_el2, x0
+	isb
+
+	mov	w0, #SPSR_KERNEL_EL1
+	ldr	x1, =spsr_to_elx
+	str	w0, [x1]
+	// fall through
+
 el_max_init:
 	ldr	x0, =CNTFRQ
 	msr	cntfrq_el0, x0
@@ -136,6 +218,7 @@  el_max_init:
 	b	start_el_max
 
 err_invalid_id:
+err_invalid_arch:
 	b	.
 
 	/*