Message ID | 20240606133628.3330423-6-luca.fancellu@arm.com (mailing list archive)
---|---
State | New, archived
Series | Add Armv8-R AArch64 support
On Thu, 6 Jun 2024 14:36:26 +0100 Luca Fancellu <luca.fancellu@arm.com> wrote: Hi, > The Armv8-R AArch64 profile does not support the EL3 exception level. > The Armv8-R AArch64 profile allows for an (optional) VMSAv8-64 MMU > at EL1, which allows to run off-the-shelf Linux. However EL2 only > supports a PMSA, which is not supported by Linux, so we need to drop > into EL1 before entering the kernel. > > We add a new err_invalid_arch symbol as a dead loop. If we detect the > current Armv8-R aarch64 only supports with PMSA, meaning we cannot boot > Linux anymore, then we jump to err_invalid_arch. > > During Armv8-R aarch64 init, to make sure nothing unexpected traps into > EL2, we auto-detect and config FIEN and EnSCXT in HCR_EL2. > > The boot sequence is: > If CurrentEL == EL3, then goto EL3 initialisation and drop to lower EL > before entering the kernel. > If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf (Armv8-R aarch64), > if id_aa64mmfr0_el1.MSA_frac == 0x2, > then goto Armv8-R AArch64 initialisation and drop to EL1 before > entering the kernel. > else, which means VMSA unsupported and cannot boot Linux, > goto err_invalid_arch (dead loop). > Else, no initialisation and keep the current EL before entering the > kernel. > > Signed-off-by: Luca Fancellu <luca.fancellu@arm.com> > --- > arch/aarch64/boot.S | 63 ++++++++++++++++++++++++++++++++-- > arch/aarch64/include/asm/cpu.h | 10 ++++++ > arch/aarch64/init.c | 24 +++++++++++++ > 3 files changed, 94 insertions(+), 3 deletions(-) > > diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S > index 211077af17c8..b2b9863b8d6a 100644 > --- a/arch/aarch64/boot.S > +++ b/arch/aarch64/boot.S > @@ -22,7 +22,8 @@ > * EL2 must be implemented. > * > * - EL2 (Non-secure) > - * Entering at EL2 is partially supported. > + * Entering at EL2 is partially supported for Armv8-A. > + * Entering at EL2 is supported for Armv8-R. > * PSCI is not supported when entered in this exception level. > */ > ASM_FUNC(_start) > @@ -76,7 +77,50 @@ reset_at_el2: > msr sctlr_el2, x0 > isb > > - b reset_no_el3 > + /* Detect Armv8-R AArch64 */ > + mrs x1, id_aa64mmfr0_el1 > + /* > + * Check MSA, bits [51:48]: > + * 0xf means Armv8-R AArch64. > + * If not 0xf, proceed in Armv8-A EL2. > + */ > + ubfx x0, x1, #48, #4 // MSA > + cmp x0, 0xf > + bne reset_no_el3 > + > + /* > + * Armv8-R AArch64 is found, check if Linux can be booted. > + * Check MSA_frac, bits [55:52]: > + * 0x2 means EL1&0 translation regime also supports VMSAv8-64. > + */ > + ubfx x0, x1, #52, #4 // MSA_frac > + cmp x0, 0x2 > + /* > + * If not 0x2, no VMSA, so cannot boot Linux and dead loop. > + * Also, since the architecture guarantees that those CPUID > + * fields never lose features when the value in a field > + * increases, we use blt to cover it. > + */ > + blt err_invalid_arch For the records: this part above is correct, compared against the Armv8-R AArch64 ARM ARM supplement (DDI 0600A.c) > + > + /* Start Armv8-R Linux at EL1 */ > + mov w0, #SPSR_KERNEL_EL1 > + ldr x1, =spsr_to_elx > + str w0, [x1] > + > + cpuid x0, x1 > + bl find_logical_id > + cmp x0, #MPIDR_INVALID > + b.eq err_invalid_id > + bl setup_stack > + > + bl cpu_init_bootwrapper > + > + bl cpu_init_armv8r_el2 > + > + bl gic_secure_init Do we actually need this? v8-R64 is always secure, so there is nothing special that we'd need to do here? Have you tried dropping this? Also this sequence looks like to have quite some overlap with the existing code. Can't we rewrite it like this: .... 
blt err_invalid_arch (set SPSR_KERNEL_EL1) bl cpu_init_armv8r_el2 b reset_no_el3 and just find a solution for flag_keep_el? > + > + b start_bootmethod > > /* > * EL1 initialization > @@ -104,6 +148,7 @@ reset_no_el3: > b start_bootmethod > > err_invalid_id: > +err_invalid_arch: > b . > > /* > @@ -121,10 +166,14 @@ ASM_FUNC(jump_kernel) > ldr x0, =SCTLR_EL1_KERNEL > msr sctlr_el1, x0 > > + mrs x5, CurrentEL > + cmp x5, #CURRENTEL_EL2 > + b.eq 1f > + > ldr x0, =SCTLR_EL2_KERNEL > msr sctlr_el2, x0 > > - cpuid x0, x1 > +1: cpuid x0, x1 > bl find_logical_id > bl setup_stack // Reset stack pointer > > @@ -147,10 +196,18 @@ ASM_FUNC(jump_kernel) > */ > bfi x4, x19, #5, #1 > > + mrs x5, CurrentEL > + cmp x5, #CURRENTEL_EL2 > + b.eq 1f > + > msr elr_el3, x19 > msr spsr_el3, x4 > eret > > +1: msr elr_el2, x19 > + msr spsr_el2, x4 > + eret > + > .ltorg > > .data > diff --git a/arch/aarch64/include/asm/cpu.h b/arch/aarch64/include/asm/cpu.h > index 846b89f8405d..6b2f5fbe4502 100644 > --- a/arch/aarch64/include/asm/cpu.h > +++ b/arch/aarch64/include/asm/cpu.h > @@ -58,7 +58,13 @@ > #define SCR_EL3_TCR2EN BIT(43) > #define SCR_EL3_PIEN BIT(45) > > +#define VTCR_EL2_MSA BIT(31) > + > #define HCR_EL2_RES1 BIT(1) > +#define HCR_EL2_APK_NOTRAP BIT(40) > +#define HCR_EL2_API_NOTRAP BIT(41) > +#define HCR_EL2_FIEN_NOTRAP BIT(47) > +#define HCR_EL2_ENSCXT_NOTRAP BIT(53) That looks misaligned (checked in the file), you'd need two tabs. > #define ID_AA64DFR0_EL1_PMSVER BITS(35, 32) > #define ID_AA64DFR0_EL1_TRACEBUFFER BITS(47, 44) > @@ -88,7 +94,9 @@ > > #define ID_AA64PFR1_EL1_MTE BITS(11, 8) > #define ID_AA64PFR1_EL1_SME BITS(27, 24) > +#define ID_AA64PFR0_EL1_RAS BITS(31, 28) > #define ID_AA64PFR0_EL1_SVE BITS(35, 32) > +#define ID_AA64PFR0_EL1_CSV2 BITS(59, 56) Same here, two tabs. The bits and registers are correct, though. > #define ID_AA64SMFR0_EL1 s3_0_c0_c4_5 > #define ID_AA64SMFR0_EL1_FA64 BIT(63) > @@ -114,6 +122,7 @@ > #define SPSR_I (1 << 7) /* IRQ masked */ > #define SPSR_F (1 << 6) /* FIQ masked */ > #define SPSR_T (1 << 5) /* Thumb */ > +#define SPSR_EL1H (5 << 0) /* EL1 Handler mode */ > #define SPSR_EL2H (9 << 0) /* EL2 Handler mode */ > #define SPSR_HYP (0x1a << 0) /* M[3:0] = hyp, M[4] = AArch32 */ > > @@ -153,6 +162,7 @@ > #else > #define SCTLR_EL1_KERNEL SCTLR_EL1_RES1 > #define SPSR_KERNEL (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL2H) > +#define SPSR_KERNEL_EL1 (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL1H) (that one is fine, btw, it just looks off in the diff) > #endif > > #ifndef __ASSEMBLY__ > diff --git a/arch/aarch64/init.c b/arch/aarch64/init.c > index 37cb45fde446..8006f2705193 100644 > --- a/arch/aarch64/init.c > +++ b/arch/aarch64/init.c > @@ -145,6 +145,30 @@ void cpu_init_el3(void) > msr(CNTFRQ_EL0, COUNTER_FREQ); > } > > +void cpu_init_armv8r_el2(void) > +{ > + unsigned long hcr = mrs(hcr_el2); > + > + msr(vpidr_el2, mrs(midr_el1)); > + msr(vmpidr_el2, mrs(mpidr_el1)); > + > + /* VTCR_MSA: VMSAv8-64 support */ > + msr(vtcr_el2, VTCR_EL2_MSA); > + > + if (mrs_field(ID_AA64PFR0_EL1, CSV2) <= 2) What is the significance of "<= 2"? Shall this read ">= 2"? > + hcr |= HCR_EL2_ENSCXT_NOTRAP; > + > + if (mrs_field(ID_AA64PFR0_EL1, RAS) <= 2) Same here, FEAT_RASv1p1 is 0b0010, so it should be ">= 2"? Cheers, Andre > + hcr |= HCR_EL2_FIEN_NOTRAP; > + > + if (cpu_has_pauth()) > + hcr |= HCR_EL2_APK_NOTRAP | HCR_EL2_API_NOTRAP; > + > + msr(hcr_el2, hcr); > + isb(); > + msr(CNTFRQ_EL0, COUNTER_FREQ); > +} > + > #ifdef PSCI > extern char psci_vectors[]; >
Hi Andre, > >> + >> + /* Start Armv8-R Linux at EL1 */ >> + mov w0, #SPSR_KERNEL_EL1 >> + ldr x1, =spsr_to_elx >> + str w0, [x1] >> + >> + cpuid x0, x1 >> + bl find_logical_id >> + cmp x0, #MPIDR_INVALID >> + b.eq err_invalid_id >> + bl setup_stack >> + >> + bl cpu_init_bootwrapper >> + >> + bl cpu_init_armv8r_el2 >> + >> + bl gic_secure_init > > Do we actually need this? v8-R64 is always secure, so there is nothing > special that we'd need to do here? Have you tried dropping this? > > Also this sequence looks like to have quite some overlap with the existing > code. > Can't we rewrite it like this: > > .... > blt err_invalid_arch > (set SPSR_KERNEL_EL1) > > bl cpu_init_armv8r_el2 > > b reset_no_el3 > > and just find a solution for flag_keep_el? What do you think about the below changes to be applied to this patch? diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S index b2b9863b8d6a..2a8234f7a17d 100644 --- a/arch/aarch64/boot.S +++ b/arch/aarch64/boot.S @@ -108,19 +108,9 @@ reset_at_el2: ldr x1, =spsr_to_elx str w0, [x1] - cpuid x0, x1 - bl find_logical_id - cmp x0, #MPIDR_INVALID - b.eq err_invalid_id - bl setup_stack - - bl cpu_init_bootwrapper - bl cpu_init_armv8r_el2 - bl gic_secure_init - - b start_bootmethod + b reset_no_el3 /* * EL1 initialization @@ -139,10 +129,16 @@ reset_no_el3: b.eq err_invalid_id bl setup_stack + ldr w1, spsr_to_elx + and w0, w1, 0xf + cmp w0, #SPSR_EL1H + b.eq drop_el + mov w0, #1 ldr x1, =flag_keep_el str w0, [x1] +drop_el: bl cpu_init_bootwrapper b start_bootmethod > >> + >> + b start_bootmethod >> >> /* >> * EL1 initialization >> @@ -104,6 +148,7 @@ reset_no_el3: >> b start_bootmethod >> >> err_invalid_id: >> +err_invalid_arch: >> b . >> >> /* >> @@ -121,10 +166,14 @@ ASM_FUNC(jump_kernel) >> ldr x0, =SCTLR_EL1_KERNEL >> msr sctlr_el1, x0 >> >> + mrs x5, CurrentEL >> + cmp x5, #CURRENTEL_EL2 >> + b.eq 1f >> + >> ldr x0, =SCTLR_EL2_KERNEL >> msr sctlr_el2, x0 >> >> - cpuid x0, x1 >> +1: cpuid x0, x1 >> bl find_logical_id >> bl setup_stack // Reset stack pointer >> >> @@ -147,10 +196,18 @@ ASM_FUNC(jump_kernel) >> */ >> bfi x4, x19, #5, #1 >> >> + mrs x5, CurrentEL >> + cmp x5, #CURRENTEL_EL2 >> + b.eq 1f >> + >> msr elr_el3, x19 >> msr spsr_el3, x4 >> eret >> >> +1: msr elr_el2, x19 >> + msr spsr_el2, x4 >> + eret >> + >> .ltorg >> >> .data >> diff --git a/arch/aarch64/include/asm/cpu.h b/arch/aarch64/include/asm/cpu.h >> index 846b89f8405d..6b2f5fbe4502 100644 >> --- a/arch/aarch64/include/asm/cpu.h >> +++ b/arch/aarch64/include/asm/cpu.h >> @@ -58,7 +58,13 @@ >> #define SCR_EL3_TCR2EN BIT(43) >> #define SCR_EL3_PIEN BIT(45) >> >> +#define VTCR_EL2_MSA BIT(31) >> + >> #define HCR_EL2_RES1 BIT(1) >> +#define HCR_EL2_APK_NOTRAP BIT(40) >> +#define HCR_EL2_API_NOTRAP BIT(41) >> +#define HCR_EL2_FIEN_NOTRAP BIT(47) >> +#define HCR_EL2_ENSCXT_NOTRAP BIT(53) > > That looks misaligned (checked in the file), you'd need two tabs. > >> #define ID_AA64DFR0_EL1_PMSVER BITS(35, 32) >> #define ID_AA64DFR0_EL1_TRACEBUFFER BITS(47, 44) >> @@ -88,7 +94,9 @@ >> >> #define ID_AA64PFR1_EL1_MTE BITS(11, 8) >> #define ID_AA64PFR1_EL1_SME BITS(27, 24) >> +#define ID_AA64PFR0_EL1_RAS BITS(31, 28) >> #define ID_AA64PFR0_EL1_SVE BITS(35, 32) >> +#define ID_AA64PFR0_EL1_CSV2 BITS(59, 56) > > Same here, two tabs. > > The bits and registers are correct, though. 
> >> #define ID_AA64SMFR0_EL1 s3_0_c0_c4_5 >> #define ID_AA64SMFR0_EL1_FA64 BIT(63) >> @@ -114,6 +122,7 @@ >> #define SPSR_I (1 << 7) /* IRQ masked */ >> #define SPSR_F (1 << 6) /* FIQ masked */ >> #define SPSR_T (1 << 5) /* Thumb */ >> +#define SPSR_EL1H (5 << 0) /* EL1 Handler mode */ >> #define SPSR_EL2H (9 << 0) /* EL2 Handler mode */ >> #define SPSR_HYP (0x1a << 0) /* M[3:0] = hyp, M[4] = AArch32 */ >> >> @@ -153,6 +162,7 @@ >> #else >> #define SCTLR_EL1_KERNEL SCTLR_EL1_RES1 >> #define SPSR_KERNEL (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL2H) >> +#define SPSR_KERNEL_EL1 (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL1H) > > (that one is fine, btw, it just looks off in the diff) Thanks, I’ll fix all the alignments, probably some misconfiguration of my editor. > >> #endif >> >> #ifndef __ASSEMBLY__ >> diff --git a/arch/aarch64/init.c b/arch/aarch64/init.c >> index 37cb45fde446..8006f2705193 100644 >> --- a/arch/aarch64/init.c >> +++ b/arch/aarch64/init.c >> @@ -145,6 +145,30 @@ void cpu_init_el3(void) >> msr(CNTFRQ_EL0, COUNTER_FREQ); >> } >> >> +void cpu_init_armv8r_el2(void) >> +{ >> + unsigned long hcr = mrs(hcr_el2); >> + >> + msr(vpidr_el2, mrs(midr_el1)); >> + msr(vmpidr_el2, mrs(mpidr_el1)); >> + >> + /* VTCR_MSA: VMSAv8-64 support */ >> + msr(vtcr_el2, VTCR_EL2_MSA); >> + >> + if (mrs_field(ID_AA64PFR0_EL1, CSV2) <= 2) > > What is the significance of "<= 2"? Shall this read ">= 2"? I think you are right, also, reading again the manual we have the HCR_EL2.EnSCXT bit implemented only when FEAT_CSV2_2 is implemented or FEAT_CSV2_1p2 is implemented, otherwise it’s res0, so I think I should check: 1) FEAT_CSV2_2 which is implemented if ID_AA64PFR0_EL1.CSV2 is 0010 (2) 2) FEAT_CSV2_1p2 which is implemented if ID_AA64PFR1_EL1.CSV2_frac is 0010 (2) Does it sounds ok for you? > >> + hcr |= HCR_EL2_ENSCXT_NOTRAP; >> + >> + if (mrs_field(ID_AA64PFR0_EL1, RAS) <= 2) > > Same here, FEAT_RASv1p1 is 0b0010, so it should be ">= 2"? I’ll fix it. Cheers, Luca
Hi Andre, > On 24 Jun 2024, at 13:22, Luca Fancellu <Luca.Fancellu@arm.com> wrote: > > Hi Andre, > >> >>> + >>> + /* Start Armv8-R Linux at EL1 */ >>> + mov w0, #SPSR_KERNEL_EL1 >>> + ldr x1, =spsr_to_elx >>> + str w0, [x1] >>> + >>> + cpuid x0, x1 >>> + bl find_logical_id >>> + cmp x0, #MPIDR_INVALID >>> + b.eq err_invalid_id >>> + bl setup_stack >>> + >>> + bl cpu_init_bootwrapper >>> + >>> + bl cpu_init_armv8r_el2 >>> + >>> + bl gic_secure_init >> >> Do we actually need this? v8-R64 is always secure, so there is nothing >> special that we'd need to do here? Have you tried dropping this? >> >> Also this sequence looks like to have quite some overlap with the existing >> code. >> Can't we rewrite it like this: >> >> .... >> blt err_invalid_arch >> (set SPSR_KERNEL_EL1) >> >> bl cpu_init_armv8r_el2 >> >> b reset_no_el3 >> >> and just find a solution for flag_keep_el? > > What do you think about the below changes to be applied to this patch? > > diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S > index b2b9863b8d6a..2a8234f7a17d 100644 > --- a/arch/aarch64/boot.S > +++ b/arch/aarch64/boot.S > @@ -108,19 +108,9 @@ reset_at_el2: > ldr x1, =spsr_to_elx > str w0, [x1] > > - cpuid x0, x1 > - bl find_logical_id > - cmp x0, #MPIDR_INVALID > - b.eq err_invalid_id > - bl setup_stack > - > - bl cpu_init_bootwrapper > - > bl cpu_init_armv8r_el2 > > - bl gic_secure_init > - > - b start_bootmethod > + b reset_no_el3 > > /* > * EL1 initialization > @@ -139,10 +129,16 @@ reset_no_el3: > b.eq err_invalid_id > bl setup_stack > > + ldr w1, spsr_to_elx > + and w0, w1, 0xf > + cmp w0, #SPSR_EL1H > + b.eq drop_el > + > mov w0, #1 > ldr x1, =flag_keep_el > str w0, [x1] > > +drop_el: > bl cpu_init_bootwrapper > > b start_bootmethod > > Gentle ping on this... >> >>> #endif >>> >>> #ifndef __ASSEMBLY__ >>> diff --git a/arch/aarch64/init.c b/arch/aarch64/init.c >>> index 37cb45fde446..8006f2705193 100644 >>> --- a/arch/aarch64/init.c >>> +++ b/arch/aarch64/init.c >>> @@ -145,6 +145,30 @@ void cpu_init_el3(void) >>> msr(CNTFRQ_EL0, COUNTER_FREQ); >>> } >>> >>> +void cpu_init_armv8r_el2(void) >>> +{ >>> + unsigned long hcr = mrs(hcr_el2); >>> + >>> + msr(vpidr_el2, mrs(midr_el1)); >>> + msr(vmpidr_el2, mrs(mpidr_el1)); >>> + >>> + /* VTCR_MSA: VMSAv8-64 support */ >>> + msr(vtcr_el2, VTCR_EL2_MSA); >>> + >>> + if (mrs_field(ID_AA64PFR0_EL1, CSV2) <= 2) >> >> What is the significance of "<= 2"? Shall this read ">= 2"? > > I think you are right, also, reading again the manual we have the HCR_EL2.EnSCXT bit implemented only when > FEAT_CSV2_2 is implemented or FEAT_CSV2_1p2 is implemented, otherwise it’s res0, so I think I should check: > > 1) FEAT_CSV2_2 which is implemented if ID_AA64PFR0_EL1.CSV2 is 0010 (2) > 2) FEAT_CSV2_1p2 which is implemented if ID_AA64PFR1_EL1.CSV2_frac is 0010 (2) > > Does it sounds ok for you? And this. Cheers, Luca
On Mon, 24 Jun 2024 13:22:38 +0100 Luca Fancellu <Luca.Fancellu@arm.com> wrote: Hi Luca, > Hi Andre, > > > > >> + > >> + /* Start Armv8-R Linux at EL1 */ > >> + mov w0, #SPSR_KERNEL_EL1 > >> + ldr x1, =spsr_to_elx > >> + str w0, [x1] > >> + > >> + cpuid x0, x1 > >> + bl find_logical_id > >> + cmp x0, #MPIDR_INVALID > >> + b.eq err_invalid_id > >> + bl setup_stack > >> + > >> + bl cpu_init_bootwrapper > >> + > >> + bl cpu_init_armv8r_el2 > >> + > >> + bl gic_secure_init > > > > Do we actually need this? v8-R64 is always secure, so there is nothing > > special that we'd need to do here? Have you tried dropping this? > > > > Also this sequence looks like to have quite some overlap with the existing > > code. > > Can't we rewrite it like this: > > > > .... > > blt err_invalid_arch > > (set SPSR_KERNEL_EL1) > > > > bl cpu_init_armv8r_el2 > > > > b reset_no_el3 > > > > and just find a solution for flag_keep_el? > > What do you think about the below changes to be applied to this patch? A patch to a patch is really hard to read, please send a new version with your proposal. > diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S > index b2b9863b8d6a..2a8234f7a17d 100644 > --- a/arch/aarch64/boot.S > +++ b/arch/aarch64/boot.S > @@ -108,19 +108,9 @@ reset_at_el2: > ldr x1, =spsr_to_elx > str w0, [x1] > > - cpuid x0, x1 > - bl find_logical_id > - cmp x0, #MPIDR_INVALID > - b.eq err_invalid_id > - bl setup_stack > - > - bl cpu_init_bootwrapper > - > bl cpu_init_armv8r_el2 > > - bl gic_secure_init > - > - b start_bootmethod > + b reset_no_el3 > > /* > * EL1 initialization > @@ -139,10 +129,16 @@ reset_no_el3: > b.eq err_invalid_id > bl setup_stack > > + ldr w1, spsr_to_elx > + and w0, w1, 0xf > + cmp w0, #SPSR_EL1H > + b.eq drop_el > + > mov w0, #1 > ldr x1, =flag_keep_el > str w0, [x1] > > +drop_el: > bl cpu_init_bootwrapper > > b start_bootmethod > > > > > >> + > >> + b start_bootmethod > >> > >> /* > >> * EL1 initialization > >> @@ -104,6 +148,7 @@ reset_no_el3: > >> b start_bootmethod > >> > >> err_invalid_id: > >> +err_invalid_arch: > >> b . > >> > >> /* > >> @@ -121,10 +166,14 @@ ASM_FUNC(jump_kernel) > >> ldr x0, =SCTLR_EL1_KERNEL > >> msr sctlr_el1, x0 > >> > >> + mrs x5, CurrentEL > >> + cmp x5, #CURRENTEL_EL2 > >> + b.eq 1f > >> + > >> ldr x0, =SCTLR_EL2_KERNEL > >> msr sctlr_el2, x0 > >> > >> - cpuid x0, x1 > >> +1: cpuid x0, x1 > >> bl find_logical_id > >> bl setup_stack // Reset stack pointer > >> > >> @@ -147,10 +196,18 @@ ASM_FUNC(jump_kernel) > >> */ > >> bfi x4, x19, #5, #1 > >> > >> + mrs x5, CurrentEL > >> + cmp x5, #CURRENTEL_EL2 > >> + b.eq 1f > >> + > >> msr elr_el3, x19 > >> msr spsr_el3, x4 > >> eret > >> > >> +1: msr elr_el2, x19 > >> + msr spsr_el2, x4 > >> + eret > >> + > >> .ltorg > >> > >> .data > >> diff --git a/arch/aarch64/include/asm/cpu.h b/arch/aarch64/include/asm/cpu.h > >> index 846b89f8405d..6b2f5fbe4502 100644 > >> --- a/arch/aarch64/include/asm/cpu.h > >> +++ b/arch/aarch64/include/asm/cpu.h > >> @@ -58,7 +58,13 @@ > >> #define SCR_EL3_TCR2EN BIT(43) > >> #define SCR_EL3_PIEN BIT(45) > >> > >> +#define VTCR_EL2_MSA BIT(31) > >> + > >> #define HCR_EL2_RES1 BIT(1) > >> +#define HCR_EL2_APK_NOTRAP BIT(40) > >> +#define HCR_EL2_API_NOTRAP BIT(41) > >> +#define HCR_EL2_FIEN_NOTRAP BIT(47) > >> +#define HCR_EL2_ENSCXT_NOTRAP BIT(53) > > > > That looks misaligned (checked in the file), you'd need two tabs. 
> > > >> #define ID_AA64DFR0_EL1_PMSVER BITS(35, 32) > >> #define ID_AA64DFR0_EL1_TRACEBUFFER BITS(47, 44) > >> @@ -88,7 +94,9 @@ > >> > >> #define ID_AA64PFR1_EL1_MTE BITS(11, 8) > >> #define ID_AA64PFR1_EL1_SME BITS(27, 24) > >> +#define ID_AA64PFR0_EL1_RAS BITS(31, 28) > >> #define ID_AA64PFR0_EL1_SVE BITS(35, 32) > >> +#define ID_AA64PFR0_EL1_CSV2 BITS(59, 56) > > > > Same here, two tabs. > > > > The bits and registers are correct, though. > > > >> #define ID_AA64SMFR0_EL1 s3_0_c0_c4_5 > >> #define ID_AA64SMFR0_EL1_FA64 BIT(63) > >> @@ -114,6 +122,7 @@ > >> #define SPSR_I (1 << 7) /* IRQ masked */ > >> #define SPSR_F (1 << 6) /* FIQ masked */ > >> #define SPSR_T (1 << 5) /* Thumb */ > >> +#define SPSR_EL1H (5 << 0) /* EL1 Handler mode */ > >> #define SPSR_EL2H (9 << 0) /* EL2 Handler mode */ > >> #define SPSR_HYP (0x1a << 0) /* M[3:0] = hyp, M[4] = AArch32 */ > >> > >> @@ -153,6 +162,7 @@ > >> #else > >> #define SCTLR_EL1_KERNEL SCTLR_EL1_RES1 > >> #define SPSR_KERNEL (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL2H) > >> +#define SPSR_KERNEL_EL1 (SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL1H) > > > > (that one is fine, btw, it just looks off in the diff) > > Thanks, I’ll fix all the alignments, probably some misconfiguration of my editor. > > > > >> #endif > >> > >> #ifndef __ASSEMBLY__ > >> diff --git a/arch/aarch64/init.c b/arch/aarch64/init.c > >> index 37cb45fde446..8006f2705193 100644 > >> --- a/arch/aarch64/init.c > >> +++ b/arch/aarch64/init.c > >> @@ -145,6 +145,30 @@ void cpu_init_el3(void) > >> msr(CNTFRQ_EL0, COUNTER_FREQ); > >> } > >> > >> +void cpu_init_armv8r_el2(void) > >> +{ > >> + unsigned long hcr = mrs(hcr_el2); > >> + > >> + msr(vpidr_el2, mrs(midr_el1)); > >> + msr(vmpidr_el2, mrs(mpidr_el1)); > >> + > >> + /* VTCR_MSA: VMSAv8-64 support */ > >> + msr(vtcr_el2, VTCR_EL2_MSA); > >> + > >> + if (mrs_field(ID_AA64PFR0_EL1, CSV2) <= 2) > > > > What is the significance of "<= 2"? Shall this read ">= 2"? > > I think you are right, also, reading again the manual we have the HCR_EL2.EnSCXT bit implemented only when > FEAT_CSV2_2 is implemented or FEAT_CSV2_1p2 is implemented, otherwise it’s res0, so I think I should check: > > 1) FEAT_CSV2_2 which is implemented if ID_AA64PFR0_EL1.CSV2 is 0010 (2) ... or larger than 2, since CSV2_3 includes all functionality of CSV2_2. > 2) FEAT_CSV2_1p2 which is implemented if ID_AA64PFR1_EL1.CSV2_frac is 0010 (2) ... and ID_AA64PFR0_EL1.CSV2 is 0b0001. Also I think CSV2_frac >= 0b0010. > Does it sounds ok for you? Before this gets too complicated, please check two things: 1) HCR_EL2.EnSCXT is RES0 otherwise, which means we can always write this unconditionally, without adverse effects? 2) The Armv8-R64 supplement defines minimum values for PFR0.CSV2, maybe this simplifies some checks? Cheers, Andre > >> + hcr |= HCR_EL2_ENSCXT_NOTRAP; > >> + > >> + if (mrs_field(ID_AA64PFR0_EL1, RAS) <= 2) > > > > Same here, FEAT_RASv1p1 is 0b0010, so it should be ">= 2"? > > I’ll fix it. > > Cheers, > Luca > >
Hi Andre,

> 
> A patch to a patch is really hard to read, please send a new version with
> your proposal.

Sure I will.

>>> 
>> 
>> I think you are right, also, reading again the manual we have the HCR_EL2.EnSCXT bit implemented only when
>> FEAT_CSV2_2 is implemented or FEAT_CSV2_1p2 is implemented, otherwise it’s res0, so I think I should check:
>> 
>> 1) FEAT_CSV2_2 which is implemented if ID_AA64PFR0_EL1.CSV2 is 0010 (2)
> 
> ... or larger than 2, since CSV2_3 includes all functionality of CSV2_2.
> 
>> 2) FEAT_CSV2_1p2 which is implemented if ID_AA64PFR1_EL1.CSV2_frac is 0010 (2)
> 
> ... and ID_AA64PFR0_EL1.CSV2 is 0b0001. Also I think CSV2_frac >= 0b0010.
> 
>> Does it sounds ok for you?
> 
> Before this gets too complicated, please check two things:
> 1) HCR_EL2.EnSCXT is RES0 otherwise, which means we can always write this
> unconditionally, without adverse effects?

Yes, HCR_EL2.EnSCXT is RES0 otherwise, I guess that if we write 1 when it is not implemented, it is ignored? And
for the cases where it is implemented, then 1 would be the right value.
Would that be the right approach? I’ve always avoided writing to reserved fields.

> 2) The Armv8-R64 supplement defines minimum values for PFR0.CSV2, maybe
> this simplifies some checks?

I checked but it doesn’t simplify things, so I think I should do:

if ((mrs_field(ID_AA64PFR0_EL1, CSV2) >= 2) ||
    ((mrs_field(ID_AA64PFR0_EL1, CSV2) >= 1) && (mrs_field(ID_AA64PFR1_EL1, CSV2_frac) >= 2)))
        hcr |= HCR_EL2_ENSCXT_NOTRAP;

Please let me know your preference.

Cheers,
Luca

> 
> Cheers,
> Andre
On Tue, 16 Jul 2024 14:33:08 +0100
Luca Fancellu <Luca.Fancellu@arm.com> wrote:

Hi Luca,

> > A patch to a patch is really hard to read, please send a new version with
> > your proposal.
> 
> Sure I will.
> 
> >>> 
> >> 
> >> I think you are right, also, reading again the manual we have the HCR_EL2.EnSCXT bit implemented only when
> >> FEAT_CSV2_2 is implemented or FEAT_CSV2_1p2 is implemented, otherwise it’s res0, so I think I should check:
> >> 
> >> 1) FEAT_CSV2_2 which is implemented if ID_AA64PFR0_EL1.CSV2 is 0010 (2)
> > 
> > ... or larger than 2, since CSV2_3 includes all functionality of CSV2_2.
> > 
> >> 2) FEAT_CSV2_1p2 which is implemented if ID_AA64PFR1_EL1.CSV2_frac is 0010 (2)
> > 
> > ... and ID_AA64PFR0_EL1.CSV2 is 0b0001. Also I think CSV2_frac >= 0b0010.
> > 
> >> Does it sounds ok for you?
> > 
> > Before this gets too complicated, please check two things:
> > 1) HCR_EL2.EnSCXT is RES0 otherwise, which means we can always write this
> > unconditionally, without adverse effects?
> 
> Yes, HCR_EL2.EnSCXT is RES0 otherwise, I guess that if we write 1 when it is not implemented, it is ignored? And
> for the cases where it is implemented, then 1 would be the right value.
> Would it be the right approach? I’ve always avoided to write on reserved fields.

Yeah, I agree it leaves some bitter taste, though the definition of RES0 in
the glossary seems to suggest it's safe to do.

In this particular case, which is confined to v8-R64, which implements at
least some kind of CSV2, I personally tend to set HCR_EL2.EnSCXT
unconditionally, accompanied by a comment why it is safe to do so.

Cheers,
Andre

> > 2) The Armv8-R64 supplement defines minimum values for PFR0.CSV2, maybe
> > this simplifies some checks?
> 
> I checked but it doesn’t simplifies, so I think I should do:
> 
> if ((mrs_field(ID_AA64PFR0_EL1, CSV2) >= 2) ||
>     ((mrs_field(ID_AA64PFR0_EL1, CSV2) >= 1) && (mrs_field(ID_AA64PFR1_EL1, CSV2_frac) >= 2)))
>         hcr |= HCR_EL2_ENSCXT_NOTRAP;
> 
> Please let me know your preference.
> 
> Cheers,
> Luca
> 
> > 
> > Cheers,
> > Andre
> 
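Putting the thread's conclusions together, a minimal stand-alone C sketch of the HCR_EL2 trap handling could look as follows. This is not the final patch: it assumes the review's suggestions (an unconditional EnSCXT write and a ">= 2" RAS check), the function name is invented for illustration, and it operates on raw register values instead of the boot-wrapper's mrs()/mrs_field() helpers. The bit positions mirror the cpu.h additions in this patch.

#include <stdint.h>

#define HCR_EL2_FIEN_NOTRAP   (1ULL << 47)  /* HCR_EL2.FIEN */
#define HCR_EL2_ENSCXT_NOTRAP (1ULL << 53)  /* HCR_EL2.EnSCXT */

/*
 * Sketch only: compute the HCR_EL2 trap-disable bits discussed above.
 * EnSCXT is RES0 unless FEAT_CSV2_2/FEAT_CSV2_1p2 is implemented and
 * writes to RES0 bits are ignored, so (as suggested above for Armv8-R
 * AArch64, which implements some level of CSV2) it is set unconditionally.
 * FIEN only exists from FEAT_RASv1p1, i.e. ID_AA64PFR0_EL1.RAS >= 2.
 */
static uint64_t armv8r_hcr_traps(uint64_t hcr, uint64_t id_aa64pfr0)
{
        unsigned int ras = (id_aa64pfr0 >> 28) & 0xf;   /* RAS, bits [31:28] */

        hcr |= HCR_EL2_ENSCXT_NOTRAP;

        if (ras >= 2)
                hcr |= HCR_EL2_FIEN_NOTRAP;

        return hcr;
}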
diff --git a/arch/aarch64/boot.S b/arch/aarch64/boot.S
index 211077af17c8..b2b9863b8d6a 100644
--- a/arch/aarch64/boot.S
+++ b/arch/aarch64/boot.S
@@ -22,7 +22,8 @@
  * EL2 must be implemented.
  *
  * - EL2 (Non-secure)
- * Entering at EL2 is partially supported.
+ * Entering at EL2 is partially supported for Armv8-A.
+ * Entering at EL2 is supported for Armv8-R.
  * PSCI is not supported when entered in this exception level.
  */
 ASM_FUNC(_start)
@@ -76,7 +77,50 @@ reset_at_el2:
 	msr	sctlr_el2, x0
 	isb
 
-	b	reset_no_el3
+	/* Detect Armv8-R AArch64 */
+	mrs	x1, id_aa64mmfr0_el1
+	/*
+	 * Check MSA, bits [51:48]:
+	 * 0xf means Armv8-R AArch64.
+	 * If not 0xf, proceed in Armv8-A EL2.
+	 */
+	ubfx	x0, x1, #48, #4		// MSA
+	cmp	x0, 0xf
+	bne	reset_no_el3
+
+	/*
+	 * Armv8-R AArch64 is found, check if Linux can be booted.
+	 * Check MSA_frac, bits [55:52]:
+	 * 0x2 means EL1&0 translation regime also supports VMSAv8-64.
+	 */
+	ubfx	x0, x1, #52, #4		// MSA_frac
+	cmp	x0, 0x2
+	/*
+	 * If not 0x2, no VMSA, so cannot boot Linux and dead loop.
+	 * Also, since the architecture guarantees that those CPUID
+	 * fields never lose features when the value in a field
+	 * increases, we use blt to cover it.
+	 */
+	blt	err_invalid_arch
+
+	/* Start Armv8-R Linux at EL1 */
+	mov	w0, #SPSR_KERNEL_EL1
+	ldr	x1, =spsr_to_elx
+	str	w0, [x1]
+
+	cpuid	x0, x1
+	bl	find_logical_id
+	cmp	x0, #MPIDR_INVALID
+	b.eq	err_invalid_id
+	bl	setup_stack
+
+	bl	cpu_init_bootwrapper
+
+	bl	cpu_init_armv8r_el2
+
+	bl	gic_secure_init
+
+	b	start_bootmethod
 
 /*
  * EL1 initialization
@@ -104,6 +148,7 @@ reset_no_el3:
 	b	start_bootmethod
 
 err_invalid_id:
+err_invalid_arch:
 	b	.
 
 /*
@@ -121,10 +166,14 @@ ASM_FUNC(jump_kernel)
 	ldr	x0, =SCTLR_EL1_KERNEL
 	msr	sctlr_el1, x0
 
+	mrs	x5, CurrentEL
+	cmp	x5, #CURRENTEL_EL2
+	b.eq	1f
+
 	ldr	x0, =SCTLR_EL2_KERNEL
 	msr	sctlr_el2, x0
 
-	cpuid	x0, x1
+1:	cpuid	x0, x1
 	bl	find_logical_id
 	bl	setup_stack		// Reset stack pointer
 
@@ -147,10 +196,18 @@ ASM_FUNC(jump_kernel)
 	 */
 	bfi	x4, x19, #5, #1
 
+	mrs	x5, CurrentEL
+	cmp	x5, #CURRENTEL_EL2
+	b.eq	1f
+
 	msr	elr_el3, x19
 	msr	spsr_el3, x4
 	eret
 
+1:	msr	elr_el2, x19
+	msr	spsr_el2, x4
+	eret
+
 	.ltorg
 
 	.data
diff --git a/arch/aarch64/include/asm/cpu.h b/arch/aarch64/include/asm/cpu.h
index 846b89f8405d..6b2f5fbe4502 100644
--- a/arch/aarch64/include/asm/cpu.h
+++ b/arch/aarch64/include/asm/cpu.h
@@ -58,7 +58,13 @@
 #define SCR_EL3_TCR2EN			BIT(43)
 #define SCR_EL3_PIEN			BIT(45)
 
+#define VTCR_EL2_MSA			BIT(31)
+
 #define HCR_EL2_RES1			BIT(1)
+#define HCR_EL2_APK_NOTRAP		BIT(40)
+#define HCR_EL2_API_NOTRAP		BIT(41)
+#define HCR_EL2_FIEN_NOTRAP		BIT(47)
+#define HCR_EL2_ENSCXT_NOTRAP		BIT(53)
 
 #define ID_AA64DFR0_EL1_PMSVER		BITS(35, 32)
 #define ID_AA64DFR0_EL1_TRACEBUFFER	BITS(47, 44)
@@ -88,7 +94,9 @@
 
 #define ID_AA64PFR1_EL1_MTE		BITS(11, 8)
 #define ID_AA64PFR1_EL1_SME		BITS(27, 24)
+#define ID_AA64PFR0_EL1_RAS		BITS(31, 28)
 #define ID_AA64PFR0_EL1_SVE		BITS(35, 32)
+#define ID_AA64PFR0_EL1_CSV2		BITS(59, 56)
 
 #define ID_AA64SMFR0_EL1		s3_0_c0_c4_5
 #define ID_AA64SMFR0_EL1_FA64		BIT(63)
@@ -114,6 +122,7 @@
 #define SPSR_I		(1 << 7)	/* IRQ masked */
 #define SPSR_F		(1 << 6)	/* FIQ masked */
 #define SPSR_T		(1 << 5)	/* Thumb */
+#define SPSR_EL1H	(5 << 0)	/* EL1 Handler mode */
 #define SPSR_EL2H	(9 << 0)	/* EL2 Handler mode */
 #define SPSR_HYP	(0x1a << 0)	/* M[3:0] = hyp, M[4] = AArch32 */
 
@@ -153,6 +162,7 @@
 #else
 #define SCTLR_EL1_KERNEL	SCTLR_EL1_RES1
 #define SPSR_KERNEL		(SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL2H)
+#define SPSR_KERNEL_EL1		(SPSR_A | SPSR_D | SPSR_I | SPSR_F | SPSR_EL1H)
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/aarch64/init.c b/arch/aarch64/init.c
index 37cb45fde446..8006f2705193 100644
--- a/arch/aarch64/init.c
+++ b/arch/aarch64/init.c
@@ -145,6 +145,30 @@ void cpu_init_el3(void)
 	msr(CNTFRQ_EL0, COUNTER_FREQ);
 }
 
+void cpu_init_armv8r_el2(void)
+{
+	unsigned long hcr = mrs(hcr_el2);
+
+	msr(vpidr_el2, mrs(midr_el1));
+	msr(vmpidr_el2, mrs(mpidr_el1));
+
+	/* VTCR_MSA: VMSAv8-64 support */
+	msr(vtcr_el2, VTCR_EL2_MSA);
+
+	if (mrs_field(ID_AA64PFR0_EL1, CSV2) <= 2)
+		hcr |= HCR_EL2_ENSCXT_NOTRAP;
+
+	if (mrs_field(ID_AA64PFR0_EL1, RAS) <= 2)
+		hcr |= HCR_EL2_FIEN_NOTRAP;
+
+	if (cpu_has_pauth())
+		hcr |= HCR_EL2_APK_NOTRAP | HCR_EL2_API_NOTRAP;
+
+	msr(hcr_el2, hcr);
+	isb();
+	msr(CNTFRQ_EL0, COUNTER_FREQ);
+}
+
 #ifdef PSCI
 extern char psci_vectors[];
The Armv8-R AArch64 profile does not support the EL3 exception level.
The Armv8-R AArch64 profile allows for an (optional) VMSAv8-64 MMU
at EL1, which allows running off-the-shelf Linux. However, EL2 only
supports a PMSA, which is not supported by Linux, so we need to drop
into EL1 before entering the kernel.

We add a new err_invalid_arch symbol as a dead loop. If we detect that
the current Armv8-R AArch64 implementation only supports a PMSA, meaning
we cannot boot Linux, we jump to err_invalid_arch.

During Armv8-R AArch64 init, to make sure nothing unexpected traps into
EL2, we auto-detect and configure FIEN and EnSCXT in HCR_EL2.

The boot sequence is:
If CurrentEL == EL3, then go to the EL3 initialisation and drop to a
lower EL before entering the kernel.
If CurrentEL == EL2 && id_aa64mmfr0_el1.MSA == 0xf (Armv8-R AArch64),
  if id_aa64mmfr0_el1.MSA_frac == 0x2,
    then go to the Armv8-R AArch64 initialisation and drop to EL1
    before entering the kernel.
  else, VMSA is unsupported and Linux cannot boot,
    so go to err_invalid_arch (dead loop).
Else, do no initialisation and keep the current EL before entering
the kernel.

Signed-off-by: Luca Fancellu <luca.fancellu@arm.com>
---
 arch/aarch64/boot.S            | 63 ++++++++++++++++++++++++++++++++--
 arch/aarch64/include/asm/cpu.h | 10 ++++++
 arch/aarch64/init.c            | 24 +++++++++++++
 3 files changed, 94 insertions(+), 3 deletions(-)
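For readers following the boot sequence above, here is a compilable, stand-alone C rendering of the EL2 entry decision. It is illustrative only: the real logic lives in boot.S, the enum and function names are invented, and the field offsets follow the ID_AA64MMFR0_EL1 layout quoted in the patch (MSA in bits [51:48], MSA_frac in bits [55:52]).

#include <stdint.h>

enum el2_entry_path {
        BOOT_ARMV8A_EL2,          /* not Armv8-R: keep the existing Armv8-A EL2 path */
        BOOT_ARMV8R_DROP_TO_EL1,  /* Armv8-R with VMSAv8-64 at EL1&0: init EL2, enter kernel at EL1 */
        BOOT_INVALID_ARCH,        /* Armv8-R with PMSA only: Linux cannot boot, park the CPU */
};

static enum el2_entry_path classify_el2_entry(uint64_t id_aa64mmfr0)
{
        unsigned int msa      = (id_aa64mmfr0 >> 48) & 0xf;  /* MSA,      bits [51:48] */
        unsigned int msa_frac = (id_aa64mmfr0 >> 52) & 0xf;  /* MSA_frac, bits [55:52] */

        if (msa != 0xf)
                return BOOT_ARMV8A_EL2;
        if (msa_frac >= 0x2)            /* matches the "blt err_invalid_arch" check in boot.S */
                return BOOT_ARMV8R_DROP_TO_EL1;
        return BOOT_INVALID_ARCH;
}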