Message ID | 1583476525-13505-7-git-send-email-amit.kachhap@arm.com (mailing list archive)
---|---
State | New, archived |
Series | arm64: return address signing
Hi Amit,

On 06/03/2020 06:35, Amit Daniel Kachhap wrote:
> This patch allows __cpu_setup to be invoked with one of these flags,
> ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME.
> This is required as some cpufeatures need different handling during
> different scenarios.
>
> The input parameter in x0 is preserved till the end to be used inside
> this function.
>
> There should be no functional change with this patch and is useful
> for the subsequent ptrauth patch which utilizes it. Some upcoming
> arm cpufeatures can also utilize these flags.

Reviewed-by: James Morse <james.morse@arm.com>

(this will conflict with Ionela's AMU series, which will need to not
clobber x0 during __cpu_setup.)

Thanks,

James
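James's point about x0 is worth spelling out: once __cpu_setup takes its mode flag in x0 and keeps it live until the end, any code added to the function (such as the AMU probing in Ionela's series) has to use other scratch registers. A minimal sketch of the constraint, with a hypothetical feature probe rather than anything from either series:

	// x0 carries the ARM64_CPU_* flag for the whole of __cpu_setup
	mrs	x2, id_aa64pfr0_el1		// hypothetical feature probe: use
	...					// x2, never x0, for scratch values
	cmp	x0, #ARM64_CPU_BOOT_PRIMARY	// the flag is still live here
	b.ne	1f
	...					// primary-boot-only initialisation
1:
	mov_q	x0, SCTLR_EL1_SET		// x0 is clobbered only at the very
	ret					// end, as the return value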
On Fri, Mar 06, 2020 at 07:07:19PM +0000, James Morse wrote:
> Hi Amit,
>
> On 06/03/2020 06:35, Amit Daniel Kachhap wrote:
> > This patch allows __cpu_setup to be invoked with one of these flags,
> > ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME.
> > This is required as some cpufeatures need different handling during
> > different scenarios.
> >
> > The input parameter in x0 is preserved till the end to be used inside
> > this function.
> >
> > There should be no functional change with this patch and is useful
> > for the subsequent ptrauth patch which utilizes it. Some upcoming
> > arm cpufeatures can also utilize these flags.
>
> Reviewed-by: James Morse <james.morse@arm.com>
>
> (this will conflict with Ionela's AMU series, which will need to not
> clobber x0 during __cpu_setup.)

Thanks for the heads-up. I'll fix it up when merging the series.
Hi Amit,

On 3/6/20 6:35 AM, Amit Daniel Kachhap wrote:
> This patch allows __cpu_setup to be invoked with one of these flags,
> ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME.
> This is required as some cpufeatures need different handling during
> different scenarios.
>

I could not find any explanation in this patch on what these flags stand for.
Could you please add it? Maybe near where you define them.

With this:

Reviewed-by: Vincenzo Frascino <Vincenzo.Frascino@arm.com>

> The input parameter in x0 is preserved till the end to be used inside
> this function.
>
> There should be no functional change with this patch and is useful
> for the subsequent ptrauth patch which utilizes it. Some upcoming
> arm cpufeatures can also utilize these flags.
>
> Suggested-by: James Morse <james.morse@arm.com>
> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
> ---
>  arch/arm64/include/asm/smp.h |  5 +++++
>  arch/arm64/kernel/head.S     |  2 ++
>  arch/arm64/kernel/sleep.S    |  2 ++
>  arch/arm64/mm/proc.S         | 26 +++++++++++++++-----------
>  4 files changed, 24 insertions(+), 11 deletions(-)
>
> diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
> index a0c8a0b..8159000 100644
> --- a/arch/arm64/include/asm/smp.h
> +++ b/arch/arm64/include/asm/smp.h
> @@ -23,6 +23,11 @@
>  #define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
>  #define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)
>
> +/* Options for __cpu_setup */
> +#define ARM64_CPU_BOOT_PRIMARY		(1)
> +#define ARM64_CPU_BOOT_SECONDARY	(2)
> +#define ARM64_CPU_RUNTIME		(3)
> +
>  #ifndef __ASSEMBLY__
>
>  #include <asm/percpu.h>
> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index 3d18163..5a7ce15 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -118,6 +118,7 @@ ENTRY(stext)
>  	 * On return, the CPU will be ready for the MMU to be turned on and
>  	 * the TCR will have been set.
>  	 */
> +	mov	x0, #ARM64_CPU_BOOT_PRIMARY
>  	bl	__cpu_setup			// initialise processor
>  	b	__primary_switch
>  ENDPROC(stext)
> @@ -712,6 +713,7 @@ secondary_startup:
>  	 * Common entry point for secondary CPUs.
>  	 */
>  	bl	__cpu_secondary_check52bitva
> +	mov	x0, #ARM64_CPU_BOOT_SECONDARY
>  	bl	__cpu_setup			// initialise processor
>  	adrp	x1, swapper_pg_dir
>  	bl	__enable_mmu
> diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
> index f5b04dd..7b2f2e6 100644
> --- a/arch/arm64/kernel/sleep.S
> +++ b/arch/arm64/kernel/sleep.S
> @@ -3,6 +3,7 @@
>  #include <linux/linkage.h>
>  #include <asm/asm-offsets.h>
>  #include <asm/assembler.h>
> +#include <asm/smp.h>
>
>  	.text
>  /*
> @@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter)
>  	.pushsection ".idmap.text", "awx"
>  ENTRY(cpu_resume)
>  	bl	el2_setup		// if in EL2 drop to EL1 cleanly
> +	mov	x0, #ARM64_CPU_RUNTIME
>  	bl	__cpu_setup
>  	/* enable the MMU early - so we can access sleep_save_stash by va */
>  	adrp	x1, swapper_pg_dir
> diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
> index aafed69..ea0db17 100644
> --- a/arch/arm64/mm/proc.S
> +++ b/arch/arm64/mm/proc.S
> @@ -408,31 +408,31 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
>  /*
>   *	__cpu_setup
>   *
> - *	Initialise the processor for turning the MMU on. Return in x0 the
> - *	value of the SCTLR_EL1 register.
> + *	Initialise the processor for turning the MMU on.
> + *
> + * Input:
> + *	x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_SECONDARY/ARM64_CPU_RUNTIME.
> + * Output:
> + *	Return in x0 the value of the SCTLR_EL1 register.
>   */
>  	.pushsection ".idmap.text", "awx"
>  SYM_FUNC_START(__cpu_setup)
>  	tlbi	vmalle1				// Invalidate local TLB
>  	dsb	nsh
>
> -	mov	x0, #3 << 20
> -	msr	cpacr_el1, x0			// Enable FP/ASIMD
> -	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
> -	msr	mdscr_el1, x0			// access to the DCC from EL0
> +	mov	x1, #3 << 20
> +	msr	cpacr_el1, x1			// Enable FP/ASIMD
> +	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
> +	msr	mdscr_el1, x1			// access to the DCC from EL0
>  	isb					// Unmask debug exceptions now,
>  	enable_dbg				// since this is per-cpu
> -	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
> +	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
>  	/*
>  	 * Memory region attributes
>  	 */
>  	mov_q	x5, MAIR_EL1_SET
>  	msr	mair_el1, x5
>  	/*
> -	 * Prepare SCTLR
> -	 */
> -	mov_q	x0, SCTLR_EL1_SET
> -	/*
>  	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
>  	 * both user and kernel.
>  	 */
> @@ -468,5 +468,9 @@ SYM_FUNC_START(__cpu_setup)
>  1:
>  #endif /* CONFIG_ARM64_HW_AFDBM */
>  	msr	tcr_el1, x10
> +	/*
> +	 * Prepare SCTLR
> +	 */
> +	mov_q	x0, SCTLR_EL1_SET
>  	ret					// return to head.S
> SYM_FUNC_END(__cpu_setup)
>
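The explanation Vincenzo asks for follows directly from the three call sites (stext, secondary_startup and cpu_resume), so the defines could plausibly grow comments along these lines; the wording below is illustrative, not taken from the later v7:

	/* Options for __cpu_setup */
	#define ARM64_CPU_BOOT_PRIMARY		(1)	/* cold boot of the primary CPU */
	#define ARM64_CPU_BOOT_SECONDARY	(2)	/* cold boot of a secondary CPU */
	#define ARM64_CPU_RUNTIME		(3)	/* re-init at runtime, e.g. resume from suspend */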
Hi,

On 3/10/20 5:44 PM, Vincenzo Frascino wrote:
> Hi Amit,
>
> On 3/6/20 6:35 AM, Amit Daniel Kachhap wrote:
>> This patch allows __cpu_setup to be invoked with one of these flags,
>> ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_SECONDARY or ARM64_CPU_RUNTIME.
>> This is required as some cpufeatures need different handling during
>> different scenarios.
>>
>
> I could not find any explanation in this patch on what these flags stand for.
> Could you please add it? Maybe near where you define them.

I will add in my V7 version.

>
> With this:
>
> Reviewed-by: Vincenzo Frascino <Vincenzo.Frascino@arm.com>

Thanks.

>
>> The input parameter in x0 is preserved till the end to be used inside
>> this function.
>>
>> There should be no functional change with this patch and is useful
>> for the subsequent ptrauth patch which utilizes it. Some upcoming
>> arm cpufeatures can also utilize these flags.
>>
>> Suggested-by: James Morse <james.morse@arm.com>
>> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a0c8a0b..8159000 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -23,6 +23,11 @@
 #define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
 #define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)

+/* Options for __cpu_setup */
+#define ARM64_CPU_BOOT_PRIMARY		(1)
+#define ARM64_CPU_BOOT_SECONDARY	(2)
+#define ARM64_CPU_RUNTIME		(3)
+
 #ifndef __ASSEMBLY__

 #include <asm/percpu.h>
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 3d18163..5a7ce15 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -118,6 +118,7 @@ ENTRY(stext)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
+	mov	x0, #ARM64_CPU_BOOT_PRIMARY
 	bl	__cpu_setup			// initialise processor
 	b	__primary_switch
 ENDPROC(stext)
@@ -712,6 +713,7 @@ secondary_startup:
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_secondary_check52bitva
+	mov	x0, #ARM64_CPU_BOOT_SECONDARY
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index f5b04dd..7b2f2e6 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
+#include <asm/smp.h>

 	.text
 /*
@@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter)
 	.pushsection ".idmap.text", "awx"
ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
+	mov	x0, #ARM64_CPU_RUNTIME
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index aafed69..ea0db17 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -408,31 +408,31 @@ SYM_FUNC_END(idmap_kpti_install_ng_mappings)
 /*
  *	__cpu_setup
  *
- *	Initialise the processor for turning the MMU on. Return in x0 the
- *	value of the SCTLR_EL1 register.
+ *	Initialise the processor for turning the MMU on.
+ *
+ * Input:
+ *	x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_SECONDARY/ARM64_CPU_RUNTIME.
+ * Output:
+ *	Return in x0 the value of the SCTLR_EL1 register.
  */
 	.pushsection ".idmap.text", "awx"
SYM_FUNC_START(__cpu_setup)
 	tlbi	vmalle1				// Invalidate local TLB
 	dsb	nsh

-	mov	x0, #3 << 20
-	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
-	msr	mdscr_el1, x0			// access to the DCC from EL0
+	mov	x1, #3 << 20
+	msr	cpacr_el1, x1			// Enable FP/ASIMD
+	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x1			// access to the DCC from EL0
 	isb					// Unmask debug exceptions now,
 	enable_dbg				// since this is per-cpu
-	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
 	/*
 	 * Memory region attributes
 	 */
 	mov_q	x5, MAIR_EL1_SET
 	msr	mair_el1, x5
 	/*
-	 * Prepare SCTLR
-	 */
-	mov_q	x0, SCTLR_EL1_SET
-	/*
 	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
 	 * both user and kernel.
 	 */
@@ -468,5 +468,9 @@ SYM_FUNC_START(__cpu_setup)
1:
#endif /* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
+	/*
+	 * Prepare SCTLR
+	 */
+	mov_q	x0, SCTLR_EL1_SET
 	ret					// return to head.S
SYM_FUNC_END(__cpu_setup)
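The reason x0 is preserved until the end is so that later patches can branch on it inside __cpu_setup. As a hedged illustration of such a consumer (the label and the gated step here are hypothetical, not the actual ptrauth patch), the flag could gate boot-time-only work like this:

	cmp	x0, #ARM64_CPU_RUNTIME	// re-entered from cpu_resume?
	b.eq	3f			// runtime: skip boot-time-only setup
	...				// e.g. one-off initialisation for a
					// new cpufeature on the boot paths
3: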