Message ID | 1576486038-9899-6-git-send-email-amit.kachhap@arm.com (mailing list archive) |
---|---|
State | New, archived |
Series | arm64: return address signing |
On 16/12/2019 08:47, Amit Daniel Kachhap wrote:
> This patch allows __cpu_setup to be invoked with one of these flags,
> ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_LATE or ARM64_CPU_RUNTIME.
> This is required as some cpufeatures need different handling during
> different scenarios.
>
> The input parameter in x0 is preserved till the end to be used inside
> this function.
>
> There should be no functional change with this patch and is useful
> for the subsequent ptrauth patch which utilizes it. Some upcoming
> arm cpufeatures can also utilize these flags.
>
> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>

You may add:

Suggested-by: James Morse <james.morse@arm.com>

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
On 1/7/20 4:48 PM, Suzuki Kuruppassery Poulose wrote:
> On 16/12/2019 08:47, Amit Daniel Kachhap wrote:
>> This patch allows __cpu_setup to be invoked with one of these flags,
>> ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_LATE or ARM64_CPU_RUNTIME.
>> This is required as some cpufeatures need different handling during
>> different scenarios.
>>
>> The input parameter in x0 is preserved till the end to be used inside
>> this function.
>>
>> There should be no functional change with this patch and is useful
>> for the subsequent ptrauth patch which utilizes it. Some upcoming
>> arm cpufeatures can also utilize these flags.
>>
>> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
>
> You may add:
>
> Suggested-by: James Morse <james.morse@arm.com>

Sure. I missed it.

> Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>

Thanks for reviewing.
On Mon, Dec 16, 2019 at 02:17:07PM +0530, Amit Daniel Kachhap wrote:
> diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
> index a0c8a0b..008d004 100644
> --- a/arch/arm64/include/asm/smp.h
> +++ b/arch/arm64/include/asm/smp.h
> @@ -23,6 +23,11 @@
>  #define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
>  #define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)
>
> +/* Options for __cpu_setup */
> +#define ARM64_CPU_BOOT_PRIMARY		(1)
> +#define ARM64_CPU_BOOT_LATE		(2)

Nitpick: I'd call this ARM64_CPU_BOOT_SECONDARY. I thought we tend to
use "late" for CPUs brought up after user space started (at least that
was my impression from the cpufeature.c code).
On 1/15/20 11:00 PM, Catalin Marinas wrote:
> On Mon, Dec 16, 2019 at 02:17:07PM +0530, Amit Daniel Kachhap wrote:
>> diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
>> index a0c8a0b..008d004 100644
>> --- a/arch/arm64/include/asm/smp.h
>> +++ b/arch/arm64/include/asm/smp.h
>> @@ -23,6 +23,11 @@
>>  #define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
>>  #define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)
>>
>> +/* Options for __cpu_setup */
>> +#define ARM64_CPU_BOOT_PRIMARY		(1)
>> +#define ARM64_CPU_BOOT_LATE		(2)
>
> Nitpick: I'd call this ARM64_CPU_BOOT_SECONDARY. I thought we tend to
> use "late" for CPUs brought up after user space started (at least that
> was my impression from the cpufeature.c code).

Sure. I will update it in the next version.
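For reference, a sketch of how the smp.h hunk could read after Catalin's suggested rename (hypothetical; the posted patch still uses ARM64_CPU_BOOT_LATE, and the secondary_startup call site in head.S would change accordingly):

	/* Options for __cpu_setup */
	#define ARM64_CPU_BOOT_PRIMARY		(1)
	#define ARM64_CPU_BOOT_SECONDARY	(2)
	#define ARM64_CPU_RUNTIME		(3)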
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a0c8a0b..008d004 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -23,6 +23,11 @@
 #define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
 #define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)
 
+/* Options for __cpu_setup */
+#define ARM64_CPU_BOOT_PRIMARY		(1)
+#define ARM64_CPU_BOOT_LATE		(2)
+#define ARM64_CPU_RUNTIME		(3)
+
 #ifndef __ASSEMBLY__
 
 #include <asm/percpu.h>
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 3d18163..5aaf1bb 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -118,6 +118,7 @@ ENTRY(stext)
 	 * On return, the CPU will be ready for the MMU to be turned on and
 	 * the TCR will have been set.
 	 */
+	mov	x0, #ARM64_CPU_BOOT_PRIMARY
 	bl	__cpu_setup			// initialise processor
 	b	__primary_switch
 ENDPROC(stext)
@@ -712,6 +713,7 @@ secondary_startup:
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_secondary_check52bitva
+	mov	x0, #ARM64_CPU_BOOT_LATE
 	bl	__cpu_setup			// initialise processor
 	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index f5b04dd..7b2f2e6 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -3,6 +3,7 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/assembler.h>
+#include <asm/smp.h>
 
 	.text
 /*
@@ -99,6 +100,7 @@ ENDPROC(__cpu_suspend_enter)
 	.pushsection ".idmap.text", "awx"
ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
+	mov	x0, #ARM64_CPU_RUNTIME
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
 	adrp	x1, swapper_pg_dir
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a1e0592..88cf7e4 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -400,21 +400,25 @@ ENDPROC(idmap_kpti_install_ng_mappings)
 /*
  *	__cpu_setup
  *
- *	Initialise the processor for turning the MMU on. Return in x0 the
- *	value of the SCTLR_EL1 register.
+ *	Initialise the processor for turning the MMU on.
+ *
+ * Input:
+ *	x0 with a flag ARM64_CPU_BOOT_PRIMARY/ARM64_CPU_BOOT_LATE/ARM64_CPU_RUNTIME.
+ * Output:
+ *	Return in x0 the value of the SCTLR_EL1 register.
  */
 	.pushsection ".idmap.text", "awx"
 ENTRY(__cpu_setup)
 	tlbi	vmalle1				// Invalidate local TLB
 	dsb	nsh
 
-	mov	x0, #3 << 20
-	msr	cpacr_el1, x0			// Enable FP/ASIMD
-	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
-	msr	mdscr_el1, x0			// access to the DCC from EL0
+	mov	x1, #3 << 20
+	msr	cpacr_el1, x1			// Enable FP/ASIMD
+	mov	x1, #1 << 12			// Reset mdscr_el1 and disable
+	msr	mdscr_el1, x1			// access to the DCC from EL0
 	isb					// Unmask debug exceptions now,
 	enable_dbg				// since this is per-cpu
-	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+	reset_pmuserenr_el0 x1			// Disable PMU access from EL0
 	/*
 	 * Memory region attributes for LPAE:
 	 *
@@ -435,10 +439,6 @@ ENTRY(__cpu_setup)
 	MAIR(0xbb, MT_NORMAL_WT)
 	msr	mair_el1, x5
 	/*
-	 * Prepare SCTLR
-	 */
-	mov_q	x0, SCTLR_EL1_SET
-	/*
 	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
 	 * both user and kernel.
 	 */
@@ -474,5 +474,9 @@ ENTRY(__cpu_setup)
 1:
 #endif	/* CONFIG_ARM64_HW_AFDBM */
 	msr	tcr_el1, x10
+	/*
+	 * Prepare SCTLR
+	 */
+	mov_q	x0, SCTLR_EL1_SET
 	ret					// return to head.S
 ENDPROC(__cpu_setup)
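For illustration only, a minimal sketch of how a later patch (such as the ptrauth one mentioned in the commit message) might consume the flag that is now preserved in x0 inside __cpu_setup. The check below is hypothetical, not part of this series, and would have to run before x0 is finally overwritten with the SCTLR_EL1 value:

	/*
	 * Hypothetical: perform extra setup only on the primary boot CPU.
	 * x0 still holds the ARM64_CPU_* flag passed in by the caller.
	 */
	cmp	x0, #ARM64_CPU_BOOT_PRIMARY
	b.ne	2f
	/* primary-only initialisation would go here */
2: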
This patch allows __cpu_setup to be invoked with one of these flags,
ARM64_CPU_BOOT_PRIMARY, ARM64_CPU_BOOT_LATE or ARM64_CPU_RUNTIME.
This is required as some cpufeatures need different handling during
different scenarios.

The input parameter in x0 is preserved till the end to be used inside
this function.

There should be no functional change with this patch, and it is useful
for the subsequent ptrauth patch, which utilizes it. Some upcoming
arm cpufeatures can also utilize these flags.

Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
---
Changes since last version:
 * None.

 arch/arm64/include/asm/smp.h |  5 +++++
 arch/arm64/kernel/head.S     |  2 ++
 arch/arm64/kernel/sleep.S    |  2 ++
 arch/arm64/mm/proc.S         | 26 +++++++++++++++-----------
 4 files changed, 24 insertions(+), 11 deletions(-)