
[05/11] arm64: enable ptrauth earlier

Message ID 1571300065-10236-6-git-send-email-amit.kachhap@arm.com (mailing list archive)
State New, archived
Series arm64: return address signing

Commit Message

Amit Daniel Kachhap Oct. 17, 2019, 8:14 a.m. UTC
From: Kristina Martsenko <kristina.martsenko@arm.com>

When the kernel is compiled with pointer auth instructions, the boot CPU
needs to start using address auth very early, so change the cpucap to
account for this.

Pointer auth must be enabled before we call C functions, because it is
not possible to enter a function with pointer auth disabled and exit it
with pointer auth enabled. Note, mismatches between architected and
IMPDEF algorithms will still be caught by the cpufeature framework (the
separate *_ARCH and *_IMP_DEF cpucaps).

Note the change in behavior: if the boot CPU has address auth and a late
CPU does not, then we offline the late CPU. Until now we would have just
disabled address auth in this case.

Leave generic authentication as a "system scope" cpucap for now, since
initially the kernel will only use address authentication.

Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
---
Changes since RFC v2:
 - Moved early enabling from C to assembly, and no longer use the pointer auth
   C function attribute [Suzuki]

 arch/arm64/Kconfig                  |  4 ++++
 arch/arm64/include/asm/cpufeature.h |  9 +++++++
 arch/arm64/include/asm/smp.h        |  1 +
 arch/arm64/kernel/cpufeature.c      | 13 +++-------
 arch/arm64/kernel/head.S            | 48 +++++++++++++++++++++++++++++++++++++
 arch/arm64/kernel/smp.c             |  2 ++
 6 files changed, 67 insertions(+), 10 deletions(-)
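
As an aside on the second paragraph of the commit message (an illustrative sketch
only; "foo" is a made-up function, not code from this series): when the kernel is
built with ptrauth instructions, the compiler pairs paciasp/autiasp in function
prologues and epilogues, and both execute as NOPs while the SCTLR_EL1 enable bits
are clear. Enabling pointer auth in the middle of a function therefore breaks the
pairing:

foo:					// hypothetical ptrauth-built function
	paciasp				// NOP while SCTLR_EL1.EnIA == 0:
	stp	x29, x30, [sp, #-16]!	// the saved LR carries no PAC
	...
	/* suppose a callee enabled pointer auth at this point */
	...
	ldp	x29, x30, [sp], #16
	autiasp				// now live: "authenticates" an LR that
					// was never signed, poisoning the address
	ret				// branch to the poisoned address faults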

Comments

Suzuki K Poulose Oct. 17, 2019, 2:13 p.m. UTC | #1
Hi Amit,

On 17/10/2019 09:14, Amit Daniel Kachhap wrote:
> From: Kristina Martsenko <kristina.martsenko@arm.com>
> 
> When the kernel is compiled with pointer auth instructions, the boot CPU
> needs to start using address auth very early, so change the cpucap to
> account for this.
> 
> Pointer auth must be enabled before we call C functions, because it is
> not possible to enter a function with pointer auth disabled and exit it
> with pointer auth enabled. Note, mismatches between architected and
> IMPDEF algorithms will still be caught by the cpufeature framework (the
> separate *_ARCH and *_IMP_DEF cpucaps).
> 
> Note the change in behavior: if the boot CPU has address auth and a late
> CPU does not, then we offline the late CPU. Until now we would have just
> disabled address auth in this case.
> 
> Leave generic authentication as a "system scope" cpucap for now, since
> initially the kernel will only use address authentication.
> 
> Reviewed-by: Kees Cook <keescook@chromium.org>
> Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
> ---
> Changes since RFC v2:
>   - Moved early enabling from C to assembly, and no longer use the pointer auth
>     C function attribute [Suzuki]
> 
>   arch/arm64/Kconfig                  |  4 ++++
>   arch/arm64/include/asm/cpufeature.h |  9 +++++++
>   arch/arm64/include/asm/smp.h        |  1 +
>   arch/arm64/kernel/cpufeature.c      | 13 +++-------
>   arch/arm64/kernel/head.S            | 48 +++++++++++++++++++++++++++++++++++++
>   arch/arm64/kernel/smp.c             |  2 ++
>   6 files changed, 67 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 41a9b42..253e3c5 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -1408,6 +1408,10 @@ config ARM64_PTR_AUTH
>   	  be enabled. However, KVM guest also require VHE mode and hence
>   	  CONFIG_ARM64_VHE=y option to use this feature.
>   
> +	  If the feature is present on the primary CPU but not a secondary CPU,
> +	  then the secondary CPU will be offlined. On such a system, this
> +	  option should not be selected.

We don't offline the CPUs, we simply park them. You may want to update this to
reflect reality.

> +
>   endmenu
>   
>   config ARM64_SVE
> diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
> index 011a665..5d61749 100644
> --- a/arch/arm64/include/asm/cpufeature.h
> +++ b/arch/arm64/include/asm/cpufeature.h
> @@ -291,6 +291,15 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
>   #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE		\
>   	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
>   
> +/*
> + * CPU feature used early in the boot based on the boot CPU. It is safe for a
> + * late CPU to have this feature even though the boot CPU hasn't enabled it,
> + * although the feature will not be used by Linux in this case. If the boot CPU
> + * has enabled this feature already, then every late CPU must have it.
> + */
> +#define ARM64_CPUCAP_BOOT_CPU_FEATURE			\
> +	 (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)

As mentioned in the previous patch, I think this must panic the system if ever a
CPU turns up without the ptr_auth.
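
Concretely, that could look something like the following (just a sketch of the
idea using the flags already defined in this header, not a tested definition):

#define ARM64_CPUCAP_BOOT_CPU_FEATURE			\
	(ARM64_CPUCAP_SCOPE_BOOT_CPU |		/* detect on the boot CPU */ \
	 ARM64_CPUCAP_PANIC_ON_CONFLICT |	/* panic if a late CPU lacks it */ \
	 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)	/* late CPUs may still have it */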

Otherwise looks fine to me.

Cheers
Suzuki
Amit Daniel Kachhap Oct. 18, 2019, 10:07 a.m. UTC | #2
Hi,

On 10/17/19 7:43 PM, Suzuki K Poulose wrote:
>
> Hi Amit,
>
> On 17/10/2019 09:14, Amit Daniel Kachhap wrote:
>> From: Kristina Martsenko <kristina.martsenko@arm.com>
>>
>> When the kernel is compiled with pointer auth instructions, the boot CPU
>> needs to start using address auth very early, so change the cpucap to
>> account for this.
>>
>> Pointer auth must be enabled before we call C functions, because it is
>> not possible to enter a function with pointer auth disabled and exit it
>> with pointer auth enabled. Note, mismatches between architected and
>> IMPDEF algorithms will still be caught by the cpufeature framework (the
>> separate *_ARCH and *_IMP_DEF cpucaps).
>>
>> Note the change in behavior: if the boot CPU has address auth and a late
>> CPU does not, then we offline the late CPU. Until now we would have just
>> disabled address auth in this case.
>>
>> Leave generic authentication as a "system scope" cpucap for now, since
>> initially the kernel will only use address authentication.
>>
>> Reviewed-by: Kees Cook <keescook@chromium.org>
>> Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
>> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
>> ---
>> Changes since RFC v2:
>>   - Moved early enabling from C to assembly, and no longer use the
>> pointer auth
>>     C function attribute [Suzuki]
>>
>>   arch/arm64/Kconfig                  |  4 ++++
>>   arch/arm64/include/asm/cpufeature.h |  9 +++++++
>>   arch/arm64/include/asm/smp.h        |  1 +
>>   arch/arm64/kernel/cpufeature.c      | 13 +++-------
>>   arch/arm64/kernel/head.S            | 48
>> +++++++++++++++++++++++++++++++++++++
>>   arch/arm64/kernel/smp.c             |  2 ++
>>   6 files changed, 67 insertions(+), 10 deletions(-)
>>
>> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
>> index 41a9b42..253e3c5 100644
>> --- a/arch/arm64/Kconfig
>> +++ b/arch/arm64/Kconfig
>> @@ -1408,6 +1408,10 @@ config ARM64_PTR_AUTH
>>         be enabled. However, KVM guest also require VHE mode and hence
>>         CONFIG_ARM64_VHE=y option to use this feature.
>> +      If the feature is present on the primary CPU but not a
>> secondary CPU,
>> +      then the secondary CPU will be offlined. On such a system, this
>> +      option should not be selected.
>
> We don't offline the CPUs, we simply park them. You may want to update this
> to reflect reality.
Yes, agreed. I will fix this in the next iteration, and in all the other places
where offlining is mentioned.
>
>> +
>>   endmenu
>>   config ARM64_SVE
>> diff --git a/arch/arm64/include/asm/cpufeature.h
>> b/arch/arm64/include/asm/cpufeature.h
>> index 011a665..5d61749 100644
>> --- a/arch/arm64/include/asm/cpufeature.h
>> +++ b/arch/arm64/include/asm/cpufeature.h
>> @@ -291,6 +291,15 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
>>   #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE        \
>>       (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
>> +/*
>> + * CPU feature used early in the boot based on the boot CPU. It is
>> safe for a
>> + * late CPU to have this feature even though the boot CPU hasn't
>> enabled it,
>> + * although the feature will not be used by Linux in this case. If
>> the boot CPU
>> + * has enabled this feature already, then every late CPU must have it.
>> + */
>> +#define ARM64_CPUCAP_BOOT_CPU_FEATURE            \
>> +     (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
>
> As mentioned in the previous patch, I think this must panic the system
> if ever a
> CPU turns up without the ptr_auth.
Yes. Makes sense.
>
> Otherwise looks fine to me.
Thanks.
Amit
>
> Cheers
> Suzuki
James Morse Oct. 23, 2019, 5:32 p.m. UTC | #3
Hi Amit,

On 17/10/2019 09:14, Amit Daniel Kachhap wrote:
> From: Kristina Martsenko <kristina.martsenko@arm.com>
> 
> When the kernel is compiled with pointer auth instructions, the boot CPU
> needs to start using address auth very early, so change the cpucap to
> account for this.
> 
> Pointer auth must be enabled before we call C functions, because it is
> not possible to enter a function with pointer auth disabled and exit it
> with pointer auth enabled. Note, mismatches between architected and
> IMPDEF algorithms will still be caught by the cpufeature framework (the
> separate *_ARCH and *_IMP_DEF cpucaps).
> 
> Note the change in behavior: if the boot CPU has address auth and a late
> CPU does not, then we offline the late CPU. Until now we would have just
> disabled address auth in this case.
> 
> Leave generic authentication as a "system scope" cpucap for now, since
> initially the kernel will only use address authentication.


> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
> index e58e5975..157c811 100644
> --- a/arch/arm64/kernel/head.S
> +++ b/arch/arm64/kernel/head.S
> @@ -13,6 +13,7 @@
>  #include <linux/init.h>
>  #include <linux/irqchip/arm-gic-v3.h>
>  
> +#include <asm/alternative.h>
>  #include <asm/assembler.h>
>  #include <asm/boot.h>
>  #include <asm/ptrace.h>
> @@ -119,6 +120,8 @@ ENTRY(stext)
>  	 * the TCR will have been set.
>  	 */
>  	bl	__cpu_setup			// initialise processor
> +	mov	x1, #1
> +	bl	__ptrauth_setup
>  	b	__primary_switch
>  ENDPROC(stext)
>  
> @@ -713,6 +716,8 @@ secondary_startup:
>  	 */
>  	bl	__cpu_secondary_check52bitva
>  	bl	__cpu_setup			// initialise processor
> +	mov	x1, #0
> +	bl	__ptrauth_setup
>  	adrp	x1, swapper_pg_dir
>  	bl	__enable_mmu
>  	ldr	x8, =__secondary_switched

__cpu_setup creates the SCTLR_EL1 value for us; it already reads ID registers for features
like AFDBM. It seems odd that you don't do the ptrauth check in there.

Not putting it in __cpu_setup means you've missed the other caller: sleep.S's cpu_resume,
which brings the wakeup CPU back as if it were a secondary. (although the value set at
boot will be restored in _cpu_resume).
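
For reference, the resume path as I read it (a simplified outline, not code from
this patch):

	cpu_resume:			// sleep.S: wakeup enters like a secondary
		bl	el2_setup
		bl	__cpu_setup	// rebuilds an SCTLR_EL1 value in x0
		...
		bl	__enable_mmu
		ldr	x8, =_cpu_resume
		br	x8
	_cpu_resume:
		...
		bl	cpu_do_resume	// restores the SCTLR_EL1 saved at suspend,
					// overriding whatever __cpu_setup produced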


It looks like you only need this to be separate in order to pass in the primary/secondary
flag, as __ptrauth_setup has to handle three cases: the boot CPU, secondary CPUs that must
have the feature, and secondary CPUs that can ignore it. Covering three cases with one
alternative isn't possible.

Could we pull the '__cpu_secondary_checkptrauth' out, and run it earlier? This means the
setup call doesn't need to consider secondary CPUs that don't support ptrauth. (and it
matches what we do for 52bit support)
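
Something like this, modeled on __cpu_secondary_check52bitva (a sketch of the idea
only; the label and register choices are assumptions, not code from this series):

__cpu_secondary_checkptrauth:
#ifdef CONFIG_ARM64_PTR_AUTH
	/* Nothing to enforce unless the boot CPU enabled address auth */
alternative_if_not ARM64_HAS_ADDRESS_AUTH
	ret
alternative_else_nop_endif

	/* Park a secondary that lacks the instructions */
	mrs	x2, id_aa64isar1_el1
	ubfx	x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8
	cbnz	x2, 1f
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_PTRAUTH, x0, x1
	early_park_cpu
1:
#endif
	ret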

I think passing primary-boot-cpu into __cpu_setup is something we are going to need for
other features, so it makes sense to add it as a preparatory patch.

Now the setup call can enable the feature if it's supported and we are the boot CPU.
Once the feature is discovered, cpufeature can patch that code to enable it unconditionally,
as we know secondaries without support will be caught in __cpu_secondary_checkptrauth.

I think this would be simpler, but the proof is in the writing... what do you think?


Thanks,

James

> @@ -832,6 +837,49 @@ __no_granule_support:
>  	early_park_cpu
>  ENDPROC(__no_granule_support)
>  
> +/*
> + * Enable pointer authentication.
> + *   x0 = SCTLR_EL1
> + *   x1 = 1 for primary, 0 for secondary
> + */
> +__ptrauth_setup:
> +#ifdef CONFIG_ARM64_PTR_AUTH
> +	/* Check if the CPU supports ptrauth */
> +	mrs	x2, id_aa64isar1_el1
> +	ubfx	x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8
> +	cbz	x2, 2f
> +
> +	/* x2 = system_supports_address_auth() */
> +alternative_if ARM64_HAS_ADDRESS_AUTH
> +	mov	x2, 1
> +alternative_else
> +	mov	x2, 0
> +alternative_endif
> +	orr	x2, x2, x1	// primary || system_supports_address_auth()
> +	cbz	x2, 3f
> +
> +	/* Enable ptrauth instructions */
> +	ldr	x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
> +		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB
> +	orr	x0, x0, x2
> +	b	3f
> +
> +2:	/* No ptrauth support */
> +alternative_if ARM64_HAS_ADDRESS_AUTH
> +	b	4f
> +alternative_else_nop_endif
> +3:
> +#endif
> +	ret
> +
> +#ifdef CONFIG_ARM64_PTR_AUTH
> +4:	/* Park the secondary CPU */
> +	update_early_cpu_boot_status \
> +		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_PTRAUTH, x0, x1
> +	early_park_cpu
> +#endif
> +ENDPROC(__ptrauth_setup)
> +
>  #ifdef CONFIG_RELOCATABLE
>  __relocate_kernel:
>  	/*
Amit Daniel Kachhap Oct. 30, 2019, 4:01 a.m. UTC | #4
Hi,

On 10/23/19 11:02 PM, James Morse wrote:
> Hi Amit,
> 
> On 17/10/2019 09:14, Amit Daniel Kachhap wrote:
>> From: Kristina Martsenko <kristina.martsenko@arm.com>
>>
>> When the kernel is compiled with pointer auth instructions, the boot CPU
>> needs to start using address auth very early, so change the cpucap to
>> account for this.
>>
>> Pointer auth must be enabled before we call C functions, because it is
>> not possible to enter a function with pointer auth disabled and exit it
>> with pointer auth enabled. Note, mismatches between architected and
>> IMPDEF algorithms will still be caught by the cpufeature framework (the
>> separate *_ARCH and *_IMP_DEF cpucaps).
>>
>> Note the change in behavior: if the boot CPU has address auth and a late
>> CPU does not, then we offline the late CPU. Until now we would have just
>> disabled address auth in this case.
>>
>> Leave generic authentication as a "system scope" cpucap for now, since
>> initially the kernel will only use address authentication.
> 
> 
>> diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
>> index e58e5975..157c811 100644
>> --- a/arch/arm64/kernel/head.S
>> +++ b/arch/arm64/kernel/head.S
>> @@ -13,6 +13,7 @@
>>   #include <linux/init.h>
>>   #include <linux/irqchip/arm-gic-v3.h>
>>   
>> +#include <asm/alternative.h>
>>   #include <asm/assembler.h>
>>   #include <asm/boot.h>
>>   #include <asm/ptrace.h>
>> @@ -119,6 +120,8 @@ ENTRY(stext)
>>   	 * the TCR will have been set.
>>   	 */
>>   	bl	__cpu_setup			// initialise processor
>> +	mov	x1, #1
>> +	bl	__ptrauth_setup
>>   	b	__primary_switch
>>   ENDPROC(stext)
>>   
>> @@ -713,6 +716,8 @@ secondary_startup:
>>   	 */
>>   	bl	__cpu_secondary_check52bitva
>>   	bl	__cpu_setup			// initialise processor
>> +	mov	x1, #0
>> +	bl	__ptrauth_setup
>>   	adrp	x1, swapper_pg_dir
>>   	bl	__enable_mmu
>>   	ldr	x8, =__secondary_switched
> 
> __cpu_setup creates the SCTLR_EL1 value for us; it already reads ID registers for
> features like AFDBM. It seems odd that you don't do the ptrauth check in there.
Yes, it makes sense to do the ptrauth check in there before those other steps.
> 
> Not putting it in __cpu_setup means you've missed the other caller: sleep.S's cpu_resume,
> which brings the wakeup CPU back as if it were a secondary. (although the value set at
> boot will be restored in _cpu_resume).
Yes, sctlr_el1 is overridden later.
> 
> 
> It looks like you only need this to be separate in order to pass in the primary/secondary
> flag, as __ptrauth_setup has to handle three cases: the boot CPU, secondary CPUs that must
> have the feature, and secondary CPUs that can ignore it. Covering three cases with one
> alternative isn't possible.
> 
> Could we pull the '__cpu_secondary_checkptrauth' out, and run it earlier? This means the
> setup call doesn't need to consider secondary CPUs that don't support ptrauth. (and it
> matches what we do for 52bit support)
Ok, separating __cpu_secondary_checkptrauth out makes sense. I will add it
by splitting __ptrauth_setup.
> 
> I think passing primary-boot-cpu into __cpu_setup is something we are going to need for
> other features, so it makes sense to add it as a preparatory patch.
Passing a __cpu_setup flag for the boot CPU and for secondary CPUs seems easy in
head.S, but the flag in sleep.S is tricky, as cpu_resume already has some context
stored and only needs to set up the remaining context. So maybe three flags can be
passed to __cpu_setup, like primary-cpu-full-ctxt, secondary-cpu-full-ctxt and
cpu-partial-ctxt. In the case of ptrauth, no change is required for the
cpu-partial-ctxt flag. I will check more on this; a rough sketch follows below.
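Something along these lines (the flag names and values are placeholders for
discussion, not final):

	/* hypothetical flags describing how much context __cpu_setup must build */
	#define ARM64_CPU_BOOT_PRIMARY		1	/* boot CPU, full init */
	#define ARM64_CPU_BOOT_SECONDARY	2	/* secondary CPU, full init */
	#define ARM64_CPU_RUNTIME		3	/* cpu_resume, partial init */

	/* head.S, stext */
		mov	x0, #ARM64_CPU_BOOT_PRIMARY
		bl	__cpu_setup
	/* head.S, secondary_startup */
		mov	x0, #ARM64_CPU_BOOT_SECONDARY
		bl	__cpu_setup
	/* sleep.S, cpu_resume: ptrauth state is restored later, so no change */
		mov	x0, #ARM64_CPU_RUNTIME
		bl	__cpu_setup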
> 
> Now the setup call can enable the feature if it's supported and we are the boot CPU.
> Once the feature is discovered, cpufeature can patch that code to enable it unconditionally,
> as we know secondaries without support will be caught in __cpu_secondary_checkptrauth.
ok.
> 
> I think this would be simpler, but the proof is in the writing... what do you think?
Yes, it should be possible to implement it that way as well.

Thanks,
Amit
> 
> 
> Thanks,
> 
> James
> 
>> @@ -832,6 +837,49 @@ __no_granule_support:
>>   	early_park_cpu
>>   ENDPROC(__no_granule_support)
>>   
>> +/*
>> + * Enable pointer authentication.
>> + *   x0 = SCTLR_EL1
>> + *   x1 = 1 for primary, 0 for secondary
>> + */
>> +__ptrauth_setup:
>> +#ifdef CONFIG_ARM64_PTR_AUTH
>> +	/* Check if the CPU supports ptrauth */
>> +	mrs	x2, id_aa64isar1_el1
>> +	ubfx	x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8
>> +	cbz	x2, 2f
>> +
>> +	/* x2 = system_supports_address_auth() */
>> +alternative_if ARM64_HAS_ADDRESS_AUTH
>> +	mov	x2, 1
>> +alternative_else
>> +	mov	x2, 0
>> +alternative_endif
>> +	orr	x2, x2, x1	// primary || system_supports_address_auth()
>> +	cbz	x2, 3f
>> +
>> +	/* Enable ptrauth instructions */
>> +	ldr	x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
>> +		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB
>> +	orr	x0, x0, x2
>> +	b	3f
>> +
>> +2:	/* No ptrauth support */
>> +alternative_if ARM64_HAS_ADDRESS_AUTH
>> +	b	4f
>> +alternative_else_nop_endif
>> +3:
>> +#endif
>> +	ret
>> +
>> +#ifdef CONFIG_ARM64_PTR_AUTH
>> +4:	/* Park the secondary CPU */
>> +	update_early_cpu_boot_status \
>> +		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_PTRAUTH, x0, x1
>> +	early_park_cpu
>> +#endif
>> +ENDPROC(__ptrauth_setup)
>> +
>>   #ifdef CONFIG_RELOCATABLE
>>   __relocate_kernel:
>>   	/*

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 41a9b42..253e3c5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1408,6 +1408,10 @@  config ARM64_PTR_AUTH
 	  be enabled. However, KVM guest also require VHE mode and hence
 	  CONFIG_ARM64_VHE=y option to use this feature.
 
+	  If the feature is present on the primary CPU but not a secondary CPU,
+	  then the secondary CPU will be offlined. On such a system, this
+	  option should not be selected.
+
 endmenu
 
 config ARM64_SVE
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 011a665..5d61749 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -291,6 +291,15 @@  extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE		\
 	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
 
+/*
+ * CPU feature used early in the boot based on the boot CPU. It is safe for a
+ * late CPU to have this feature even though the boot CPU hasn't enabled it,
+ * although the feature will not be used by Linux in this case. If the boot CPU
+ * has enabled this feature already, then every late CPU must have it.
+ */
+#define ARM64_CPUCAP_BOOT_CPU_FEATURE			\
+	 (ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU)
+
 struct arm64_cpu_capabilities {
 	const char *desc;
 	u16 capability;
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index a0c8a0b..46e2b05 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -22,6 +22,7 @@ 
 
 #define CPU_STUCK_REASON_52_BIT_VA	(UL(1) << CPU_STUCK_REASON_SHIFT)
 #define CPU_STUCK_REASON_NO_GRAN	(UL(2) << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_NO_PTRAUTH	(UL(4) << CPU_STUCK_REASON_SHIFT)
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4ef40c9..507c057 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1237,12 +1237,6 @@  static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 #endif /* CONFIG_ARM64_RAS_EXTN */
 
 #ifdef CONFIG_ARM64_PTR_AUTH
-static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
-{
-	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
-				       SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
-}
-
 static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
 			     int __unused)
 {
@@ -1519,7 +1513,7 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Address authentication (architected algorithm)",
 		.capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64ISAR1_APA_SHIFT,
@@ -1529,7 +1523,7 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "Address authentication (IMP DEF algorithm)",
 		.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
 		.sys_reg = SYS_ID_AA64ISAR1_EL1,
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64ISAR1_API_SHIFT,
@@ -1538,9 +1532,8 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 	},
 	{
 		.capability = ARM64_HAS_ADDRESS_AUTH,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
 		.matches = has_address_auth,
-		.cpu_enable = cpu_enable_address_auth,
 	},
 	{
 		.desc = "Generic authentication (architected algorithm)",
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index e58e5975..157c811 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -13,6 +13,7 @@ 
 #include <linux/init.h>
 #include <linux/irqchip/arm-gic-v3.h>
 
+#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/boot.h>
 #include <asm/ptrace.h>
@@ -119,6 +120,8 @@  ENTRY(stext)
 	 * the TCR will have been set.
 	 */
 	bl	__cpu_setup			// initialise processor
+	mov	x1, #1
+	bl	__ptrauth_setup
 	b	__primary_switch
 ENDPROC(stext)
 
@@ -713,6 +716,8 @@  secondary_startup:
 	 */
 	bl	__cpu_secondary_check52bitva
 	bl	__cpu_setup			// initialise processor
+	mov	x1, #0
+	bl	__ptrauth_setup
 	adrp	x1, swapper_pg_dir
 	bl	__enable_mmu
 	ldr	x8, =__secondary_switched
@@ -832,6 +837,49 @@  __no_granule_support:
 	early_park_cpu
 ENDPROC(__no_granule_support)
 
+/*
+ * Enable pointer authentication.
+ *   x0 = SCTLR_EL1
+ *   x1 = 1 for primary, 0 for secondary
+ */
+__ptrauth_setup:
+#ifdef CONFIG_ARM64_PTR_AUTH
+	/* Check if the CPU supports ptrauth */
+	mrs	x2, id_aa64isar1_el1
+	ubfx	x2, x2, #ID_AA64ISAR1_APA_SHIFT, #8
+	cbz	x2, 2f
+
+	/* x2 = system_supports_address_auth() */
+alternative_if ARM64_HAS_ADDRESS_AUTH
+	mov	x2, 1
+alternative_else
+	mov	x2, 0
+alternative_endif
+	orr	x2, x2, x1	// primary || system_supports_address_auth()
+	cbz	x2, 3f
+
+	/* Enable ptrauth instructions */
+	ldr	x2, =SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB
+	orr	x0, x0, x2
+	b	3f
+
+2:	/* No ptrauth support */
+alternative_if ARM64_HAS_ADDRESS_AUTH
+	b	4f
+alternative_else_nop_endif
+3:
+#endif
+	ret
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+4:	/* Park the secondary CPU */
+	update_early_cpu_boot_status \
+		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_PTRAUTH, x0, x1
+	early_park_cpu
+#endif
+ENDPROC(__ptrauth_setup)
+
 #ifdef CONFIG_RELOCATABLE
 __relocate_kernel:
 	/*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dc9fe87..a6a5f24 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -162,6 +162,8 @@  int __cpu_up(unsigned int cpu, struct task_struct *idle)
 				pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
 			if (status & CPU_STUCK_REASON_NO_GRAN)
 				pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K);
+			if (status & CPU_STUCK_REASON_NO_PTRAUTH)
+				pr_crit("CPU%u: does not support pointer authentication\n", cpu);
 			cpus_stuck_in_kernel++;
 			break;
 		case CPU_PANIC_KERNEL: