diff mbox series

[v3,1/3] arm64: ptrauth: add pointer authentication Armv8.6 enhanced feature

Message ID 1592457029-18547-2-git-send-email-amit.kachhap@arm.com (mailing list archive)
State New, archived
Headers show
Series arm64: add Armv8.6 pointer authentication | expand

Commit Message

Amit Daniel Kachhap June 18, 2020, 5:10 a.m. UTC
This patch adds changes for the Pointer Authentication enhanced features
mandatory for Armv8.6. These features are:

* An enhanced PAC generation logic which hardens finding the correct
  PAC value of the authenticated pointer. However, no code change done
  for this.

* A fault (FPAC) is now generated when a ptrauth authentication instruction
  fails to authenticate the PAC present in the address. This is different
  from the earlier behaviour, where such failures just add an error code in
  the top byte and wait for a subsequent load/store to abort. The ptrauth
  instructions which may cause this fault are autiasp, retaa etc.

The above features are now represented by additional configurations
for the Address Authentication cpufeature.

The fault received in the kernel due to FPAC is treated as an illegal
instruction and hence the signal SIGILL is injected with ILL_ILLOPN as the
signal code. Note that this is different from earlier ARMv8.3 ptrauth,
where the signal SIGSEGV is issued due to pointer authentication failures.

Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
---
No change since v2.

 arch/arm64/include/asm/esr.h       |  4 +++-
 arch/arm64/include/asm/exception.h |  1 +
 arch/arm64/include/asm/sysreg.h    | 24 ++++++++++++++++--------
 arch/arm64/kernel/entry-common.c   | 24 ++++++++++++++++++++++++
 arch/arm64/kernel/traps.c          |  9 +++++++++
 5 files changed, 53 insertions(+), 9 deletions(-)

Comments

Dave Martin June 22, 2020, 2:22 p.m. UTC | #1
On Thu, Jun 18, 2020 at 10:40:27AM +0530, Amit Daniel Kachhap wrote:
> This patch add changes for Pointer Authentication enhanced features
> mandatory for Armv8.6. These features are,
> 
> * An enhanced PAC generation logic which hardens finding the correct
>   PAC value of the authenticated pointer. However, no code change done
>   for this.
> 
> * Fault(FPAC) is generated now when the ptrauth authentication instruction
>   fails in authenticating the PAC present in the address. This is different
>   from earlier case when such failures just adds an error code in the top
>   byte and waits for subsequent load/store to abort. The ptrauth
>   instructions which may cause this fault are autiasp, retaa etc.
> 
> The above features are now represented by additional configurations
> for the Address Authentication cpufeature.
> 
> The fault received in the kernel due to FPAC is treated as Illegal
> instruction and hence signal SIGILL is injected with ILL_ILLOPN as the
> signal code. Note that this is different from earlier ARMv8.3 ptrauth
> where signal SIGSEGV is issued due to Pointer authentication failures.

Seems reasonable.  It's a little unfortunate that the behaviour changes
here, but I don't see what alternative we have.  And getting a
synchronous fault is certainly better for debugging.

> 
> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
> ---
> No change since v2.
> 
>  arch/arm64/include/asm/esr.h       |  4 +++-
>  arch/arm64/include/asm/exception.h |  1 +
>  arch/arm64/include/asm/sysreg.h    | 24 ++++++++++++++++--------
>  arch/arm64/kernel/entry-common.c   | 24 ++++++++++++++++++++++++
>  arch/arm64/kernel/traps.c          |  9 +++++++++
>  5 files changed, 53 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
> index 035003acfa87..22c81f1edda2 100644
> --- a/arch/arm64/include/asm/esr.h
> +++ b/arch/arm64/include/asm/esr.h
> @@ -35,7 +35,9 @@
>  #define ESR_ELx_EC_SYS64	(0x18)
>  #define ESR_ELx_EC_SVE		(0x19)
>  #define ESR_ELx_EC_ERET		(0x1a)	/* EL2 only */
> -/* Unallocated EC: 0x1b - 0x1E */
> +/* Unallocated EC: 0x1B */
> +#define ESR_ELx_EC_FPAC		(0x1C)	/* EL1 and above */
> +/* Unallocated EC: 0x1D - 0x1E */
>  #define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
>  #define ESR_ELx_EC_IABT_LOW	(0x20)
>  #define ESR_ELx_EC_IABT_CUR	(0x21)
> diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
> index 7577a754d443..3140b90f2e4f 100644
> --- a/arch/arm64/include/asm/exception.h
> +++ b/arch/arm64/include/asm/exception.h
> @@ -47,4 +47,5 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
>  void do_cp15instr(unsigned int esr, struct pt_regs *regs);
>  void do_el0_svc(struct pt_regs *regs);
>  void do_el0_svc_compat(struct pt_regs *regs);
> +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
>  #endif	/* __ASM_EXCEPTION_H */
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index 463175f80341..c71bcd0c002a 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -633,14 +633,22 @@
>  #define ID_AA64ISAR1_APA_SHIFT		4
>  #define ID_AA64ISAR1_DPB_SHIFT		0
>  
> -#define ID_AA64ISAR1_APA_NI		0x0
> -#define ID_AA64ISAR1_APA_ARCHITECTED	0x1
> -#define ID_AA64ISAR1_API_NI		0x0
> -#define ID_AA64ISAR1_API_IMP_DEF	0x1
> -#define ID_AA64ISAR1_GPA_NI		0x0
> -#define ID_AA64ISAR1_GPA_ARCHITECTED	0x1
> -#define ID_AA64ISAR1_GPI_NI		0x0
> -#define ID_AA64ISAR1_GPI_IMP_DEF	0x1
> +#define ID_AA64ISAR1_APA_NI			0x0
> +#define ID_AA64ISAR1_APA_ARCHITECTED		0x1
> +#define ID_AA64ISAR1_APA_ARCH_EPAC		0x2
> +#define ID_AA64ISAR1_APA_ARCH_EPAC2		0x3
> +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC	0x4
> +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB	0x5
> +#define ID_AA64ISAR1_API_NI			0x0
> +#define ID_AA64ISAR1_API_IMP_DEF		0x1
> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC		0x2
> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2		0x3
> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC	0x4
> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB	0x5
> +#define ID_AA64ISAR1_GPA_NI			0x0
> +#define ID_AA64ISAR1_GPA_ARCHITECTED		0x1
> +#define ID_AA64ISAR1_GPI_NI			0x0
> +#define ID_AA64ISAR1_GPI_IMP_DEF		0x1
>  
>  /* id_aa64pfr0 */
>  #define ID_AA64PFR0_CSV3_SHIFT		60
> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> index 3dbdf9752b11..08a4805a5cca 100644
> --- a/arch/arm64/kernel/entry-common.c
> +++ b/arch/arm64/kernel/entry-common.c
> @@ -66,6 +66,13 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
>  }
>  NOKPROBE_SYMBOL(el1_dbg);
>  
> +static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
> +{
> +	local_daif_inherit(regs);
> +	do_ptrauth_fault(regs, esr);
> +}
> +NOKPROBE_SYMBOL(el1_fpac);
> +
>  asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
>  {
>  	unsigned long esr = read_sysreg(esr_el1);
> @@ -92,6 +99,9 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
>  	case ESR_ELx_EC_BRK64:
>  		el1_dbg(regs, esr);
>  		break;
> +	case ESR_ELx_EC_FPAC:
> +		el1_fpac(regs, esr);
> +		break;
>  	default:
>  		el1_inv(regs, esr);
>  	}
> @@ -227,6 +237,14 @@ static void notrace el0_svc(struct pt_regs *regs)
>  }
>  NOKPROBE_SYMBOL(el0_svc);
>  
> +static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
> +{
> +	user_exit_irqoff();
> +	local_daif_restore(DAIF_PROCCTX);
> +	do_ptrauth_fault(regs, esr);
> +}
> +NOKPROBE_SYMBOL(el0_fpac);
> +
>  asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
>  {
>  	unsigned long esr = read_sysreg(esr_el1);
> @@ -272,6 +290,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
>  	case ESR_ELx_EC_BRK64:
>  		el0_dbg(regs, esr);
>  		break;
> +	case ESR_ELx_EC_FPAC:
> +		el0_fpac(regs, esr);
> +		break;
>  	default:
>  		el0_inv(regs, esr);
>  	}
> @@ -335,6 +356,9 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
>  	case ESR_ELx_EC_BKPT32:
>  		el0_dbg(regs, esr);
>  		break;
> +	case ESR_ELx_EC_FPAC:
> +		el0_fpac(regs, esr);
> +		break;

Can this exception ever happen?  I thought ptrauth doesn't exist for
AArch32.

>  	default:
>  		el0_inv(regs, esr);
>  	}
> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
> index 50cc30acf106..f596ef585560 100644
> --- a/arch/arm64/kernel/traps.c
> +++ b/arch/arm64/kernel/traps.c
> @@ -479,6 +479,14 @@ void do_bti(struct pt_regs *regs)
>  }
>  NOKPROBE_SYMBOL(do_bti);
>  
> +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
> +{
> +	BUG_ON(!user_mode(regs));

Doesn't el1_fpac() call this?  How does this interact with in-kernel
pointer auth?

> +	arm64_notify_die("pointer authentication fault", regs, SIGILL,
> +			 ILL_ILLOPN, (void __user *)regs->pc, esr);
> +}
> +NOKPROBE_SYMBOL(do_ptrauth_fault);

[...]

Cheers
---Dave
Amit Daniel Kachhap June 23, 2020, 1:16 p.m. UTC | #2
Hi,

On 6/22/20 7:52 PM, Dave Martin wrote:
> On Thu, Jun 18, 2020 at 10:40:27AM +0530, Amit Daniel Kachhap wrote:
>> This patch add changes for Pointer Authentication enhanced features
>> mandatory for Armv8.6. These features are,
>>
>> * An enhanced PAC generation logic which hardens finding the correct
>>    PAC value of the authenticated pointer. However, no code change done
>>    for this.
>>
>> * Fault(FPAC) is generated now when the ptrauth authentication instruction
>>    fails in authenticating the PAC present in the address. This is different
>>    from earlier case when such failures just adds an error code in the top
>>    byte and waits for subsequent load/store to abort. The ptrauth
>>    instructions which may cause this fault are autiasp, retaa etc.
>>
>> The above features are now represented by additional configurations
>> for the Address Authentication cpufeature.
>>
>> The fault received in the kernel due to FPAC is treated as Illegal
>> instruction and hence signal SIGILL is injected with ILL_ILLOPN as the
>> signal code. Note that this is different from earlier ARMv8.3 ptrauth
>> where signal SIGSEGV is issued due to Pointer authentication failures.
> 
> Seems reasonable.  It's a little unfortunate that the behaviour changes
> here, but I don't see what alternative we have.  And getting a
> sunchronous fault is certainly better for debugging.
> 
>>
>> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
>> ---
>> No change since v2.
>>
>>   arch/arm64/include/asm/esr.h       |  4 +++-
>>   arch/arm64/include/asm/exception.h |  1 +
>>   arch/arm64/include/asm/sysreg.h    | 24 ++++++++++++++++--------
>>   arch/arm64/kernel/entry-common.c   | 24 ++++++++++++++++++++++++
>>   arch/arm64/kernel/traps.c          |  9 +++++++++
>>   5 files changed, 53 insertions(+), 9 deletions(-)
>>
>> diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
>> index 035003acfa87..22c81f1edda2 100644
>> --- a/arch/arm64/include/asm/esr.h
>> +++ b/arch/arm64/include/asm/esr.h
>> @@ -35,7 +35,9 @@
>>   #define ESR_ELx_EC_SYS64	(0x18)
>>   #define ESR_ELx_EC_SVE		(0x19)
>>   #define ESR_ELx_EC_ERET		(0x1a)	/* EL2 only */
>> -/* Unallocated EC: 0x1b - 0x1E */
>> +/* Unallocated EC: 0x1B */
>> +#define ESR_ELx_EC_FPAC		(0x1C)	/* EL1 and above */
>> +/* Unallocated EC: 0x1D - 0x1E */
>>   #define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
>>   #define ESR_ELx_EC_IABT_LOW	(0x20)
>>   #define ESR_ELx_EC_IABT_CUR	(0x21)
>> diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
>> index 7577a754d443..3140b90f2e4f 100644
>> --- a/arch/arm64/include/asm/exception.h
>> +++ b/arch/arm64/include/asm/exception.h
>> @@ -47,4 +47,5 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
>>   void do_cp15instr(unsigned int esr, struct pt_regs *regs);
>>   void do_el0_svc(struct pt_regs *regs);
>>   void do_el0_svc_compat(struct pt_regs *regs);
>> +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
>>   #endif	/* __ASM_EXCEPTION_H */
>> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
>> index 463175f80341..c71bcd0c002a 100644
>> --- a/arch/arm64/include/asm/sysreg.h
>> +++ b/arch/arm64/include/asm/sysreg.h
>> @@ -633,14 +633,22 @@
>>   #define ID_AA64ISAR1_APA_SHIFT		4
>>   #define ID_AA64ISAR1_DPB_SHIFT		0
>>   
>> -#define ID_AA64ISAR1_APA_NI		0x0
>> -#define ID_AA64ISAR1_APA_ARCHITECTED	0x1
>> -#define ID_AA64ISAR1_API_NI		0x0
>> -#define ID_AA64ISAR1_API_IMP_DEF	0x1
>> -#define ID_AA64ISAR1_GPA_NI		0x0
>> -#define ID_AA64ISAR1_GPA_ARCHITECTED	0x1
>> -#define ID_AA64ISAR1_GPI_NI		0x0
>> -#define ID_AA64ISAR1_GPI_IMP_DEF	0x1
>> +#define ID_AA64ISAR1_APA_NI			0x0
>> +#define ID_AA64ISAR1_APA_ARCHITECTED		0x1
>> +#define ID_AA64ISAR1_APA_ARCH_EPAC		0x2
>> +#define ID_AA64ISAR1_APA_ARCH_EPAC2		0x3
>> +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC	0x4
>> +#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB	0x5
>> +#define ID_AA64ISAR1_API_NI			0x0
>> +#define ID_AA64ISAR1_API_IMP_DEF		0x1
>> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC		0x2
>> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2		0x3
>> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC	0x4
>> +#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB	0x5
>> +#define ID_AA64ISAR1_GPA_NI			0x0
>> +#define ID_AA64ISAR1_GPA_ARCHITECTED		0x1
>> +#define ID_AA64ISAR1_GPI_NI			0x0
>> +#define ID_AA64ISAR1_GPI_IMP_DEF		0x1
>>   
>>   /* id_aa64pfr0 */
>>   #define ID_AA64PFR0_CSV3_SHIFT		60
>> diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
>> index 3dbdf9752b11..08a4805a5cca 100644
>> --- a/arch/arm64/kernel/entry-common.c
>> +++ b/arch/arm64/kernel/entry-common.c
>> @@ -66,6 +66,13 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
>>   }
>>   NOKPROBE_SYMBOL(el1_dbg);
>>   
>> +static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
>> +{
>> +	local_daif_inherit(regs);
>> +	do_ptrauth_fault(regs, esr);
>> +}
>> +NOKPROBE_SYMBOL(el1_fpac);
>> +
>>   asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
>>   {
>>   	unsigned long esr = read_sysreg(esr_el1);
>> @@ -92,6 +99,9 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
>>   	case ESR_ELx_EC_BRK64:
>>   		el1_dbg(regs, esr);
>>   		break;
>> +	case ESR_ELx_EC_FPAC:
>> +		el1_fpac(regs, esr);
>> +		break;
>>   	default:
>>   		el1_inv(regs, esr);
>>   	}
>> @@ -227,6 +237,14 @@ static void notrace el0_svc(struct pt_regs *regs)
>>   }
>>   NOKPROBE_SYMBOL(el0_svc);
>>   
>> +static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
>> +{
>> +	user_exit_irqoff();
>> +	local_daif_restore(DAIF_PROCCTX);
>> +	do_ptrauth_fault(regs, esr);
>> +}
>> +NOKPROBE_SYMBOL(el0_fpac);
>> +
>>   asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
>>   {
>>   	unsigned long esr = read_sysreg(esr_el1);
>> @@ -272,6 +290,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
>>   	case ESR_ELx_EC_BRK64:
>>   		el0_dbg(regs, esr);
>>   		break;
>> +	case ESR_ELx_EC_FPAC:
>> +		el0_fpac(regs, esr);
>> +		break;
>>   	default:
>>   		el0_inv(regs, esr);
>>   	}
>> @@ -335,6 +356,9 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
>>   	case ESR_ELx_EC_BKPT32:
>>   		el0_dbg(regs, esr);
>>   		break;
>> +	case ESR_ELx_EC_FPAC:
>> +		el0_fpac(regs, esr);
>> +		break;
> 
> Can this exception ever happen?  I thought ptrauth doesn't exist for
> AArch32.

This path will be taken during FPAC fault from userspace(EL0). Am I 
missing something here?

> 
>>   	default:
>>   		el0_inv(regs, esr);
>>   	}
>> diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
>> index 50cc30acf106..f596ef585560 100644
>> --- a/arch/arm64/kernel/traps.c
>> +++ b/arch/arm64/kernel/traps.c
>> @@ -479,6 +479,14 @@ void do_bti(struct pt_regs *regs)
>>   }
>>   NOKPROBE_SYMBOL(do_bti);
>>   
>> +void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
>> +{
>> +	BUG_ON(!user_mode(regs));
> 
> Doesn't el1_fpac() call this?  How does this interact with in-kernel
> pointer auth?

Yes called from el1_fpac. Currently we crash the kernel when this fpac 
occurs. This is similar to do_undefinstr implementation. Anyway it can 
be crashed from el1_fpac also.

Thanks,
Amit

> 
>> +	arm64_notify_die("pointer authentication fault", regs, SIGILL,
>> +			 ILL_ILLOPN, (void __user *)regs->pc, esr);
>> +}
>> +NOKPROBE_SYMBOL(do_ptrauth_fault);
> 
> [...]
> 
> Cheers
> ---Dave
>
Mark Brown June 23, 2020, 1:56 p.m. UTC | #3
On Tue, Jun 23, 2020 at 06:46:28PM +0530, Amit Daniel Kachhap wrote:
> On 6/22/20 7:52 PM, Dave Martin wrote:

> > > @@ -335,6 +356,9 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
> > >   	case ESR_ELx_EC_BKPT32:
> > >   		el0_dbg(regs, esr);
> > >   		break;
> > > +	case ESR_ELx_EC_FPAC:
> > > +		el0_fpac(regs, esr);
> > > +		break;

> > Can this exception ever happen?  I thought ptrauth doesn't exist for
> > AArch32.

> This path will be taken during FPAC fault from userspace(EL0). Am I missing
> something here?

This one is in the compat handler so should only be generated from
AArch32 context.
Dave Martin June 23, 2020, 2:45 p.m. UTC | #4
On Tue, Jun 23, 2020 at 06:46:28PM +0530, Amit Daniel Kachhap wrote:
> Hi,
> 
> On 6/22/20 7:52 PM, Dave Martin wrote:
> >On Thu, Jun 18, 2020 at 10:40:27AM +0530, Amit Daniel Kachhap wrote:
> >>This patch add changes for Pointer Authentication enhanced features
> >>mandatory for Armv8.6. These features are,
> >>
> >>* An enhanced PAC generation logic which hardens finding the correct
> >>   PAC value of the authenticated pointer. However, no code change done
> >>   for this.
> >>
> >>* Fault(FPAC) is generated now when the ptrauth authentication instruction
> >>   fails in authenticating the PAC present in the address. This is different
> >>   from earlier case when such failures just adds an error code in the top
> >>   byte and waits for subsequent load/store to abort. The ptrauth
> >>   instructions which may cause this fault are autiasp, retaa etc.
> >>
> >>The above features are now represented by additional configurations
> >>for the Address Authentication cpufeature.
> >>
> >>The fault received in the kernel due to FPAC is treated as Illegal
> >>instruction and hence signal SIGILL is injected with ILL_ILLOPN as the
> >>signal code. Note that this is different from earlier ARMv8.3 ptrauth
> >>where signal SIGSEGV is issued due to Pointer authentication failures.
> >
> >Seems reasonable.  It's a little unfortunate that the behaviour changes
> >here, but I don't see what alternative we have.  And getting a
> >sunchronous fault is certainly better for debugging.
> >
> >>
> >>Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>

[...]

> >>diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
> >>index 3dbdf9752b11..08a4805a5cca 100644
> >>--- a/arch/arm64/kernel/entry-common.c
> >>+++ b/arch/arm64/kernel/entry-common.c
> >>@@ -66,6 +66,13 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
> >>  }
> >>  NOKPROBE_SYMBOL(el1_dbg);
> >>+static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
> >>+{
> >>+	local_daif_inherit(regs);
> >>+	do_ptrauth_fault(regs, esr);
> >>+}
> >>+NOKPROBE_SYMBOL(el1_fpac);
> >>+
> >>  asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
> >>  {
> >>  	unsigned long esr = read_sysreg(esr_el1);
> >>@@ -92,6 +99,9 @@ asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
> >>  	case ESR_ELx_EC_BRK64:
> >>  		el1_dbg(regs, esr);
> >>  		break;
> >>+	case ESR_ELx_EC_FPAC:
> >>+		el1_fpac(regs, esr);
> >>+		break;
> >>  	default:
> >>  		el1_inv(regs, esr);
> >>  	}
> >>@@ -227,6 +237,14 @@ static void notrace el0_svc(struct pt_regs *regs)
> >>  }
> >>  NOKPROBE_SYMBOL(el0_svc);
> >>+static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
> >>+{
> >>+	user_exit_irqoff();
> >>+	local_daif_restore(DAIF_PROCCTX);
> >>+	do_ptrauth_fault(regs, esr);
> >>+}
> >>+NOKPROBE_SYMBOL(el0_fpac);
> >>+
> >>  asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
> >>  {
> >>  	unsigned long esr = read_sysreg(esr_el1);
> >>@@ -272,6 +290,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
> >>  	case ESR_ELx_EC_BRK64:
> >>  		el0_dbg(regs, esr);
> >>  		break;
> >>+	case ESR_ELx_EC_FPAC:
> >>+		el0_fpac(regs, esr);
> >>+		break;
> >>  	default:
> >>  		el0_inv(regs, esr);
> >>  	}
> >>@@ -335,6 +356,9 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
> >>  	case ESR_ELx_EC_BKPT32:
> >>  		el0_dbg(regs, esr);
> >>  		break;
> >>+	case ESR_ELx_EC_FPAC:
> >>+		el0_fpac(regs, esr);
> >>+		break;
> >
> >Can this exception ever happen?  I thought ptrauth doesn't exist for
> >AArch32.
> 
> This path will be taken during FPAC fault from userspace(EL0). Am I missing
> something here?

I thought AArch32 doesn't have pointer auth at all, due to a lack of
spare address bits.

el0_sync_compat_handler() is presumably only for exceptions coming from
AArch32?
> 
> >
> >>  	default:
> >>  		el0_inv(regs, esr);
> >>  	}
> >>diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
> >>index 50cc30acf106..f596ef585560 100644
> >>--- a/arch/arm64/kernel/traps.c
> >>+++ b/arch/arm64/kernel/traps.c
> >>@@ -479,6 +479,14 @@ void do_bti(struct pt_regs *regs)
> >>  }
> >>  NOKPROBE_SYMBOL(do_bti);
> >>+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
> >>+{
> >>+	BUG_ON(!user_mode(regs));
> >
> >Doesn't el1_fpac() call this?  How does this interact with in-kernel
> >pointer auth?
> 
> Yes called from el1_fpac. Currently we crash the kernel when this fpac
> occurs. This is similar to do_undefinstr implementation. Anyway it can be
> crashed from el1_fpac also.

OK, might be worth a comment saying what this is checking -- or simply
replace the body of el1_fpac() with a BUG() and remove the BUG_ON()
here?

In its current form, this looks like this function should not be called
in a kernel context, but el1_fpac() explicitly does make such a call.

Cheers
---Dave
Amit Daniel Kachhap June 24, 2020, 7:07 a.m. UTC | #5
On 6/23/20 8:15 PM, Dave Martin wrote:
> On Tue, Jun 23, 2020 at 06:46:28PM +0530, Amit Daniel Kachhap wrote:
>> Hi,
>>
>> On 6/22/20 7:52 PM, Dave Martin wrote:
>>> On Thu, Jun 18, 2020 at 10:40:27AM +0530, Amit Daniel Kachhap wrote:
>>>> This patch add changes for Pointer Authentication enhanced features
>>>> mandatory for Armv8.6. These features are,
>>>>

>>>> +
>>>>   asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
>>>>   {
>>>>   	unsigned long esr = read_sysreg(esr_el1);
>>>> @@ -272,6 +290,9 @@ asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
>>>>   	case ESR_ELx_EC_BRK64:
>>>>   		el0_dbg(regs, esr);
>>>>   		break;
>>>> +	case ESR_ELx_EC_FPAC:
>>>> +		el0_fpac(regs, esr);
>>>> +		break;
>>>>   	default:
>>>>   		el0_inv(regs, esr);
>>>>   	}
>>>> @@ -335,6 +356,9 @@ asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
>>>>   	case ESR_ELx_EC_BKPT32:
>>>>   		el0_dbg(regs, esr);
>>>>   		break;
>>>> +	case ESR_ELx_EC_FPAC:
>>>> +		el0_fpac(regs, esr);
>>>> +		break;
>>>
>>> Can this exception ever happen?  I thought ptrauth doesn't exist for
>>> AArch32.
>>
>> This path will be taken during FPAC fault from userspace(EL0). Am I missing
>> something here?
> 
> I thought AArch32 doesn't have pointer auth at all, due to a lack of
> spare address bits.
> 
> el0_sync_compat_handler() is presumably only for exceptions coming from
> AArch32?

ok got it. I will remove el0_fpac from here. Thanks for pointing this issue.

>>
>>>
>>>>   	default:
>>>>   		el0_inv(regs, esr);
>>>>   	}

>
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 035003acfa87..22c81f1edda2 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -35,7 +35,9 @@ 
 #define ESR_ELx_EC_SYS64	(0x18)
 #define ESR_ELx_EC_SVE		(0x19)
 #define ESR_ELx_EC_ERET		(0x1a)	/* EL2 only */
-/* Unallocated EC: 0x1b - 0x1E */
+/* Unallocated EC: 0x1B */
+#define ESR_ELx_EC_FPAC		(0x1C)	/* EL1 and above */
+/* Unallocated EC: 0x1D - 0x1E */
 #define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
 #define ESR_ELx_EC_IABT_LOW	(0x20)
 #define ESR_ELx_EC_IABT_CUR	(0x21)
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 7577a754d443..3140b90f2e4f 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -47,4 +47,5 @@  void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
 void do_cp15instr(unsigned int esr, struct pt_regs *regs);
 void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr);
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 463175f80341..c71bcd0c002a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -633,14 +633,22 @@ 
 #define ID_AA64ISAR1_APA_SHIFT		4
 #define ID_AA64ISAR1_DPB_SHIFT		0
 
-#define ID_AA64ISAR1_APA_NI		0x0
-#define ID_AA64ISAR1_APA_ARCHITECTED	0x1
-#define ID_AA64ISAR1_API_NI		0x0
-#define ID_AA64ISAR1_API_IMP_DEF	0x1
-#define ID_AA64ISAR1_GPA_NI		0x0
-#define ID_AA64ISAR1_GPA_ARCHITECTED	0x1
-#define ID_AA64ISAR1_GPI_NI		0x0
-#define ID_AA64ISAR1_GPI_IMP_DEF	0x1
+#define ID_AA64ISAR1_APA_NI			0x0
+#define ID_AA64ISAR1_APA_ARCHITECTED		0x1
+#define ID_AA64ISAR1_APA_ARCH_EPAC		0x2
+#define ID_AA64ISAR1_APA_ARCH_EPAC2		0x3
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC	0x4
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB	0x5
+#define ID_AA64ISAR1_API_NI			0x0
+#define ID_AA64ISAR1_API_IMP_DEF		0x1
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC		0x2
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2		0x3
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC	0x4
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB	0x5
+#define ID_AA64ISAR1_GPA_NI			0x0
+#define ID_AA64ISAR1_GPA_ARCHITECTED		0x1
+#define ID_AA64ISAR1_GPI_NI			0x0
+#define ID_AA64ISAR1_GPI_IMP_DEF		0x1
 
 /* id_aa64pfr0 */
 #define ID_AA64PFR0_CSV3_SHIFT		60
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 3dbdf9752b11..08a4805a5cca 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -66,6 +66,13 @@  static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
 }
 NOKPROBE_SYMBOL(el1_dbg);
 
+static void notrace el1_fpac(struct pt_regs *regs, unsigned long esr)
+{
+	local_daif_inherit(regs);
+	do_ptrauth_fault(regs, esr);
+}
+NOKPROBE_SYMBOL(el1_fpac);
+
 asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
@@ -92,6 +99,9 @@  asmlinkage void notrace el1_sync_handler(struct pt_regs *regs)
 	case ESR_ELx_EC_BRK64:
 		el1_dbg(regs, esr);
 		break;
+	case ESR_ELx_EC_FPAC:
+		el1_fpac(regs, esr);
+		break;
 	default:
 		el1_inv(regs, esr);
 	}
@@ -227,6 +237,14 @@  static void notrace el0_svc(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(el0_svc);
 
+static void notrace el0_fpac(struct pt_regs *regs, unsigned long esr)
+{
+	user_exit_irqoff();
+	local_daif_restore(DAIF_PROCCTX);
+	do_ptrauth_fault(regs, esr);
+}
+NOKPROBE_SYMBOL(el0_fpac);
+
 asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
 {
 	unsigned long esr = read_sysreg(esr_el1);
@@ -272,6 +290,9 @@  asmlinkage void notrace el0_sync_handler(struct pt_regs *regs)
 	case ESR_ELx_EC_BRK64:
 		el0_dbg(regs, esr);
 		break;
+	case ESR_ELx_EC_FPAC:
+		el0_fpac(regs, esr);
+		break;
 	default:
 		el0_inv(regs, esr);
 	}
@@ -335,6 +356,9 @@  asmlinkage void notrace el0_sync_compat_handler(struct pt_regs *regs)
 	case ESR_ELx_EC_BKPT32:
 		el0_dbg(regs, esr);
 		break;
+	case ESR_ELx_EC_FPAC:
+		el0_fpac(regs, esr);
+		break;
 	default:
 		el0_inv(regs, esr);
 	}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 50cc30acf106..f596ef585560 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -479,6 +479,14 @@  void do_bti(struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_bti);
 
+void do_ptrauth_fault(struct pt_regs *regs, unsigned long esr)
+{
+	BUG_ON(!user_mode(regs));
+	arm64_notify_die("pointer authentication fault", regs, SIGILL,
+			 ILL_ILLOPN, (void __user *)regs->pc, esr);
+}
+NOKPROBE_SYMBOL(do_ptrauth_fault);
+
 #define __user_cache_maint(insn, address, res)			\
 	if (address >= user_addr_max()) {			\
 		res = -EFAULT;					\
@@ -775,6 +783,7 @@  static const char *esr_class_str[] = {
 	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
 	[ESR_ELx_EC_SVE]		= "SVE",
 	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
+	[ESR_ELx_EC_FPAC]		= "FPAC",
 	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
 	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
 	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",