
[v3,2/3] arm64: cpufeature: Modify address authentication cpufeature to exact

Message ID 1592457029-18547-3-git-send-email-amit.kachhap@arm.com (mailing list archive)
State New, archived
Series arm64: add Armv8.6 pointer authentication

Commit Message

Amit Daniel Kachhap June 18, 2020, 5:10 a.m. UTC
This patch modifies the address authentication cpufeature type to EXACT
from earlier LOWER_SAFE as the different configurations added for Armv8.6
enhanced PAC have different behaviour and there is no tunable to enable the
lower safe versions.

The current cpufeature framework does not interfere in the booting of
non-exact secondary cpus but rather marks them as tainted. This patch fixes
it by replacing the generic match handler with a new handler specific to
ptrauth.

After this change, if there is any variation in ptrauth configurations in
secondary cpus from boot cpu then those mismatched cpus are parked in an
infinite loop.

Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
[Suzuki: Introduce new matching function for address authentication]
Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
---
Change since v2:
 * Added new matching helper function has_address_auth_cpucap as address
   authentication cpufeature is now FTR_EXACT. The existing matching function
   has_cpuid_feature is specific to LOWER_SAFE.

 arch/arm64/kernel/cpufeature.c | 56 ++++++++++++++++++++++++++++------
 1 file changed, 47 insertions(+), 9 deletions(-)
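
As background for the LOWER_SAFE vs EXACT distinction above: a LOWER_SAFE
field is sanitised to the lowest value seen across CPUs, while an EXACT field
that differs between CPUs falls back to a designated safe value (0 for these
ptrauth fields), so the capability is effectively lost system-wide. Below is a
minimal stand-alone sketch of the two policies; the helper is a simplified
illustration only, not the kernel's actual arm64_ftr_bits handling.

/*
 * Sketch of FTR_LOWER_SAFE vs FTR_EXACT field sanitisation.
 * Simplified stand-in for the arm64_ftr_bits logic, illustration only.
 */
#include <stdio.h>

#define FTR_LOWER_SAFE  0
#define FTR_EXACT       1

/* Combine the boot CPU's field value with a secondary CPU's value. */
static int sanitise_field(int type, int safe_val, int boot, int secondary)
{
        switch (type) {
        case FTR_LOWER_SAFE:
                /* Lower values are assumed compatible: keep the minimum. */
                return boot < secondary ? boot : secondary;
        case FTR_EXACT:
                /* Values must match exactly, otherwise fall back to safe_val. */
                return boot == secondary ? boot : safe_val;
        default:
                return safe_val;
        }
}

int main(void)
{
        /* e.g. APA = 1 on the boot CPU, 3 on a secondary CPU */
        printf("LOWER_SAFE: %d\n", sanitise_field(FTR_LOWER_SAFE, 0, 1, 3)); /* 1 */
        printf("EXACT:      %d\n", sanitise_field(FTR_EXACT, 0, 1, 3));      /* 0 */
        return 0;
}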

Comments

Dave Martin June 22, 2020, 2:35 p.m. UTC | #1
On Thu, Jun 18, 2020 at 10:40:28AM +0530, Amit Daniel Kachhap wrote:
> This patch modifies the address authentication cpufeature type to EXACT
> from earlier LOWER_SAFE as the different configurations added for Armv8.6
> enhanced PAC have different behaviour and there is no tunable to enable the
> lower safe versions.

The enhancements add no new instructions at EL0, right?  And no
behavioural change, provided that userspace doesn't care what signing/
authentication algorithm is used?

If so then I guess you're correct not to add a new hwcap, but I thought
it was worth asking.

> non-exact secondary cpus but rather marks them as tainted. This patch fixes
> it by replacing the generic match handler with a new handler specific to
> ptrauth.
> 
> After this change, if there is any variation in ptrauth configurations in
> secondary cpus from boot cpu then those mismatched cpus are parked in an
> infinite loop.
> 
> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
> [Suzuki: Introduce new matching function for address authentication]
> Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>

Does a corresponding patch need to go to stable?  As things stand, I
think older kernels that support pointer auth will go wrong on v8.7+
hardware that has these features.

This patch in its current form may be too heavy for stable, though.

See comment below though.

> ---
> Change since v2:
>  * Added new matching helper function has_address_auth_cpucap as address
>    authentication cpufeature is now FTR_EXACT. The existing matching function
>    has_cpuid_feature is specific to LOWER_SAFE.
> 
>  arch/arm64/kernel/cpufeature.c | 56 ++++++++++++++++++++++++++++------
>  1 file changed, 47 insertions(+), 9 deletions(-)
> 
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index 4ae41670c2e6..42670d615555 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -210,9 +210,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
>  	ARM64_FTR_END,
>  };
> @@ -1601,11 +1601,49 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
>  #endif /* CONFIG_ARM64_RAS_EXTN */
>  
>  #ifdef CONFIG_ARM64_PTR_AUTH
> -static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
> -			     int __unused)
> +static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
>  {
> -	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
> -	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
> +	int local_cpu_auth;
> +	static int boot_cpu_auth_arch;
> +	static int boot_cpu_auth_imp_def;
> +
> +	/* We don't expect to be called with SCOPE_SYSTEM */
> +	WARN_ON(scope == SCOPE_SYSTEM);
> +
> +	local_cpu_auth = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
> +						     entry->field_pos, entry->sign);
> +	/*
> +	 * The ptr-auth feature levels are not intercompatible with
> +	 * lower levels. Hence we must match all the CPUs with that
> +	 * of the boot CPU. So cache the level of boot CPU and compare
> +	 * it against the secondary CPUs.
> +	 */

Thinking about this, does it actually matter if different CPUs have
different trapping behaviours for ptrauth?

If we are guaranteed that the signing algorithm is still compatible
across all CPUs, we might get away with it.

Possibly a dumb question -- I haven't checked the specs to find out
whether this makes any sense or not.

> +	if (scope & SCOPE_BOOT_CPU) {
> +		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_IMP_DEF) {
> +			boot_cpu_auth_imp_def = local_cpu_auth;
> +			return boot_cpu_auth_imp_def >= entry->min_field_value;
> +		} else if (entry->capability == ARM64_HAS_ADDRESS_AUTH_ARCH) {
> +			boot_cpu_auth_arch = local_cpu_auth;
> +			return boot_cpu_auth_arch >= entry->min_field_value;
> +		}
> +	} else if (scope & SCOPE_LOCAL_CPU) {
> +		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_IMP_DEF) {
> +			return (local_cpu_auth >= entry->min_field_value) &&
> +			       (local_cpu_auth == boot_cpu_auth_imp_def);
> +		}
> +		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_ARCH) {
> +			return (local_cpu_auth >= entry->min_field_value) &&
> +			       (local_cpu_auth == boot_cpu_auth_arch);
> +		}
> +	}
> +	return false;
> +}
> +
> +static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
> +			     int scope)
> +{
> +	return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
> +	       has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
>  }
>  
>  static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
> @@ -1954,7 +1992,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		.sign = FTR_UNSIGNED,
>  		.field_pos = ID_AA64ISAR1_APA_SHIFT,
>  		.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
> -		.matches = has_cpuid_feature,
> +		.matches = has_address_auth_cpucap,
>  	},
>  	{
>  		.desc = "Address authentication (IMP DEF algorithm)",
> @@ -1964,12 +2002,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		.sign = FTR_UNSIGNED,
>  		.field_pos = ID_AA64ISAR1_API_SHIFT,
>  		.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
> -		.matches = has_cpuid_feature,
> +		.matches = has_address_auth_cpucap,
>  	},
>  	{
>  		.capability = ARM64_HAS_ADDRESS_AUTH,
>  		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
> -		.matches = has_address_auth,
> +		.matches = has_address_auth_metacap,
>  	},
>  	{
>  		.desc = "Generic authentication (architected algorithm)",
> -- 
> 2.17.1
> 
> 
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
Amit Daniel Kachhap June 23, 2020, 1:17 p.m. UTC | #2
Hi,

On 6/22/20 8:05 PM, Dave Martin wrote:
> On Thu, Jun 18, 2020 at 10:40:28AM +0530, Amit Daniel Kachhap wrote:
>> This patch modifies the address authentication cpufeature type to EXACT
>> from earlier LOWER_SAFE as the different configurations added for Armv8.6
>> enhanced PAC have different behaviour and there is no tunable to enable the
>> lower safe versions.
> 
> The enhancements add no new instructions at EL0, right?  And no
> behavioural change, provided that userspace doesn't care what signing/
> authentication algorithm is used?

Yes you are correct that no new instructions are added.

> 
> If so then I guess you're correct not to add a new hwcap, but I thought
> it was worth asking.
> 
>> non-exact secondary cpus but rather marks them as tainted. This patch fixes
>> it by replacing the generic match handler with a new handler specific to
>> ptrauth.
>>
>> After this change, if there is any variation in ptrauth configurations in
>> secondary cpus from boot cpu then those mismatched cpus are parked in an
>> infinite loop.
>>
>> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
>> [Suzuki: Introduce new matching function for address authentication]
>> Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
> 
> Does a corresponding patch need to go to stable?  As things stand, I
> think older kernels that support pointer auth will go wrong on v8.7+
> hardware that has these features.
> 
> This patch in its current form may be too heavy for stable, though.
> 
> See comment below though.
> 
>> ---
>> Change since v2:
>>   * Added new matching helper function has_address_auth_cpucap as address
>>     authentication cpufeature is now FTR_EXACT. The existing matching function
>>     has_cpuid_feature is specific to LOWER_SAFE.
>>
>>   arch/arm64/kernel/cpufeature.c | 56 ++++++++++++++++++++++++++++------
>>   1 file changed, 47 insertions(+), 9 deletions(-)
>>
>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>> index 4ae41670c2e6..42670d615555 100644
>> --- a/arch/arm64/kernel/cpufeature.c
>> +++ b/arch/arm64/kernel/cpufeature.c
>> @@ -210,9 +210,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
>>   	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
>> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
>> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
>>   	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
>> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
>> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
>>   	ARM64_FTR_END,
>>   };
>> @@ -1601,11 +1601,49 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
>>   #endif /* CONFIG_ARM64_RAS_EXTN */
>>   
>>   #ifdef CONFIG_ARM64_PTR_AUTH
>> -static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
>> -			     int __unused)
>> +static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
>>   {
>> -	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
>> -	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
>> +	int local_cpu_auth;
>> +	static int boot_cpu_auth_arch;
>> +	static int boot_cpu_auth_imp_def;
>> +
>> +	/* We don't expect to be called with SCOPE_SYSTEM */
>> +	WARN_ON(scope == SCOPE_SYSTEM);
>> +
>> +	local_cpu_auth = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
>> +						     entry->field_pos, entry->sign);
>> +	/*
>> +	 * The ptr-auth feature levels are not intercompatible with
>> +	 * lower levels. Hence we must match all the CPUs with that
>> +	 * of the boot CPU. So cache the level of boot CPU and compare
>> +	 * it against the secondary CPUs.
>> +	 */
> 
> Thinking about this, does it actually matter if different CPUs have
> different trapping behaviours for ptrauth?
> 
> If we are guaranteed that the signing algorithm is still compatible
> across all CPUs, we might get away with it.

You may be right that these protections may not be required in practice.
But the algorithm of each configuration is different, so in theory the
same software will produce different behaviour on different cpus.

This code initially only changed FTR_LOWER_SAFE to FTR_EXACT. But many
issues in the cpufeature framework were identified here [1], so the rest
of the defensive changes were added.

[1]: https://www.spinics.net/lists/arm-kernel/msg808238.html
> 
> Possibly a dumb question -- I haven't checked the specs to find out
> whether this makes any sense or not.

Your point is valid though.

> 
>> +	if (scope & SCOPE_BOOT_CPU) {
>> +		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_IMP_DEF) {
>> +			boot_cpu_auth_imp_def = local_cpu_auth;
>> +			return boot_cpu_auth_imp_def >= entry->min_field_value;
>> +		} else if (entry->capability == ARM64_HAS_ADDRESS_AUTH_ARCH) {
>> +			boot_cpu_auth_arch = local_cpu_auth;
>> +			return boot_cpu_auth_arch >= entry->min_field_value;
>> +		}
>> +	} else if (scope & SCOPE_LOCAL_CPU) {
>> +		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_IMP_DEF) {
>> +			return (local_cpu_auth >= entry->min_field_value) &&
>> +			       (local_cpu_auth == boot_cpu_auth_imp_def);
>> +		}
>> +		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_ARCH) {
>> +			return (local_cpu_auth >= entry->min_field_value) &&
>> +			       (local_cpu_auth == boot_cpu_auth_arch);
>> +		}
>> +	}
>> +	return false;
>> +}
>> +
>> +static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
>> +			     int scope)
>> +{
>> +	return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
>> +	       has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
>>   }
>>   
>>   static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
>> @@ -1954,7 +1992,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>>   		.sign = FTR_UNSIGNED,
>>   		.field_pos = ID_AA64ISAR1_APA_SHIFT,
>>   		.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
>> -		.matches = has_cpuid_feature,
>> +		.matches = has_address_auth_cpucap,
>>   	},
>>   	{
>>   		.desc = "Address authentication (IMP DEF algorithm)",
>> @@ -1964,12 +2002,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>>   		.sign = FTR_UNSIGNED,
>>   		.field_pos = ID_AA64ISAR1_API_SHIFT,
>>   		.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
>> -		.matches = has_cpuid_feature,
>> +		.matches = has_address_auth_cpucap,
>>   	},
>>   	{
>>   		.capability = ARM64_HAS_ADDRESS_AUTH,
>>   		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
>> -		.matches = has_address_auth,
>> +		.matches = has_address_auth_metacap,
>>   	},
>>   	{
>>   		.desc = "Generic authentication (architected algorithm)",
>> -- 
>> 2.17.1
>>
>>
>> _______________________________________________
>> linux-arm-kernel mailing list
>> linux-arm-kernel@lists.infradead.org
>> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
Dave Martin June 23, 2020, 2:47 p.m. UTC | #3
On Tue, Jun 23, 2020 at 06:47:02PM +0530, Amit Daniel Kachhap wrote:
> Hi,
> 
> On 6/22/20 8:05 PM, Dave Martin wrote:
> >On Thu, Jun 18, 2020 at 10:40:28AM +0530, Amit Daniel Kachhap wrote:
> >>This patch modifies the address authentication cpufeature type to EXACT
> >>from earlier LOWER_SAFE as the different configurations added for Armv8.6
> >>enhanced PAC have different behaviour and there is no tunable to enable the
> >>lower safe versions.
> >
> >The enhancements add no new instructions at EL0, right?  And no
> >behavioural change, provided that userspace doesn't care what signing/
> >authentication algorithm is used?
> 
> Yes you are correct that no new instructions are added.
> 
> >
> >If so then I guess you're correct not to add a new hwcap, but I thought
> >it was worth asking.
> >
> >>non-exact secondary cpus but rather marks them as tainted. This patch fixes
> >>it by replacing the generic match handler with a new handler specific to
> >>ptrauth.
> >>
> >>After this change, if there is any variation in ptrauth configurations in
> >>secondary cpus from boot cpu then those mismatched cpus are parked in an
> >>infinite loop.
> >>
> >>Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
> >>[Suzuki: Introduce new matching function for address authentication]
> >>Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
> >
> >Does a corresponding patch need to go to stable?  As things stand, I
> >think older kernels that support pointer auth will go wrong on v8.7+
> >hardware that has these features.
> >
> >This patch in its current form may be too heavy for stable, though.
> >
> >See comment below though.
> >
> >>---
> >>Change since v2:
> >>  * Added new matching helper function has_address_auth_cpucap as address
> >>    authentication cpufeature is now FTR_EXACT. The existing matching function
> >>    has_cpuid_feature is specific to LOWER_SAFE.
> >>
> >>  arch/arm64/kernel/cpufeature.c | 56 ++++++++++++++++++++++++++++------
> >>  1 file changed, 47 insertions(+), 9 deletions(-)
> >>
> >>diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> >>index 4ae41670c2e6..42670d615555 100644
> >>--- a/arch/arm64/kernel/cpufeature.c
> >>+++ b/arch/arm64/kernel/cpufeature.c
> >>@@ -210,9 +210,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
> >>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
> >>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
> >>  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> >>-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
> >>+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
> >>  	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
> >>-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> >>+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
> >>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
> >>  	ARM64_FTR_END,
> >>  };
> >>@@ -1601,11 +1601,49 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
> >>  #endif /* CONFIG_ARM64_RAS_EXTN */
> >>  #ifdef CONFIG_ARM64_PTR_AUTH
> >>-static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
> >>-			     int __unused)
> >>+static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
> >>  {
> >>-	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
> >>-	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
> >>+	int local_cpu_auth;
> >>+	static int boot_cpu_auth_arch;
> >>+	static int boot_cpu_auth_imp_def;
> >>+
> >>+	/* We don't expect to be called with SCOPE_SYSTEM */
> >>+	WARN_ON(scope == SCOPE_SYSTEM);
> >>+
> >>+	local_cpu_auth = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
> >>+						     entry->field_pos, entry->sign);
> >>+	/*
> >>+	 * The ptr-auth feature levels are not intercompatible with
> >>+	 * lower levels. Hence we must match all the CPUs with that
> >>+	 * of the boot CPU. So cache the level of boot CPU and compare
> >>+	 * it against the secondary CPUs.
> >>+	 */
> >
> >Thinking about this, does it actually matter if different CPUs have
> >different trapping behaviours for ptrauth?
> >
> >If we are guaranteed that the signing algorithm is still compatible
> >across all CPUs, we might get away with it.
> 
> You may be right that these protections may not be required in practice.
> But the algorithm of each configuration is different, so in theory the
> same software will produce different behaviour on different cpus.
> 
> This code initially only changed FTR_LOWER_SAFE to FTR_EXACT. But many
> issues in the cpufeature framework were identified here [1], so the rest
> of the defensive changes were added.
> 
> [1]: https://www.spinics.net/lists/arm-kernel/msg808238.html
> >
> >Possibly a dumb question -- I haven't checked the specs to find out
> >whether this makes any sense or not.
> 
> Your point is valid though.

OK.

Did you see my comment above about patches for stable?

[...]

Cheers
---Dave
Amit Daniel Kachhap June 24, 2020, 7:13 a.m. UTC | #4
On 6/23/20 8:17 PM, Dave Martin wrote:
> On Tue, Jun 23, 2020 at 06:47:02PM +0530, Amit Daniel Kachhap wrote:
>> Hi,
>>
>> On 6/22/20 8:05 PM, Dave Martin wrote:
>>> On Thu, Jun 18, 2020 at 10:40:28AM +0530, Amit Daniel Kachhap wrote:
>>>> This patch modifies the address authentication cpufeature type to EXACT
>>>> from earlier LOWER_SAFE as the different configurations added for Armv8.6
>>>> enhanced PAC have different behaviour and there is no tunable to enable the
>>>> lower safe versions.
>>>
>>> The enhancements add no new instructions at EL0, right?  And no
>>> behavioural change, provided that userspace doesn't care what signing/
>>> authentication algorithm is used?
>>
>> Yes you are correct that no new instructions are added.
>>
>>>
>>> If so then I guess you're correct not to add a new hwcap, but I thought
>>> it was worth asking.
>>>
>>>> non-exact secondary cpus but rather marks them as tainted. This patch fixes
>>>> it by replacing the generic match handler with a new handler specific to
>>>> ptrauth.
>>>>
>>>> After this change, if there is any variation in ptrauth configurations in
>>>> secondary cpus from boot cpu then those mismatched cpus are parked in an
>>>> infinite loop.
>>>>
>>>> Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
>>>> [Suzuki: Introduce new matching function for address authentication]
>>>> Suggested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
>>>
>>> Does a corresponding patch need to go to stable?  As things stand, I
>>> think older kernels that support pointer auth will go wrong on v8.7+
>>> hardware that has these features.

Yes, it makes sense to add this patch to the stable version.

@Catalin, @Will - Shall I send this one with a fix subject line? Please 
let me know your suggestion.

>>>
>>> This patch in its current form may be too heavy for stable, though.
>>>
>>> See comment below though.
>>>
>>>> ---
>>>> Change since v2:
>>>>   * Added new matching helper function has_address_auth_cpucap as address
>>>>     authentication cpufeature is now FTR_EXACT. The existing matching function
>>>>     has_cpuid_feature is specific to LOWER_SAFE.
>>>>
>>>>   arch/arm64/kernel/cpufeature.c | 56 ++++++++++++++++++++++++++++------
>>>>   1 file changed, 47 insertions(+), 9 deletions(-)
>>>>
>>>> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
>>>> index 4ae41670c2e6..42670d615555 100644
>>>> --- a/arch/arm64/kernel/cpufeature.c
>>>> +++ b/arch/arm64/kernel/cpufeature.c
>>>> @@ -210,9 +210,9 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
>>>> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
>>>> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
>>>> -		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
>>>> +		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
>>>>   	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
>>>>   	ARM64_FTR_END,
>>>>   };
>>>> @@ -1601,11 +1601,49 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
>>>>   #endif /* CONFIG_ARM64_RAS_EXTN */
>>>>   #ifdef CONFIG_ARM64_PTR_AUTH
>>>> -static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
>>>> -			     int __unused)
>>>> +static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
>>>>   {
>>>> -	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
>>>> -	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
>>>> +	int local_cpu_auth;
>>>> +	static int boot_cpu_auth_arch;
>>>> +	static int boot_cpu_auth_imp_def;
>>>> +
>>>> +	/* We don't expect to be called with SCOPE_SYSTEM */
>>>> +	WARN_ON(scope == SCOPE_SYSTEM);
>>>> +
>>>> +	local_cpu_auth = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
>>>> +						     entry->field_pos, entry->sign);
>>>> +	/*
>>>> +	 * The ptr-auth feature levels are not intercompatible with
>>>> +	 * lower levels. Hence we must match all the CPUs with that
>>>> +	 * of the boot CPU. So cache the level of boot CPU and compare
>>>> +	 * it against the secondary CPUs.
>>>> +	 */
>>>
>>> Thinking about this, does it actually matter if different CPUs have
>>> different trapping behaviours for ptrauth?
>>>
>>> If we are guaranteed that the signing algorithm is still compatible
>>> across all CPUs, we might get away with it.
>>
>> You may be right that these protections may not be required in practice.
>> But the algorithm of each configuration is different, so in theory the
>> same software will produce different behaviour on different cpus.
>>
>> This code initially only changed FTR_LOWER_SAFE to FTR_EXACT. But many
>> issues in the cpufeature framework were identified here [1], so the rest
>> of the defensive changes were added.
>>
>> [1]: https://www.spinics.net/lists/arm-kernel/msg808238.html
>>>
>>> Possibly a dumb question -- I haven't checked the specs to find out
>>> whether this makes any sense or not.
>>
>> Your point is valid though.
> 
> OK.
> 
> Did you see my comment above about patches for stable?

oops I missed this one.
> 
> [...]
> 
> Cheers
> ---Dave
>
Will Deacon June 24, 2020, 7:49 a.m. UTC | #5
On Wed, Jun 24, 2020 at 12:43:20PM +0530, Amit Daniel Kachhap wrote:
> On 6/23/20 8:17 PM, Dave Martin wrote:
> > On Tue, Jun 23, 2020 at 06:47:02PM +0530, Amit Daniel Kachhap wrote:
> > > On 6/22/20 8:05 PM, Dave Martin wrote:
> > > > On Thu, Jun 18, 2020 at 10:40:28AM +0530, Amit Daniel Kachhap wrote:
> > > > > This patch modifies the address authentication cpufeature type to EXACT
> > > > > from earlier LOWER_SAFE as the different configurations added for Armv8.6
> > > > > enhanced PAC have different behaviour and there is no tunable to enable the
> > > > > lower safe versions.

$ git grep 'This patch' Documentation/process/submitting-patches.rst

> > > > Does a corresponding patch need to go to stable?  As things stand, I
> > > > think older kernels that support pointer auth will go wrong on v8.7+
> > > > hardware that has these features.
> 
> Yes, it makes sense to add this patch to the stable version.
> 
> @Catalin, @Will - Shall I send this one with a fix subject line? Please let
> me know your suggestion.

What exactly goes wrong? As far as I can tell, we will taint and probably
(?) crash shortly afterwards if you run an old kernel on hardware with
mismatched pointer auth. If that's the case, I don't think this warrants
stable (especially since this file has seen enough churn that the backports
are likely risky).

Can you confirm that things only go wrong if we have a mismatched system,
and ideally provide an example of what the crash looks like (you should
be able to get a fast model to emulate this setup)?

Cheers,

Will
Amit Daniel Kachhap June 24, 2020, 11:55 a.m. UTC | #6
Hi,

On 6/24/20 1:19 PM, Will Deacon wrote:
> On Wed, Jun 24, 2020 at 12:43:20PM +0530, Amit Daniel Kachhap wrote:
>> On 6/23/20 8:17 PM, Dave Martin wrote:
>>> On Tue, Jun 23, 2020 at 06:47:02PM +0530, Amit Daniel Kachhap wrote:
>>>> On 6/22/20 8:05 PM, Dave Martin wrote:
>>>>> On Thu, Jun 18, 2020 at 10:40:28AM +0530, Amit Daniel Kachhap wrote:
>>>>>> This patch modifies the address authentication cpufeature type to EXACT
>>>>>> from earlier LOWER_SAFE as the different configurations added for Armv8.6
>>>>>> enhanced PAC have different behaviour and there is no tunable to enable the
>>>>>> lower safe versions.
> 
> $ git grep 'This patch' Documentation/process/submitting-patches.rst

ok sure. I will update.

> 
>>>>> Does a corresponding patch need to go to stable?  As things stand, I
>>>>> think older kernels that support pointer auth will go wrong on v8.7+
>>>>> hardware that has these features.
>>
>> Yes, it makes sense to add this patch to the stable version.
>>
>> @Catalin, @Will - Shall I send this one with a fix subject line? Please let
>> me know your suggestion.
> 
> What exactly goes wrong? As far as I can tell, we will taint and probably
> (?) crash shortly afterwards if you run an old kernel on hardware with
> mismatched pointer auth.

Yes, you are right that v8.6+ hardware will crash with an older kernel if
a ptrauth cpu variation occurs. Basically EnhancedPAC2 (with the extra
xor) and the old pac algorithm may not be compatible and may crash when a
thread migration happens in between paciasp and autiasp [1].

[1]: 
https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/arm-architecture-developments-armv8-6-a
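
To make that failure mode concrete, here is a rough sketch with a
hypothetical function, assuming it is built with GCC/Clang's
-mbranch-protection=pac-ret (this is an illustration, not code from the
series): the compiler signs the link register on function entry and
authenticates it before returning, so the sign and the authenticate can land
on different CPUs if the task migrates in between.

/*
 * Illustration only: a function built with -mbranch-protection=pac-ret.
 * The compiler emits paciasp in the prologue (sign LR with the IA key and
 * SP as modifier) and autiasp in the epilogue (authenticate LR).
 */
long sum(const long *p, int n)
{
        /* prologue: paciasp -- LR is signed on the current CPU */
        long s = 0;
        int i;

        for (i = 0; i < n; i++)
                s += p[i];

        /*
         * If the task is preempted here and migrated to a CPU whose PAC
         * algorithm differs from the one that signed LR (e.g. EnhancedPAC2
         * vs. the original algorithm), the PAC no longer matches.
         */

        /*
         * epilogue: autiasp -- the authentication either faults or leaves
         *           a corrupted return address, so the following ret crashes
         */
        return s;
}

If all CPUs are forced to the same configuration, as this patch does by
treating the field as FTR_EXACT and parking mismatched secondaries, the
sign/authenticate pair always uses a single algorithm.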

> If that's the case, I don't think this warrants
> stable (especially since this file has seen enough churn that the backports
> are likely risky).

ok. I was merely seeking an opinion :).

> 
> Can you confirm that things only go wrong if we have a mismatched system,
> and ideally provide an example of what the crash looks like (you should
> be able to get a fast model to emulate this setup)?

It is difficult to emulate this as the fast model only provides a
cluster-level pac config option and not a per-cpu one. I will check on it
further.

Thanks,
Amit Daniel

> 
> Cheers,
> 
> Will
>

Patch

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4ae41670c2e6..42670d615555 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -210,9 +210,9 @@  static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_API_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
-		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
+		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_APA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
@@ -1601,11 +1601,49 @@  static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
 #endif /* CONFIG_ARM64_RAS_EXTN */
 
 #ifdef CONFIG_ARM64_PTR_AUTH
-static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
-			     int __unused)
+static bool has_address_auth_cpucap(const struct arm64_cpu_capabilities *entry, int scope)
 {
-	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
-	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
+	int local_cpu_auth;
+	static int boot_cpu_auth_arch;
+	static int boot_cpu_auth_imp_def;
+
+	/* We don't expect to be called with SCOPE_SYSTEM */
+	WARN_ON(scope == SCOPE_SYSTEM);
+
+	local_cpu_auth = cpuid_feature_extract_field(__read_sysreg_by_encoding(entry->sys_reg),
+						     entry->field_pos, entry->sign);
+	/*
+	 * The ptr-auth feature levels are not intercompatible with
+	 * lower levels. Hence we must match all the CPUs with that
+	 * of the boot CPU. So cache the level of boot CPU and compare
+	 * it against the secondary CPUs.
+	 */
+	if (scope & SCOPE_BOOT_CPU) {
+		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_IMP_DEF) {
+			boot_cpu_auth_imp_def = local_cpu_auth;
+			return boot_cpu_auth_imp_def >= entry->min_field_value;
+		} else if (entry->capability == ARM64_HAS_ADDRESS_AUTH_ARCH) {
+			boot_cpu_auth_arch = local_cpu_auth;
+			return boot_cpu_auth_arch >= entry->min_field_value;
+		}
+	} else if (scope & SCOPE_LOCAL_CPU) {
+		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_IMP_DEF) {
+			return (local_cpu_auth >= entry->min_field_value) &&
+			       (local_cpu_auth == boot_cpu_auth_imp_def);
+		}
+		if (entry->capability == ARM64_HAS_ADDRESS_AUTH_ARCH) {
+			return (local_cpu_auth >= entry->min_field_value) &&
+			       (local_cpu_auth == boot_cpu_auth_arch);
+		}
+	}
+	return false;
+}
+
+static bool has_address_auth_metacap(const struct arm64_cpu_capabilities *entry,
+			     int scope)
+{
+	return has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_ARCH], scope) ||
+	       has_address_auth_cpucap(cpu_hwcaps_ptrs[ARM64_HAS_ADDRESS_AUTH_IMP_DEF], scope);
 }
 
 static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
@@ -1954,7 +1992,7 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64ISAR1_APA_SHIFT,
 		.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
-		.matches = has_cpuid_feature,
+		.matches = has_address_auth_cpucap,
 	},
 	{
 		.desc = "Address authentication (IMP DEF algorithm)",
@@ -1964,12 +2002,12 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64ISAR1_API_SHIFT,
 		.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
-		.matches = has_cpuid_feature,
+		.matches = has_address_auth_cpucap,
 	},
 	{
 		.capability = ARM64_HAS_ADDRESS_AUTH,
 		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
-		.matches = has_address_auth,
+		.matches = has_address_auth_metacap,
 	},
 	{
 		.desc = "Generic authentication (architected algorithm)",