
[v6,08/18] arm64: cpufeature: handle conflicts based on capability

Message ID 1583476525-13505-9-git-send-email-amit.kachhap@arm.com (mailing list archive)
State New, archived
Series arm64: return address signing

Commit Message

Amit Daniel Kachhap March 6, 2020, 6:35 a.m. UTC
From: Kristina Martsenko <kristina.martsenko@arm.com>

Each system capability can be of either boot, local, or system scope,
depending on when the state of the capability is finalized. When we
detect a conflict on a late CPU, we either offline the CPU or panic the
system. We currently always panic if the conflict is caused by a boot
scope capability, and offline the CPU if the conflict is caused by a
local or system scope capability.

We're going to want to add a new capability (for pointer authentication)
which needs to be boot scope but doesn't need to panic the system when a
conflict is detected. So add a new flag to specify whether the
capability requires the system to panic or not. Current boot scope
capabilities are updated to set the flag, so there should be no
functional change as a result of this patch.

Reviewed-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Kristina Martsenko <kristina.martsenko@arm.com>
Signed-off-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
---
Changes since v5:
 * Moved cpucap_panic_on_conflict helper function inside cpufeature.c.

 arch/arm64/include/asm/cpufeature.h | 12 ++++++++++--
 arch/arm64/kernel/cpufeature.c      | 29 +++++++++++++++--------------
 2 files changed, 25 insertions(+), 16 deletions(-)
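
For readers less familiar with the cpucaps type bits, the following is a minimal user-space sketch of the conflict-handling decision this patch introduces: panic only when the capability explicitly carries the panic flag, otherwise offline the offending CPU. The CPUCAP_* constants, struct cap and handle_conflict() below are simplified stand-ins for illustration, not the kernel's definitions.

#include <stdio.h>

/* Hypothetical stand-ins for the capability type bits; the real
 * definitions live in arch/arm64/include/asm/cpufeature.h. */
#define CPUCAP_SCOPE_BOOT_CPU		(1U << 2)
#define CPUCAP_PANIC_ON_CONFLICT	(1U << 6)

struct cap {
	const char *desc;
	unsigned int type;
};

/* Mirrors the decision verify_local_cpu_caps() takes in the patch:
 * panic only if the capability asks for it, else offline the CPU. */
static void handle_conflict(const struct cap *c)
{
	if (c->type & CPUCAP_PANIC_ON_CONFLICT)
		printf("%s: conflict on late CPU -> panic the system\n", c->desc);
	else
		printf("%s: conflict on late CPU -> offline this CPU\n", c->desc);
}

int main(void)
{
	/* Existing strict boot-scope feature: keeps the old panic behaviour. */
	struct cap strict = {
		.desc = "strict boot cpu feature",
		.type = CPUCAP_SCOPE_BOOT_CPU | CPUCAP_PANIC_ON_CONFLICT,
	};
	/* Boot-scope feature without the flag (e.g. pointer auth): offline only. */
	struct cap relaxed = {
		.desc = "boot cpu feature without the panic flag",
		.type = CPUCAP_SCOPE_BOOT_CPU,
	};

	handle_conflict(&strict);
	handle_conflict(&relaxed);
	return 0;
}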

Comments

Vincenzo Frascino March 10, 2020, 12:31 p.m. UTC | #1
Hi Amit,

On 3/6/20 6:35 AM, Amit Daniel Kachhap wrote:

[...]

>  
> +static bool
> +cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
> +{
> +	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
> +}
> +

If there is no specific reason in the previous patch for changing the signature,
could you please make this function "inline" as well, for symmetry with the others?

[...]
Catalin Marinas March 11, 2020, 11:03 a.m. UTC | #2
On Tue, Mar 10, 2020 at 12:31:56PM +0000, Vincenzo Frascino wrote:
> On 3/6/20 6:35 AM, Amit Daniel Kachhap wrote:
> 
> [...]
> 
> >  
> > +static bool
> > +cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
> > +{
> > +	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
> > +}
> > +
> 
> If there is no specific reason in the previous patch for changing the signature,
> could you please make this function "inline" as well, for symmetry with the others?

Please don't add new 'inline' unless you have a real justification (in
which case __always_inline is better suited). Also symmetry with others
is not a good argument.

https://www.kernel.org/doc/html/latest/process/coding-style.html#the-inline-disease
Vincenzo Frascino March 11, 2020, 11:46 a.m. UTC | #3
On 3/11/20 11:03 AM, Catalin Marinas wrote:
> On Tue, Mar 10, 2020 at 12:31:56PM +0000, Vincenzo Frascino wrote:
>> On 3/6/20 6:35 AM, Amit Daniel Kachhap wrote:
>>
>> [...]
>>
>>>  
>>> +static bool
>>> +cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
>>> +{
>>> +	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
>>> +}
>>> +
>>
>> If there is no specific reason in the previous patch for changing the signature,
>> could you please make this function "inline" as well, for symmetry with the others?
> 
> Please don't add new 'inline' unless you have a real justification (in
> which case __always_inline is better suited). Also symmetry with others
> is not a good argument.
> 
> https://www.kernel.org/doc/html/latest/process/coding-style.html#the-inline-disease
> 

Ok, thanks for the explanation.

With this:

Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>

Patch

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index ae9673a..9818ff8 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -208,6 +208,10 @@  extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
  *     In some non-typical cases either both (a) and (b), or neither,
  *     should be permitted. This can be described by including neither
  *     or both flags in the capability's type field.
+ *
+ *     In case of a conflict, the CPU is prevented from booting. If the
+ *     ARM64_CPUCAP_PANIC_ON_CONFLICT flag is specified for the capability,
+ *     then a kernel panic is triggered.
  */
 
 
@@ -240,6 +244,8 @@  extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 #define ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU	((u16)BIT(4))
 /* Is it safe for a late CPU to miss this capability when system has it */
 #define ARM64_CPUCAP_OPTIONAL_FOR_LATE_CPU	((u16)BIT(5))
+/* Panic when a conflict is detected */
+#define ARM64_CPUCAP_PANIC_ON_CONFLICT		((u16)BIT(6))
 
 /*
  * CPU errata workarounds that need to be enabled at boot time if one or
@@ -279,9 +285,11 @@  extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
 
 /*
  * CPU feature used early in the boot based on the boot CPU. All secondary
- * CPUs must match the state of the capability as detected by the boot CPU.
+ * CPUs must match the state of the capability as detected by the boot CPU. In
+ * case of a conflict, a kernel panic is triggered.
  */
-#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE ARM64_CPUCAP_SCOPE_BOOT_CPU
+#define ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE		\
+	(ARM64_CPUCAP_SCOPE_BOOT_CPU | ARM64_CPUCAP_PANIC_ON_CONFLICT)
 
 struct arm64_cpu_capabilities {
 	const char *desc;
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 865dce6..09906ff 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1376,6 +1376,12 @@  cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
 	return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
 }
 
+static bool
+cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
+{
+	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -2018,10 +2024,8 @@  static void __init enable_cpu_capabilities(u16 scope_mask)
  * Run through the list of capabilities to check for conflicts.
  * If the system has already detected a capability, take necessary
  * action on this CPU.
- *
- * Returns "false" on conflicts.
  */
-static bool verify_local_cpu_caps(u16 scope_mask)
+static void verify_local_cpu_caps(u16 scope_mask)
 {
 	int i;
 	bool cpu_has_cap, system_has_cap;
@@ -2066,10 +2070,12 @@  static bool verify_local_cpu_caps(u16 scope_mask)
 		pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
 			smp_processor_id(), caps->capability,
 			caps->desc, system_has_cap, cpu_has_cap);
-		return false;
-	}
 
-	return true;
+		if (cpucap_panic_on_conflict(caps))
+			cpu_panic_kernel();
+		else
+			cpu_die_early();
+	}
 }
 
 /*
@@ -2079,12 +2085,8 @@  static bool verify_local_cpu_caps(u16 scope_mask)
 static void check_early_cpu_features(void)
 {
 	verify_cpu_asid_bits();
-	/*
-	 * Early features are used by the kernel already. If there
-	 * is a conflict, we cannot proceed further.
-	 */
-	if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
-		cpu_panic_kernel();
+
+	verify_local_cpu_caps(SCOPE_BOOT_CPU);
 }
 
 static void
@@ -2132,8 +2134,7 @@  static void verify_local_cpu_capabilities(void)
 	 * check_early_cpu_features(), as they need to be verified
 	 * on all secondary CPUs.
 	 */
-	if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
-		cpu_die_early();
+	verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
 
 	verify_local_elf_hwcaps(arm64_elf_hwcaps);