@@ -96,6 +96,7 @@ void spectre_v4_enable_task_mitigation(struct task_struct *tsk);
enum mitigation_state arm64_get_meltdown_state(void);
enum mitigation_state arm64_get_spectre_bhb_state(void);
+enum mitigation_state arm64_get_spectre_bse_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -112,6 +112,24 @@ static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
}
}
+static const char *get_bse_affected_string(enum mitigation_state bse_state)
+{
+	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
+
+	switch (bse_state) {
+	case SPECTRE_UNAFFECTED:
+		return "";
+	default:
+	case SPECTRE_VULNERABLE:
+		/* With BHB also vulnerable, this reads ", but not BHB or BSE" */
+		if (bhb_state == SPECTRE_VULNERABLE)
+			return " or BSE";
+		return ", but not BSE";
+	case SPECTRE_MITIGATED:
+		return ", BSE";
+	}
+}
+
static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
@@ -125,7 +143,9 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
char *buf)
{
enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
+	enum mitigation_state bse_state = arm64_get_spectre_bse_state();
const char *bhb_str = get_bhb_affected_string(bhb_state);
+	const char *bse_str = get_bse_affected_string(bse_state);
const char *v2_str = "Branch predictor hardening";
switch (spectre_v2_state) {
@@ -143,7 +163,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");
-		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
+		return sprintf(buf, "Mitigation: %s%s%s\n", v2_str, bhb_str, bse_str);
case SPECTRE_VULNERABLE:
fallthrough;
default:
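For illustration, here is a standalone sketch of how the reporting string now composes. The BHB helper is a paraphrase of the existing get_bhb_affected_string(), the enum layout is assumed, and the user-space framing (main(), printf()) is purely illustrative rather than kernel code:

#include <stdio.h>

enum mitigation_state { SPECTRE_UNAFFECTED, SPECTRE_MITIGATED, SPECTRE_VULNERABLE };

/* Paraphrase of the existing get_bhb_affected_string() */
static const char *bhb_str(enum mitigation_state bhb)
{
	switch (bhb) {
	case SPECTRE_UNAFFECTED:
		return "";
	case SPECTRE_MITIGATED:
		return ", BHB";
	default:
		return ", but not BHB";
	}
}

/* Mirrors the new get_bse_affected_string() above */
static const char *bse_str(enum mitigation_state bse, enum mitigation_state bhb)
{
	switch (bse) {
	case SPECTRE_UNAFFECTED:
		return "";
	case SPECTRE_MITIGATED:
		return ", BSE";
	default:
		return (bhb == SPECTRE_VULNERABLE) ? " or BSE" : ", but not BSE";
	}
}

int main(void)
{
	/* e.g. Cortex-A72 r0pX with the loop mitigation but no WA3 firmware */
	enum mitigation_state bhb = SPECTRE_MITIGATED;
	enum mitigation_state bse = SPECTRE_VULNERABLE;

	printf("Mitigation: Branch predictor hardening%s%s\n",
	       bhb_str(bhb), bse_str(bse, bhb));
	return 0;
}

This prints "Mitigation: Branch predictor hardening, BHB, but not BSE", the line quoted in the commit message below.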
@@ -825,6 +845,15 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
* - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
* software mitigation in the vectors is needed.
* - Has CSV2.3, so is unaffected.
+ *
+ *
+ * Spectre BSE.
+ *
+ * Affects a small number of CPUs. Cortex-A73 and Cortex-A75 are already
+ * mitigated by the firmware Spectre-BHB mitigation.
+ * A72 r0 is mitigated by the firmware Spectre v2 call. This means A72 appears
+ * in both the BHB "loop mitigated list" and "firmware mitigated list", and
+ * needs special casing.
*/
static enum mitigation_state spectre_bhb_state;
@@ -841,6 +870,13 @@ enum bhb_mitigation_bits {
};
static unsigned long system_bhb_mitigations;
+static enum mitigation_state spectre_bse_state;
+
+enum mitigation_state arm64_get_spectre_bse_state(void)
+{
+ return spectre_bse_state;
+}
+
/*
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
* SCOPE_SYSTEM call will give the right answer.
@@ -916,12 +952,36 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
}
}
+/*
+ * For a core affected by BSE, get the WA3 state and treat an 'unaffected'
+ * response from BSE-unaware firmware as not-implemented.
+ */
+static enum mitigation_state spectre_bse_get_cpu_fw_mitigation_state(void)
+{
+	enum mitigation_state state = spectre_bhb_get_cpu_fw_mitigation_state();
+
+	switch (state) {
+	case SPECTRE_MITIGATED:
+		return state;
+	default:
+	case SPECTRE_UNAFFECTED:
+		/*
+		 * We don't rely on firmware for discovery of BSE affected
+		 * cores. Unaffected is treated as not-implemented.
+		 */
+	case SPECTRE_VULNERABLE:
+		return SPECTRE_VULNERABLE;
+	}
+}
+
static bool is_spectre_bhb_fw_affected(int scope)
{
static bool system_affected;
enum mitigation_state fw_state;
bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
+		/* A72 r0pX */
+		MIDR_RANGE(MIDR_CORTEX_A72, 0, 0, 0, 0xf),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
{},
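For context, spectre_bse_get_cpu_fw_mitigation_state() above simply reuses the existing SMCCC probe for ARCH_WORKAROUND_3. A rough sketch of that probe, paraphrased from proton-pack.c rather than copied, with an illustrative helper name (probe_wa3) and assuming the usual <linux/arm-smccc.h> and asm/spectre.h definitions:

/* Paraphrase of spectre_bhb_get_cpu_fw_mitigation_state(), for reference */
static enum mitigation_state probe_wa3(void)
{
	struct arm_smccc_res res;

	/* Ask firmware whether SMCCC_ARCH_WORKAROUND_3 is implemented */
	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	switch ((int)res.a0) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		/* Firmware believes this CPU is safe */
		return SPECTRE_UNAFFECTED;
	default:
		return SPECTRE_VULNERABLE;
	}
}

The BSE wrapper then folds SPECTRE_UNAFFECTED into SPECTRE_VULNERABLE, as firmware that predates BSE cannot be trusted to report it.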
@@ -974,6 +1034,29 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
return false;
}
+static bool is_spectre_bse_affected(int scope)
+{
+	static bool system_affected;
+	static const struct midr_range spectre_bse_firmware_mitigated_list[] = {
+		/* A72 r0pX */
+		MIDR_RANGE(MIDR_CORTEX_A72, 0, 0, 0, 0xf),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+		{},
+	};
+
+	if (scope != SCOPE_LOCAL_CPU)
+		return system_affected;
+
+	if (is_midr_in_range_list(read_cpuid_id(),
+				  spectre_bse_firmware_mitigated_list)) {
+		system_affected = true;
+		return true;
+	}
+
+	return false;
+}
+
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
const char *v = arm64_get_bp_hardening_vector(slot);
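The spectre_bse_firmware_mitigated_list above matches cores by MIDR_EL1 fields: Arm implementer, part number, and for Cortex-A72 only variant 0 (r0pX). A user-space sketch of equivalent matching, with the field offsets and part numbers written out; this is not the kernel's midr_range code, and the helper name and sample values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIDR_IMPLEMENTER(m)	(((m) >> 24) & 0xff)
#define MIDR_VARIANT(m)		(((m) >> 20) & 0xf)
#define MIDR_PARTNUM(m)		(((m) >> 4) & 0xfff)

static bool midr_is_bse_affected(uint32_t midr)
{
	if (MIDR_IMPLEMENTER(midr) != 0x41)	/* Arm Ltd. */
		return false;

	switch (MIDR_PARTNUM(midr)) {
	case 0xd08:				/* Cortex-A72: r0pX only */
		return MIDR_VARIANT(midr) == 0;
	case 0xd09:				/* Cortex-A73: all versions */
	case 0xd0a:				/* Cortex-A75: all versions */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d\n", midr_is_bse_affected(0x410fd083));	/* A72 r0p3 -> 1 */
	printf("%d\n", midr_is_bse_affected(0x411fd083));	/* A72 r1p3 -> 0 */
	return 0;
}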
@@ -1046,7 +1129,9 @@ static void spectre_bhb_enable_loop_mitigation(void)
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
+	bool bse_upgrade_loop_mitigation = false;
enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+	enum mitigation_state bse_state = SPECTRE_VULNERABLE;
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1073,17 +1158,42 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
state = SPECTRE_MITIGATED;
set_bit(BHB_INSN, &system_bhb_mitigations);
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
-		spectre_bhb_enable_loop_mitigation();
-		state = SPECTRE_MITIGATED;
+		/* Cores also affected by BSE are special cased later */
+		if (!is_spectre_bse_affected(SCOPE_LOCAL_CPU)) {
+			spectre_bhb_enable_loop_mitigation();
+			state = SPECTRE_MITIGATED;
+		} else {
+			bse_upgrade_loop_mitigation = true;
+		}
} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (fw_state == SPECTRE_MITIGATED) {
spectre_bhb_enable_fw_mitigation();
state = SPECTRE_MITIGATED;
+
+			if (is_spectre_bse_affected(SCOPE_LOCAL_CPU))
+				bse_state = SPECTRE_MITIGATED;
}
}
+	/* Spectre BSE needs to upgrade the BHB mitigation to use firmware */
+	if (bse_upgrade_loop_mitigation) {
+		bse_state = spectre_bse_get_cpu_fw_mitigation_state();
+		if (bse_state == SPECTRE_MITIGATED) {
+			spectre_bhb_enable_fw_mitigation();
+			state = SPECTRE_MITIGATED;
+			bse_state = SPECTRE_MITIGATED;
+		} else {
+			spectre_bhb_enable_loop_mitigation();
+			state = SPECTRE_MITIGATED;
+			bse_state = SPECTRE_VULNERABLE;
+		}
+	} else if (!is_spectre_bse_affected(SCOPE_LOCAL_CPU)) {
+		bse_state = SPECTRE_UNAFFECTED;
+	}
+
update_mitigation_state(&spectre_bhb_state, state);
+	update_mitigation_state(&spectre_bse_state, bse_state);
}
/* Patched to NOP when enabled */
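To summarise the selection logic in spectre_bhb_enable_mitigation() above, a hypothetical side-table helper (not part of the patch). It assumes the kernel's enum mitigation_state, collapses the firmware-affected list check and the SMCCC probe into a single fw_has_wa3 flag, and omits the CSV2.3/ECBHB/clearbhb early-outs:

/* Hypothetical summary of the BSE-relevant outcomes; illustrative only */
enum bhb_choice { BHB_NONE, BHB_LOOP, BHB_FW };

struct outcome {
	enum bhb_choice bhb;
	enum mitigation_state bse;
};

static struct outcome pick_mitigation(bool loop_affected, bool bse_affected,
				      bool fw_has_wa3)
{
	/* Loop-affected only: branchy loop, BSE is not a concern */
	if (loop_affected && !bse_affected)
		return (struct outcome){ BHB_LOOP, SPECTRE_UNAFFECTED };

	/* e.g. Cortex-A72 r0pX: prefer WA3, fall back to the loop */
	if (loop_affected && bse_affected)
		return fw_has_wa3 ?
			(struct outcome){ BHB_FW, SPECTRE_MITIGATED } :
			(struct outcome){ BHB_LOOP, SPECTRE_VULNERABLE };

	/* e.g. Cortex-A73/A75: the existing WA3 mitigation also covers BSE */
	if (fw_has_wa3)
		return (struct outcome){ BHB_FW,
			bse_affected ? SPECTRE_MITIGATED : SPECTRE_UNAFFECTED };

	return (struct outcome){ BHB_NONE,
		bse_affected ? SPECTRE_VULNERABLE : SPECTRE_UNAFFECTED };
}

The Cortex-A72 r0pX case is the second branch: use WA3 when firmware implements it, otherwise fall back to the branchy-loop and report BSE as vulnerable, which is the upgrade path added above.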
A group of older Cortex cores is affected by Spectre-BSE, which on one core
bypasses the existing Spectre-BHB mitigation. The workaround is to use the
Spectre-BHB 'arch_workaround_3' firmware call.

For Cortex-A7{3,5}, this is the existing mitigation, so only the reporting
needs updating. For Cortex-A72, if WA3 is not implemented there is still a
benefit from enabling the Spectre-BHB branchy-loop. Anything affected by
Spectre-BSE is therefore special-cased to avoid enabling the branchy-loop,
and to only do so if the firmware WA3 call is not implemented.

Such platforms will be reported as:
| Mitigation: Branch predictor hardening, BHB, but not BSE
via the Spectre-v2 vulnerabilities file.

Signed-off-by: James Morse <james.morse@arm.com>
---
Nothing relies on the BSE entries being in the BHB firmware list, but this
is done on the principle of least-surprise.
---
 arch/arm64/include/asm/spectre.h |   1 +
 arch/arm64/kernel/proton-pack.c  | 116 ++++++++++++++++++++++++++++++-
 2 files changed, 114 insertions(+), 3 deletions(-)