Message ID | 20190125180711.1970973-11-jeremy.linton@arm.com (mailing list archive) |
---|---|
State | New, archived |
Series | arm64: add system vulnerability sysfs entries |
On Fri, 25 Jan 2019 12:07:09 -0600
Jeremy Linton <jeremy.linton@arm.com> wrote:

Hi,

> Add code to track whether all the cores in the machine are
> vulnerable, and whether all the vulnerable cores have been
> mitigated.
>
> Once we have that information we can add the sysfs stub and
> provide an accurate view of what is known about the machine.
>
> Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
> ---
>  arch/arm64/kernel/cpu_errata.c | 31 +++++++++++++++++++++++++++++--
>  1 file changed, 29 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
> index 024c83ffff99..caedf268c972 100644
> --- a/arch/arm64/kernel/cpu_errata.c
> +++ b/arch/arm64/kernel/cpu_errata.c
> @@ -497,6 +497,10 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
>  	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
>  	CAP_MIDR_RANGE_LIST(midr_list)
>
> +/* Track overall mitigation state. We are only mitigated if all cores are ok */
> +static bool __hardenbp_enab = true;
> +static bool __spectrev2_safe = true;
> +
>  /*
>   * List of CPUs that do not need any Spectre-v2 mitigation at all.
>   */
> @@ -507,6 +511,10 @@ static const struct midr_range spectre_v2_safe_list[] = {
>  	{ /* sentinel */ }
>  };
>
> +/*
> + * Track overall bp hardening for all heterogeneous cores in the machine.
> + * We are only considered "safe" if all booted cores are known safe.
> + */
>  static bool __maybe_unused
>  check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
>  {
> @@ -528,12 +536,19 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
>  	if (!need_wa)
>  		return false;
>
> -	if (need_wa < 0)
> +	__spectrev2_safe = false;
> +
> +	if (need_wa < 0) {
>  		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
> +		__hardenbp_enab = false;
> +	}
>
>  	/* forced off */
> -	if (__nospectre_v2)
> +	if (__nospectre_v2) {
> +		pr_info_once("spectrev2 mitigation disabled by command line option\n");
> +		__hardenbp_enab = false;
>  		return false;
> +	}
>
>  	return (need_wa > 0);
>  }
> @@ -757,4 +772,16 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
>  	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
>  }
>
> +ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
> +		char *buf)

w/s issue.

Other than that:

Reviewed-by: Andre Przywara <andre.przywara@arm.com>

Cheers,
Andre.

> +{
> +	if (__spectrev2_safe)
> +		return sprintf(buf, "Not affected\n");
> +
> +	if (__hardenbp_enab)
> +		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
> +
> +	return sprintf(buf, "Vulnerable\n");
> +}
> +
>  #endif
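[Editor's note] For context, cpu_show_spectre_v2() added by this patch is the arch backend for the generic sysfs vulnerability file, normally exposed as /sys/devices/system/cpu/vulnerabilities/spectre_v2. Below is a minimal user-space sketch, not part of the patch; the path follows the generic cpu_show_* convention and the strings are the ones the patch prints, but treat both as assumptions rather than a definitive interface description.

/*
 * Sketch: read the Spectre-v2 state reported by cpu_show_spectre_v2().
 * Assumes the usual sysfs location for the generic vulnerability files.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/vulnerabilities/spectre_v2";
	char buf[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return EXIT_FAILURE;
	}
	fclose(f);

	/*
	 * Per the patch, the file contains one of:
	 *   "Not affected", "Mitigation: Branch predictor hardening",
	 *   or "Vulnerable".
	 */
	printf("spectre_v2: %s", buf);

	/* Exit non-zero only when the kernel reports "Vulnerable". */
	return strncmp(buf, "Vulnerable", strlen("Vulnerable")) == 0;
}

On a heterogeneous system the single string reflects the aggregate of all booted cores: any core that needs a workaround clears __spectrev2_safe, and any such core without working firmware support (or with the mitigation forced off on the command line) clears __hardenbp_enab.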