@@ -860,6 +860,7 @@ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+static bool __pti_enabled;
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
int scope)
@@ -884,21 +885,26 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
/* Forced? */
if (__kpti_forced) {
+ __pti_enabled = __kpti_forced > 0;
pr_info_once("kernel page table isolation forced %s by %s\n",
- __kpti_forced > 0 ? "ON" : "OFF", str);
- return __kpti_forced > 0;
- }
-
- /* Useful for KASLR robustness */
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return true;
+ __pti_enabled ? "ON" : "OFF", str);
+ } else {
+ str = "default";
+ /* Useful for KASLR robustness */
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+ __pti_enabled = true;
+ /* Don't force KPTI for CPUs that are not vulnerable */
+ else if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
+ __pti_enabled = false;
+ /* Defer to CPU feature registers */
+ else
+ __pti_enabled = !has_cpuid_feature(entry, scope);
- /* Don't force KPTI for CPUs that are not vulnerable */
- if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
- return false;
+ pr_info_once("kernel page table isolation %s by %s\n",
+ __pti_enabled ? "ON" : "OFF", str);
+ }
- /* Defer to CPU feature registers */
- return !has_cpuid_feature(entry, scope);
+ return __pti_enabled;
}
static void
Always log KPTI setting at boot time, whether or not KPTI was forced by
a kernel parameter.

Signed-off-by: Mark Langsdorf <mlangsdo@redhat.com>
---
 arch/arm64/kernel/cpufeature.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)