@@ -933,6 +933,62 @@ extern struct arm64_ftr_override arm64_sw_feature_override;
u32 get_kvm_ipa_limit(void);
void dump_cpu_features(void);
+static inline bool cpu_has_bti(void)
+{
+ u64 pfr1;
+
+ if (!IS_ENABLED(CONFIG_ARM64_BTI))
+ return false;
+
+ pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+ pfr1 &= ~id_aa64pfr1_override.mask;
+ pfr1 |= id_aa64pfr1_override.val;
+
+ return cpuid_feature_extract_unsigned_field(pfr1,
+ ID_AA64PFR1_EL1_BT_SHIFT);
+}
+
+static inline bool cpu_has_e0pd(void)
+{
+ u64 mmfr2;
+
+ if (!IS_ENABLED(CONFIG_ARM64_E0PD))
+ return false;
+
+ mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+ return cpuid_feature_extract_unsigned_field(mmfr2,
+ ID_AA64MMFR2_EL1_E0PD_SHIFT);
+}
+
+static inline bool cpu_has_pac(void)
+{
+ u64 isar1, isar2;
+ u8 feat;
+
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+ return false;
+
+ isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+ isar1 &= ~id_aa64isar1_override.mask;
+ isar1 |= id_aa64isar1_override.val;
+ feat = cpuid_feature_extract_unsigned_field(isar1,
+ ID_AA64ISAR1_EL1_APA_SHIFT);
+ if (feat)
+ return true;
+
+ feat = cpuid_feature_extract_unsigned_field(isar1,
+ ID_AA64ISAR1_EL1_API_SHIFT);
+ if (feat)
+ return true;
+
+ isar2 = read_sysreg_s(SYS_ID_AA64ISAR2_EL1);
+ isar2 &= ~id_aa64isar2_override.mask;
+ isar2 |= id_aa64isar2_override.val;
+ feat = cpuid_feature_extract_unsigned_field(isar2,
+ ID_AA64ISAR2_EL1_APA3_SHIFT);
+ return feat;
+}
+
#endif /* __ASSEMBLY__ */
#endif
@@ -1609,21 +1609,11 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
*/
bool kaslr_requires_kpti(void)
{
- if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
- return false;
-
/*
* E0PD does a similar job to KPTI so can be used instead
* where available.
*/
- if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
- u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
- if (cpuid_feature_extract_unsigned_field(mmfr2,
- ID_AA64MMFR2_EL1_E0PD_SHIFT))
- return false;
- }
-
- return kaslr_enabled();
+ return kaslr_enabled() && !cpu_has_e0pd();
}
static bool __meltdown_safe = true;
Add some helpers that will be used by the early kernel mapping code to
check feature support on the local CPU. This permits the early kernel
mapping to be created with the right attributes, removing the need for
tearing it down and recreating it.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/cpufeature.h | 56 ++++++++++++++++++++
 arch/arm64/kernel/cpufeature.c      | 12 +----
 2 files changed, 57 insertions(+), 11 deletions(-)