--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -910,6 +910,7 @@ static inline unsigned int get_vmid_bits(u64 mmfr1)
struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id);
+extern struct arm64_ftr_override id_aa64mmfr0_override;
extern struct arm64_ftr_override id_aa64mmfr1_override;
extern struct arm64_ftr_override id_aa64mmfr2_override;
extern struct arm64_ftr_override id_aa64pfr0_override;
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -636,6 +636,7 @@ static const struct arm64_ftr_bits ftr_raz[] = {
#define ARM64_FTR_REG(id, table) \
__ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)
+struct arm64_ftr_override id_aa64mmfr0_override;
struct arm64_ftr_override id_aa64mmfr1_override;
struct arm64_ftr_override id_aa64mmfr2_override;
struct arm64_ftr_override id_aa64pfr0_override;
@@ -701,7 +702,8 @@ static const struct __ftr_reg_entry {
&id_aa64isar2_override),
/* Op1 = 0, CRn = 0, CRm = 7 */
- ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
+ ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0,
+ &id_aa64mmfr0_override),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
&id_aa64mmfr1_override),
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2,
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -45,6 +45,7 @@ PROVIDE(__pi_memstart_offset_seed = memstart_offset_seed);
PROVIDE(__pi_id_aa64isar1_override = id_aa64isar1_override);
PROVIDE(__pi_id_aa64isar2_override = id_aa64isar2_override);
+PROVIDE(__pi_id_aa64mmfr0_override = id_aa64mmfr0_override);
PROVIDE(__pi_id_aa64mmfr1_override = id_aa64mmfr1_override);
PROVIDE(__pi_id_aa64mmfr2_override = id_aa64mmfr2_override);
PROVIDE(__pi_id_aa64pfr0_override = id_aa64pfr0_override);
--- a/arch/arm64/kernel/pi/idreg-override.c
+++ b/arch/arm64/kernel/pi/idreg-override.c
@@ -139,10 +139,36 @@ DEFINE_OVERRIDE(6, sw_features, "arm64_sw", arm64_sw_feature_override,
FIELD("nowxn", ARM64_SW_FEATURE_OVERRIDE_NOWXN),
{});
+asmlinkage bool __init mmfr2_varange_filter(u64 val)
+{
+ u64 mmfr0, tg4, tg16;
+
+ if (val)
+ return false;
+
+ mmfr0 = read_sysreg(id_aa64mmfr0_el1);
+ tg4 = (mmfr0 & ID_AA64MMFR0_EL1_TGRAN4_MASK) >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT;
+ tg16 = (mmfr0 & ID_AA64MMFR0_EL1_TGRAN16_MASK) >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT;
+
+ if (tg4 == ID_AA64MMFR0_EL1_TGRAN4_52_BIT) {
+ id_aa64mmfr0_override.val |=
+ ID_AA64MMFR0_EL1_TGRAN4_IMP << ID_AA64MMFR0_EL1_TGRAN4_SHIFT;
+ id_aa64mmfr0_override.mask |= ID_AA64MMFR0_EL1_TGRAN4_MASK;
+ }
+
+ if (tg16 == ID_AA64MMFR0_EL1_TGRAN16_52_BIT) {
+ id_aa64mmfr0_override.val |=
+ ID_AA64MMFR0_EL1_TGRAN16_IMP << ID_AA64MMFR0_EL1_TGRAN16_SHIFT;
+ id_aa64mmfr0_override.mask |= ID_AA64MMFR0_EL1_TGRAN16_MASK;
+ }
+ return true;
+}
+
DEFINE_OVERRIDE(7, mmfr2, "id_aa64mmfr2", id_aa64mmfr2_override,
FIELD("varange", ID_AA64MMFR2_EL1_VARange_SHIFT),
FIELD("e0pd", ID_AA64MMFR2_EL1_E0PD_SHIFT),
{});
+DEFINE_OVERRIDE_FILTER(mmfr2, 0, mmfr2_varange_filter);
/*
* regs[] is populated by R_AARCH64_PREL32 directives invisible to the compiler
The LVA feature only applies to 64k page configurations; for smaller page
sizes, other feature registers describe the virtual addressing capabilities
of the CPU. Let's adhere to the principle of least surprise, and wire up
arm64.nolva so that it disables 52-bit virtual addressing support regardless
of the page size.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm64/include/asm/cpufeature.h   |  1 +
 arch/arm64/kernel/cpufeature.c        |  4 +++-
 arch/arm64/kernel/image-vars.h        |  1 +
 arch/arm64/kernel/pi/idreg-override.c | 26 ++++++++++++++++++++
 4 files changed, 31 insertions(+), 1 deletion(-)
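For readers without the tree at hand, below is a stand-alone sketch of what
mmfr2_varange_filter() feeds into the override machinery. The TGRAN* shifts
and encodings mirror the kernel's generated ID_AA64MMFR0_EL1 definitions, but
demote_tgran() and the single val/mask merge at the end are illustrative
simplifications of my own: the kernel applies overrides field by field during
feature sanitisation, not as one masked OR.

/*
 * Stand-alone sketch; NOT kernel code. Demonstrates how masking the
 * TGRAN4/TGRAN16 fields back to their plain "implemented" values hides
 * 52-bit granule support once 52-bit VA has been disabled.
 */
#include <stdint.h>
#include <stdio.h>

#define TGRAN4_SHIFT	28
#define TGRAN4_MASK	(0xfULL << TGRAN4_SHIFT)
#define TGRAN4_IMP	0x0ULL	/* 4k granule, 48-bit VA */
#define TGRAN4_52_BIT	0x1ULL	/* 4k granule with 52-bit VA */

#define TGRAN16_SHIFT	20
#define TGRAN16_MASK	(0xfULL << TGRAN16_SHIFT)
#define TGRAN16_IMP	0x1ULL	/* 16k granule, 48-bit VA */
#define TGRAN16_52_BIT	0x2ULL	/* 16k granule with 52-bit VA */

struct ftr_override {
	uint64_t val;
	uint64_t mask;
};

/* Mirrors the filter above: runs once "nolva" has forced VARange to 0 */
static void demote_tgran(uint64_t mmfr0, struct ftr_override *ovr)
{
	uint64_t tg4 = (mmfr0 & TGRAN4_MASK) >> TGRAN4_SHIFT;
	uint64_t tg16 = (mmfr0 & TGRAN16_MASK) >> TGRAN16_SHIFT;

	if (tg4 == TGRAN4_52_BIT) {
		ovr->val |= TGRAN4_IMP << TGRAN4_SHIFT;
		ovr->mask |= TGRAN4_MASK;
	}
	if (tg16 == TGRAN16_52_BIT) {
		ovr->val |= TGRAN16_IMP << TGRAN16_SHIFT;
		ovr->mask |= TGRAN16_MASK;
	}
}

int main(void)
{
	/* A CPU advertising 52-bit VA with both 4k and 16k granules */
	uint64_t mmfr0 = (TGRAN4_52_BIT << TGRAN4_SHIFT) |
			 (TGRAN16_52_BIT << TGRAN16_SHIFT);
	struct ftr_override ovr = { 0, 0 };

	demote_tgran(mmfr0, &ovr);

	/* Simplified view of folding an override into the register value */
	uint64_t sanitised = (mmfr0 & ~ovr.mask) | (ovr.val & ovr.mask);

	printf("raw:       %#018llx\n", (unsigned long long)mmfr0);
	printf("sanitised: %#018llx\n", (unsigned long long)sanitised);
	return 0;
}

Since arm64.nolva is aliased to id_aa64mmfr2.varange=0 elsewhere in
idreg-override.c, the filter fires whenever 52-bit VA is disabled on the
command line, and 4k/16k page kernels (which learn about 52-bit VA from the
TGRAN fields rather than from VARange) now observe the override as well.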