arm64: kernel: Reindent CPU feature table for legibility

Message ID: 20221129162153.1968634-1-ardb@kernel.org (mailing list archive)
State: New, archived
Series: arm64: kernel: Reindent CPU feature table for legibility

Commit Message

Ard Biesheuvel Nov. 29, 2022, 4:21 p.m. UTC
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
Applies onto today's for-next/core, which includes my patch introducing
the CPU feature for FEAT_DIT.  Best viewed with 'git show -w'

 arch/arm64/kernel/cpufeature.c | 1042 ++++++++++----------
 1 file changed, 521 insertions(+), 521 deletions(-)
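
'git show -w' tells diff generation to ignore whitespace-only differences, so a
pure reindent such as this one renders as an effectively empty diff, which is a
quick way to verify that the patch makes no functional change.  Assuming the
reindent is the topmost commit on the branch:

	$ git show -w	# only the commit header and message remain; no hunks survive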

Comments

Mark Brown Dec. 7, 2022, 6:37 p.m. UTC | #1
On Tue, Nov 29, 2022 at 05:21:53PM +0100, Ard Biesheuvel wrote:
> Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
> ---
> Applies onto today's for-next/core, which includes my patch introducing
> the CPU feature for FEAT_DIT.  Best viewed with 'git show -w'
> 
>  arch/arm64/kernel/cpufeature.c | 1042 ++++++++++----------
>  1 file changed, 521 insertions(+), 521 deletions(-)

Now that all the ID registers are converted to automatic generation,
thanks to James doing the conversion of the 32-bit ones, I've been
looking at taking advantage of this to simplify the tables in
cpufeature.c and improve their legibility.  It's most critically useful
for the hwcaps, but cpufeature can benefit too, and there are obvious
collisions between that work and this patch.  If this is likely to go
in I can base my work on top of it.  It'd obviously be useful in
general to land this patch as soon as possible so people can base other
work on top of it.

I've got no particular opinion on the reformatting itself.
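
For illustration, the kind of consolidation the generated sysreg
definitions enable might look roughly like the sketch below.  The helper
name and its exact shape are assumptions for this example (and the sketch
hard-codes an unsigned field), not what has been posted:

	/*
	 * Hypothetical helper: derive the ID-register match fields from the
	 * generated sysreg definitions, so each table entry names the
	 * register, field and minimum value exactly once.
	 */
	#define ARM64_CPUID_FIELDS(reg, field, min_value)			\
		.sys_reg		= SYS_##reg,				\
		.field_pos		= reg##_##field##_SHIFT,		\
		.field_width		= reg##_##field##_WIDTH,		\
		.sign			= FTR_UNSIGNED,				\
		.min_field_value	= reg##_##field##_##min_value,

	/* The ECV entry from the table below would then shrink to: */
	{
		.desc			= "Enhanced Counter Virtualization",
		.capability		= ARM64_HAS_ECV,
		.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches		= has_cpuid_feature,
		ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, ECV, IMP)
	},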

Patch

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2846704471061793..5e72b2de49ceaf1f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2128,562 +2128,562 @@  cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
 }
 
 static const struct arm64_cpu_capabilities arm64_features[] = {
-	{
-		.capability = ARM64_ALWAYS_BOOT,
-		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
-		.matches = has_always,
-	},
-	{
-		.capability = ARM64_ALWAYS_SYSTEM,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_always,
-	},
-	{
-		.desc = "GIC system register CPU interface",
-		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
-		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
-		.matches = has_useable_gicv3_cpuif,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 1,
-	},
-	{
-		.desc = "Enhanced Counter Virtualization",
-		.capability = ARM64_HAS_ECV,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64MMFR0_EL1,
-		.field_pos = ID_AA64MMFR0_EL1_ECV_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 1,
-	},
+{
+	.capability		= ARM64_ALWAYS_BOOT,
+	.type			= ARM64_CPUCAP_BOOT_CPU_FEATURE,
+	.matches		= has_always,
+},
+{
+	.capability		= ARM64_ALWAYS_SYSTEM,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_always,
+},
+{
+	.desc			= "GIC system register CPU interface",
+	.capability		= ARM64_HAS_SYSREG_GIC_CPUIF,
+	.type			= ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+	.matches		= has_useable_gicv3_cpuif,
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.field_pos		= ID_AA64PFR0_EL1_GIC_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= 1,
+},
+{
+	.desc			= "Enhanced Counter Virtualization",
+	.capability		= ARM64_HAS_ECV,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64MMFR0_EL1,
+	.field_pos		= ID_AA64MMFR0_EL1_ECV_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= 1,
+},
 #ifdef CONFIG_ARM64_PAN
-	{
-		.desc = "Privileged Access Never",
-		.capability = ARM64_HAS_PAN,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64MMFR1_EL1,
-		.field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 1,
-		.cpu_enable = cpu_enable_pan,
-	},
+{
+	.desc			= "Privileged Access Never",
+	.capability		= ARM64_HAS_PAN,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64MMFR1_EL1,
+	.field_pos		= ID_AA64MMFR1_EL1_PAN_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= 1,
+	.cpu_enable		= cpu_enable_pan,
+},
 #endif /* CONFIG_ARM64_PAN */
 #ifdef CONFIG_ARM64_EPAN
-	{
-		.desc = "Enhanced Privileged Access Never",
-		.capability = ARM64_HAS_EPAN,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64MMFR1_EL1,
-		.field_pos = ID_AA64MMFR1_EL1_PAN_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 3,
-	},
+{
+	.desc			= "Enhanced Privileged Access Never",
+	.capability		= ARM64_HAS_EPAN,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64MMFR1_EL1,
+	.field_pos		= ID_AA64MMFR1_EL1_PAN_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= 3,
+},
 #endif /* CONFIG_ARM64_EPAN */
 #ifdef CONFIG_ARM64_LSE_ATOMICS
-	{
-		.desc = "LSE atomic instructions",
-		.capability = ARM64_HAS_LSE_ATOMICS,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_EL1_ATOMIC_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 2,
-	},
+{
+	.desc			= "LSE atomic instructions",
+	.capability		= ARM64_HAS_LSE_ATOMICS,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64ISAR0_EL1,
+	.field_pos		= ID_AA64ISAR0_EL1_ATOMIC_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= 2,
+},
 #endif /* CONFIG_ARM64_LSE_ATOMICS */
-	{
-		.desc = "Software prefetching using PRFM",
-		.capability = ARM64_HAS_NO_HW_PREFETCH,
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
-		.matches = has_no_hw_prefetch,
-	},
-	{
-		.desc = "Virtualization Host Extensions",
-		.capability = ARM64_HAS_VIRT_HOST_EXTN,
-		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
-		.matches = runs_at_el2,
-		.cpu_enable = cpu_copy_el2regs,
-	},
-	{
-		.capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_32bit_el0,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_EL1_EL0_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
-	},
+{
+	.desc			= "Software prefetching using PRFM",
+	.capability		= ARM64_HAS_NO_HW_PREFETCH,
+	.type			= ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+	.matches		= has_no_hw_prefetch,
+},
+{
+	.desc			= "Virtualization Host Extensions",
+	.capability		= ARM64_HAS_VIRT_HOST_EXTN,
+	.type			= ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+	.matches		= runs_at_el2,
+	.cpu_enable		= cpu_copy_el2regs,
+},
+{
+	.capability		= ARM64_HAS_32BIT_EL0_DO_NOT_USE,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_32bit_el0,
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64PFR0_EL1_EL0_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
+},
 #ifdef CONFIG_KVM
-	{
-		.desc = "32-bit EL1 Support",
-		.capability = ARM64_HAS_32BIT_EL1,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_EL1_EL1_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
-	},
-	{
-		.desc = "Protected KVM",
-		.capability = ARM64_KVM_PROTECTED_MODE,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = is_kvm_protected_mode,
-	},
+{
+	.desc			= "32-bit EL1 Support",
+	.capability		= ARM64_HAS_32BIT_EL1,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64PFR0_EL1_EL1_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR0_EL1_ELx_32BIT_64BIT,
+},
+{
+	.desc			= "Protected KVM",
+	.capability		= ARM64_KVM_PROTECTED_MODE,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= is_kvm_protected_mode,
+},
 #endif
-	{
-		.desc = "Kernel page table isolation (KPTI)",
-		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
-		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
-		/*
-		 * The ID feature fields below are used to indicate that
-		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
-		 * more details.
-		 */
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.field_pos = ID_AA64PFR0_EL1_CSV3_SHIFT,
-		.field_width = 4,
-		.min_field_value = 1,
-		.matches = unmap_kernel_at_el0,
-		.cpu_enable = kpti_install_ng_mappings,
-	},
-	{
-		/* FP/SIMD is not implemented */
-		.capability = ARM64_HAS_NO_FPSIMD,
-		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
-		.min_field_value = 0,
-		.matches = has_no_fpsimd,
-	},
+{
+	.desc			= "Kernel page table isolation (KPTI)",
+	.capability		= ARM64_UNMAP_KERNEL_AT_EL0,
+	.type			= ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
+	/*
+	 * The ID feature fields below are used to indicate that
+	 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
+	 * more details.
+	 */
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.field_pos		= ID_AA64PFR0_EL1_CSV3_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= 1,
+	.matches		= unmap_kernel_at_el0,
+	.cpu_enable		= kpti_install_ng_mappings,
+},
+{
+	/* FP/SIMD is not implemented */
+	.capability		= ARM64_HAS_NO_FPSIMD,
+	.type			= ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
+	.min_field_value	= 0,
+	.matches		= has_no_fpsimd,
+},
 #ifdef CONFIG_ARM64_PMEM
-	{
-		.desc = "Data cache clean to Point of Persistence",
-		.capability = ARM64_HAS_DCPOP,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64ISAR1_EL1,
-		.field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT,
-		.field_width = 4,
-		.min_field_value = 1,
-	},
-	{
-		.desc = "Data cache clean to Point of Deep Persistence",
-		.capability = ARM64_HAS_DCPODP,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64ISAR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR1_EL1_DPB_SHIFT,
-		.field_width = 4,
-		.min_field_value = 2,
-	},
+{
+	.desc			= "Data cache clean to Point of Persistence",
+	.capability		= ARM64_HAS_DCPOP,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
+	.field_pos		= ID_AA64ISAR1_EL1_DPB_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= 1,
+},
+{
+	.desc			= "Data cache clean to Point of Deep Persistence",
+	.capability		= ARM64_HAS_DCPODP,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR1_EL1_DPB_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= 2,
+},
 #endif
 #ifdef CONFIG_ARM64_SVE
-	{
-		.desc = "Scalable Vector Extension",
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.capability = ARM64_SVE,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_EL1_SVE_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_EL1_SVE_IMP,
-		.matches = has_cpuid_feature,
-		.cpu_enable = sve_kernel_enable,
-	},
+{
+	.desc			= "Scalable Vector Extension",
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.capability		= ARM64_SVE,
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64PFR0_EL1_SVE_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR0_EL1_SVE_IMP,
+	.matches		= has_cpuid_feature,
+	.cpu_enable		= sve_kernel_enable,
+},
 #endif /* CONFIG_ARM64_SVE */
 #ifdef CONFIG_ARM64_RAS_EXTN
-	{
-		.desc = "RAS Extension Support",
-		.capability = ARM64_HAS_RAS_EXTN,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_EL1_RAS_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_EL1_RAS_IMP,
-		.cpu_enable = cpu_clear_disr,
-	},
+{
+	.desc			= "RAS Extension Support",
+	.capability		= ARM64_HAS_RAS_EXTN,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64PFR0_EL1_RAS_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR0_EL1_RAS_IMP,
+	.cpu_enable		= cpu_clear_disr,
+},
 #endif /* CONFIG_ARM64_RAS_EXTN */
 #ifdef CONFIG_ARM64_AMU_EXTN
-	{
-		/*
-		 * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
-		 * Therefore, don't provide .desc as we don't want the detection
-		 * message to be shown until at least one CPU is detected to
-		 * support the feature.
-		 */
-		.capability = ARM64_HAS_AMU_EXTN,
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
-		.matches = has_amu,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_EL1_AMU_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_EL1_AMU_IMP,
-		.cpu_enable = cpu_amu_enable,
-	},
+{
+	/*
+	 * The feature is enabled by default if CONFIG_ARM64_AMU_EXTN=y.
+	 * Therefore, don't provide .desc as we don't want the detection
+	 * message to be shown until at least one CPU is detected to
+	 * support the feature.
+	 */
+	.capability		= ARM64_HAS_AMU_EXTN,
+	.type			= ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+	.matches		= has_amu,
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64PFR0_EL1_AMU_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR0_EL1_AMU_IMP,
+	.cpu_enable		= cpu_amu_enable,
+},
 #endif /* CONFIG_ARM64_AMU_EXTN */
-	{
-		.desc = "Data cache clean to the PoU not required for I/D coherence",
-		.capability = ARM64_HAS_CACHE_IDC,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cache_idc,
-		.cpu_enable = cpu_emulate_effective_ctr,
-	},
-	{
-		.desc = "Instruction cache invalidation not required for I/D coherence",
-		.capability = ARM64_HAS_CACHE_DIC,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cache_dic,
-	},
-	{
-		.desc = "Stage-2 Force Write-Back",
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.capability = ARM64_HAS_STAGE2_FWB,
-		.sys_reg = SYS_ID_AA64MMFR2_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64MMFR2_EL1_FWB_SHIFT,
-		.field_width = 4,
-		.min_field_value = 1,
-		.matches = has_cpuid_feature,
-	},
-	{
-		.desc = "ARMv8.4 Translation Table Level",
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.capability = ARM64_HAS_ARMv8_4_TTL,
-		.sys_reg = SYS_ID_AA64MMFR2_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64MMFR2_EL1_TTL_SHIFT,
-		.field_width = 4,
-		.min_field_value = 1,
-		.matches = has_cpuid_feature,
-	},
-	{
-		.desc = "TLB range maintenance instructions",
-		.capability = ARM64_HAS_TLB_RANGE,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_EL1_TLB_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = ID_AA64ISAR0_EL1_TLB_RANGE,
-	},
+{
+	.desc			= "Data cache clean to the PoU not required for I/D coherence",
+	.capability		= ARM64_HAS_CACHE_IDC,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cache_idc,
+	.cpu_enable		= cpu_emulate_effective_ctr,
+},
+{
+	.desc			= "Instruction cache invalidation not required for I/D coherence",
+	.capability		= ARM64_HAS_CACHE_DIC,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cache_dic,
+},
+{
+	.desc			= "Stage-2 Force Write-Back",
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.capability		= ARM64_HAS_STAGE2_FWB,
+	.sys_reg		= SYS_ID_AA64MMFR2_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64MMFR2_EL1_FWB_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= 1,
+	.matches		= has_cpuid_feature,
+},
+{
+	.desc			= "ARMv8.4 Translation Table Level",
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.capability		= ARM64_HAS_ARMv8_4_TTL,
+	.sys_reg		= SYS_ID_AA64MMFR2_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64MMFR2_EL1_TTL_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= 1,
+	.matches		= has_cpuid_feature,
+},
+{
+	.desc			= "TLB range maintenance instructions",
+	.capability		= ARM64_HAS_TLB_RANGE,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64ISAR0_EL1,
+	.field_pos		= ID_AA64ISAR0_EL1_TLB_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= ID_AA64ISAR0_EL1_TLB_RANGE,
+},
 #ifdef CONFIG_ARM64_HW_AFDBM
-	{
-		/*
-		 * Since we turn this on always, we don't want the user to
-		 * think that the feature is available when it may not be.
-		 * So hide the description.
-		 *
-		 * .desc = "Hardware pagetable Dirty Bit Management",
-		 *
-		 */
-		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
-		.capability = ARM64_HW_DBM,
-		.sys_reg = SYS_ID_AA64MMFR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64MMFR1_EL1_HAFDBS_SHIFT,
-		.field_width = 4,
-		.min_field_value = 2,
-		.matches = has_hw_dbm,
-		.cpu_enable = cpu_enable_hw_dbm,
-	},
+{
+	/*
+	 * Since we turn this on always, we don't want the user to
+	 * think that the feature is available when it may not be.
+	 * So hide the description.
+	 *
+	 * .desc		= "Hardware pagetable Dirty Bit Management",
+	 *
+	 */
+	.type			= ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+	.capability		= ARM64_HW_DBM,
+	.sys_reg		= SYS_ID_AA64MMFR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64MMFR1_EL1_HAFDBS_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= 2,
+	.matches		= has_hw_dbm,
+	.cpu_enable		= cpu_enable_hw_dbm,
+},
 #endif
-	{
-		.desc = "CRC32 instructions",
-		.capability = ARM64_HAS_CRC32,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_EL1_CRC32_SHIFT,
-		.field_width = 4,
-		.min_field_value = 1,
-	},
-	{
-		.desc = "Speculative Store Bypassing Safe (SSBS)",
-		.capability = ARM64_SSBS,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64PFR1_EL1,
-		.field_pos = ID_AA64PFR1_EL1_SSBS_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = ID_AA64PFR1_EL1_SSBS_IMP,
-	},
+{
+	.desc			= "CRC32 instructions",
+	.capability		= ARM64_HAS_CRC32,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64ISAR0_EL1,
+	.field_pos		= ID_AA64ISAR0_EL1_CRC32_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= 1,
+},
+{
+	.desc			= "Speculative Store Bypassing Safe (SSBS)",
+	.capability		= ARM64_SSBS,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64PFR1_EL1,
+	.field_pos		= ID_AA64PFR1_EL1_SSBS_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= ID_AA64PFR1_EL1_SSBS_IMP,
+},
 #ifdef CONFIG_ARM64_CNP
-	{
-		.desc = "Common not Private translations",
-		.capability = ARM64_HAS_CNP,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_useable_cnp,
-		.sys_reg = SYS_ID_AA64MMFR2_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64MMFR2_EL1_CnP_SHIFT,
-		.field_width = 4,
-		.min_field_value = 1,
-		.cpu_enable = cpu_enable_cnp,
-	},
+{
+	.desc			= "Common not Private translations",
+	.capability		= ARM64_HAS_CNP,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_useable_cnp,
+	.sys_reg		= SYS_ID_AA64MMFR2_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64MMFR2_EL1_CnP_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= 1,
+	.cpu_enable		= cpu_enable_cnp,
+},
 #endif
-	{
-		.desc = "Speculation barrier (SB)",
-		.capability = ARM64_HAS_SB,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64ISAR1_EL1,
-		.field_pos = ID_AA64ISAR1_EL1_SB_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 1,
-	},
+{
+	.desc			= "Speculation barrier (SB)",
+	.capability		= ARM64_HAS_SB,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
+	.field_pos		= ID_AA64ISAR1_EL1_SB_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= 1,
+},
 #ifdef CONFIG_ARM64_PTR_AUTH
-	{
-		.desc = "Address authentication (architected QARMA5 algorithm)",
-		.capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5,
-		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
-		.sys_reg = SYS_ID_AA64ISAR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR1_EL1_APA_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64ISAR1_EL1_APA_PAuth,
-		.matches = has_address_auth_cpucap,
-	},
-	{
-		.desc = "Address authentication (architected QARMA3 algorithm)",
-		.capability = ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3,
-		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
-		.sys_reg = SYS_ID_AA64ISAR2_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR2_EL1_APA3_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64ISAR2_EL1_APA3_PAuth,
-		.matches = has_address_auth_cpucap,
-	},
-	{
-		.desc = "Address authentication (IMP DEF algorithm)",
-		.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
-		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
-		.sys_reg = SYS_ID_AA64ISAR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR1_EL1_API_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64ISAR1_EL1_API_PAuth,
-		.matches = has_address_auth_cpucap,
-	},
-	{
-		.capability = ARM64_HAS_ADDRESS_AUTH,
-		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
-		.matches = has_address_auth_metacap,
-	},
-	{
-		.desc = "Generic authentication (architected QARMA5 algorithm)",
-		.capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64ISAR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR1_EL1_GPA_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64ISAR1_EL1_GPA_IMP,
-		.matches = has_cpuid_feature,
-	},
-	{
-		.desc = "Generic authentication (architected QARMA3 algorithm)",
-		.capability = ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64ISAR2_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR2_EL1_GPA3_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64ISAR2_EL1_GPA3_IMP,
-		.matches = has_cpuid_feature,
-	},
-	{
-		.desc = "Generic authentication (IMP DEF algorithm)",
-		.capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64ISAR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR1_EL1_GPI_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64ISAR1_EL1_GPI_IMP,
-		.matches = has_cpuid_feature,
-	},
-	{
-		.capability = ARM64_HAS_GENERIC_AUTH,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_generic_auth,
-	},
+{
+	.desc			= "Address authentication (architected QARMA5 algorithm)",
+	.capability		= ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA5,
+	.type			= ARM64_CPUCAP_BOOT_CPU_FEATURE,
+	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR1_EL1_APA_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64ISAR1_EL1_APA_PAuth,
+	.matches		= has_address_auth_cpucap,
+},
+{
+	.desc			= "Address authentication (architected QARMA3 algorithm)",
+	.capability		= ARM64_HAS_ADDRESS_AUTH_ARCH_QARMA3,
+	.type			= ARM64_CPUCAP_BOOT_CPU_FEATURE,
+	.sys_reg		= SYS_ID_AA64ISAR2_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR2_EL1_APA3_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64ISAR2_EL1_APA3_PAuth,
+	.matches		= has_address_auth_cpucap,
+},
+{
+	.desc			= "Address authentication (IMP DEF algorithm)",
+	.capability		= ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
+	.type			= ARM64_CPUCAP_BOOT_CPU_FEATURE,
+	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR1_EL1_API_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64ISAR1_EL1_API_PAuth,
+	.matches		= has_address_auth_cpucap,
+},
+{
+	.capability		= ARM64_HAS_ADDRESS_AUTH,
+	.type			= ARM64_CPUCAP_BOOT_CPU_FEATURE,
+	.matches		= has_address_auth_metacap,
+},
+{
+	.desc			= "Generic authentication (architected QARMA5 algorithm)",
+	.capability		= ARM64_HAS_GENERIC_AUTH_ARCH_QARMA5,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR1_EL1_GPA_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64ISAR1_EL1_GPA_IMP,
+	.matches		= has_cpuid_feature,
+},
+{
+	.desc			= "Generic authentication (architected QARMA3 algorithm)",
+	.capability		= ARM64_HAS_GENERIC_AUTH_ARCH_QARMA3,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.sys_reg		= SYS_ID_AA64ISAR2_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR2_EL1_GPA3_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64ISAR2_EL1_GPA3_IMP,
+	.matches		= has_cpuid_feature,
+},
+{
+	.desc			= "Generic authentication (IMP DEF algorithm)",
+	.capability		= ARM64_HAS_GENERIC_AUTH_IMP_DEF,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR1_EL1_GPI_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64ISAR1_EL1_GPI_IMP,
+	.matches		= has_cpuid_feature,
+},
+{
+	.capability		= ARM64_HAS_GENERIC_AUTH,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_generic_auth,
+},
 #endif /* CONFIG_ARM64_PTR_AUTH */
 #ifdef CONFIG_ARM64_PSEUDO_NMI
-	{
-		/*
-		 * Depends on having GICv3
-		 */
-		.desc = "IRQ priority masking",
-		.capability = ARM64_HAS_IRQ_PRIO_MASKING,
-		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
-		.matches = can_use_gic_priorities,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.field_pos = ID_AA64PFR0_EL1_GIC_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 1,
-	},
+{
+	/*
+	 * Depends on having GICv3
+	 */
+	.desc			= "IRQ priority masking",
+	.capability		= ARM64_HAS_IRQ_PRIO_MASKING,
+	.type			= ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+	.matches		= can_use_gic_priorities,
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.field_pos		= ID_AA64PFR0_EL1_GIC_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= 1,
+},
 #endif
 #ifdef CONFIG_ARM64_E0PD
-	{
-		.desc = "E0PD",
-		.capability = ARM64_HAS_E0PD,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64MMFR2_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_width = 4,
-		.field_pos = ID_AA64MMFR2_EL1_E0PD_SHIFT,
-		.matches = has_cpuid_feature,
-		.min_field_value = 1,
-		.cpu_enable = cpu_enable_e0pd,
-	},
+{
+	.desc			= "E0PD",
+	.capability		= ARM64_HAS_E0PD,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.sys_reg		= SYS_ID_AA64MMFR2_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_width		= 4,
+	.field_pos		= ID_AA64MMFR2_EL1_E0PD_SHIFT,
+	.matches		= has_cpuid_feature,
+	.min_field_value	= 1,
+	.cpu_enable		= cpu_enable_e0pd,
+},
 #endif
-	{
-		.desc = "Random Number Generator",
-		.capability = ARM64_HAS_RNG,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64ISAR0_EL1,
-		.field_pos = ID_AA64ISAR0_EL1_RNDR_SHIFT,
-		.field_width = 4,
-		.sign = FTR_UNSIGNED,
-		.min_field_value = 1,
-	},
+{
+	.desc			= "Random Number Generator",
+	.capability		= ARM64_HAS_RNG,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64ISAR0_EL1,
+	.field_pos		= ID_AA64ISAR0_EL1_RNDR_SHIFT,
+	.field_width		= 4,
+	.sign			= FTR_UNSIGNED,
+	.min_field_value	= 1,
+},
 #ifdef CONFIG_ARM64_BTI
-	{
-		.desc = "Branch Target Identification",
-		.capability = ARM64_BTI,
+{
+	.desc			= "Branch Target Identification",
+	.capability		= ARM64_BTI,
 #ifdef CONFIG_ARM64_BTI_KERNEL
-		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+	.type			= ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
 #else
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
 #endif
-		.matches = has_cpuid_feature,
-		.cpu_enable = bti_enable,
-		.sys_reg = SYS_ID_AA64PFR1_EL1,
-		.field_pos = ID_AA64PFR1_EL1_BT_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR1_EL1_BT_IMP,
-		.sign = FTR_UNSIGNED,
-	},
+	.matches		= has_cpuid_feature,
+	.cpu_enable		= bti_enable,
+	.sys_reg		= SYS_ID_AA64PFR1_EL1,
+	.field_pos		= ID_AA64PFR1_EL1_BT_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR1_EL1_BT_IMP,
+	.sign			= FTR_UNSIGNED,
+},
 #endif
 #ifdef CONFIG_ARM64_MTE
-	{
-		.desc = "Memory Tagging Extension",
-		.capability = ARM64_MTE,
-		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64PFR1_EL1,
-		.field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR1_EL1_MTE_MTE2,
-		.sign = FTR_UNSIGNED,
-		.cpu_enable = cpu_enable_mte,
-	},
-	{
-		.desc = "Asymmetric MTE Tag Check Fault",
-		.capability = ARM64_MTE_ASYMM,
-		.type = ARM64_CPUCAP_BOOT_CPU_FEATURE,
-		.matches = has_cpuid_feature,
-		.sys_reg = SYS_ID_AA64PFR1_EL1,
-		.field_pos = ID_AA64PFR1_EL1_MTE_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR1_EL1_MTE_MTE3,
-		.sign = FTR_UNSIGNED,
-	},
+{
+	.desc			= "Memory Tagging Extension",
+	.capability		= ARM64_MTE,
+	.type			= ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64PFR1_EL1,
+	.field_pos		= ID_AA64PFR1_EL1_MTE_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR1_EL1_MTE_MTE2,
+	.sign			= FTR_UNSIGNED,
+	.cpu_enable		= cpu_enable_mte,
+},
+{
+	.desc			= "Asymmetric MTE Tag Check Fault",
+	.capability		= ARM64_MTE_ASYMM,
+	.type			= ARM64_CPUCAP_BOOT_CPU_FEATURE,
+	.matches		= has_cpuid_feature,
+	.sys_reg		= SYS_ID_AA64PFR1_EL1,
+	.field_pos		= ID_AA64PFR1_EL1_MTE_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR1_EL1_MTE_MTE3,
+	.sign			= FTR_UNSIGNED,
+},
 #endif /* CONFIG_ARM64_MTE */
-	{
-		.desc = "RCpc load-acquire (LDAPR)",
-		.capability = ARM64_HAS_LDAPR,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64ISAR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR1_EL1_LRCPC_SHIFT,
-		.field_width = 4,
-		.matches = has_cpuid_feature,
-		.min_field_value = 1,
-	},
+{
+	.desc			= "RCpc load-acquire (LDAPR)",
+	.capability		= ARM64_HAS_LDAPR,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.sys_reg		= SYS_ID_AA64ISAR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR1_EL1_LRCPC_SHIFT,
+	.field_width		= 4,
+	.matches		= has_cpuid_feature,
+	.min_field_value	= 1,
+},
 #ifdef CONFIG_ARM64_SME
-	{
-		.desc = "Scalable Matrix Extension",
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.capability = ARM64_SME,
-		.sys_reg = SYS_ID_AA64PFR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR1_EL1_SME_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR1_EL1_SME_IMP,
-		.matches = has_cpuid_feature,
-		.cpu_enable = sme_kernel_enable,
-	},
-	/* FA64 should be sorted after the base SME capability */
-	{
-		.desc = "FA64",
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.capability = ARM64_SME_FA64,
-		.sys_reg = SYS_ID_AA64SMFR0_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64SMFR0_EL1_FA64_SHIFT,
-		.field_width = 1,
-		.min_field_value = ID_AA64SMFR0_EL1_FA64_IMP,
-		.matches = has_cpuid_feature,
-		.cpu_enable = fa64_kernel_enable,
-	},
+{
+	.desc			= "Scalable Matrix Extension",
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.capability		= ARM64_SME,
+	.sys_reg		= SYS_ID_AA64PFR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64PFR1_EL1_SME_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR1_EL1_SME_IMP,
+	.matches		= has_cpuid_feature,
+	.cpu_enable		= sme_kernel_enable,
+},
+/* FA64 should be sorted after the base SME capability */
+{
+	.desc			= "FA64",
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.capability		= ARM64_SME_FA64,
+	.sys_reg		= SYS_ID_AA64SMFR0_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64SMFR0_EL1_FA64_SHIFT,
+	.field_width		= 1,
+	.min_field_value	= ID_AA64SMFR0_EL1_FA64_IMP,
+	.matches		= has_cpuid_feature,
+	.cpu_enable		= fa64_kernel_enable,
+},
 #endif /* CONFIG_ARM64_SME */
-	{
-		.desc = "WFx with timeout",
-		.capability = ARM64_HAS_WFXT,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64ISAR2_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64ISAR2_EL1_WFxT_SHIFT,
-		.field_width = 4,
-		.matches = has_cpuid_feature,
-		.min_field_value = ID_AA64ISAR2_EL1_WFxT_IMP,
-	},
-	{
-		.desc = "Trap EL0 IMPLEMENTATION DEFINED functionality",
-		.capability = ARM64_HAS_TIDCP1,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64MMFR1_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64MMFR1_EL1_TIDCP1_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64MMFR1_EL1_TIDCP1_IMP,
-		.matches = has_cpuid_feature,
-		.cpu_enable = cpu_trap_el0_impdef,
-	},
-	{
-		.desc = "Data independent timing control (DIT)",
-		.capability = ARM64_HAS_DIT,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
-		.sys_reg = SYS_ID_AA64PFR0_EL1,
-		.sign = FTR_UNSIGNED,
-		.field_pos = ID_AA64PFR0_EL1_DIT_SHIFT,
-		.field_width = 4,
-		.min_field_value = ID_AA64PFR0_EL1_DIT_IMP,
-		.matches = has_cpuid_feature,
-		.cpu_enable = cpu_enable_dit,
-	},
-	{},
+{
+	.desc			= "WFx with timeout",
+	.capability		= ARM64_HAS_WFXT,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.sys_reg		= SYS_ID_AA64ISAR2_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64ISAR2_EL1_WFxT_SHIFT,
+	.field_width		= 4,
+	.matches		= has_cpuid_feature,
+	.min_field_value	= ID_AA64ISAR2_EL1_WFxT_IMP,
+},
+{
+	.desc			= "Trap EL0 IMPLEMENTATION DEFINED functionality",
+	.capability		= ARM64_HAS_TIDCP1,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.sys_reg		= SYS_ID_AA64MMFR1_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64MMFR1_EL1_TIDCP1_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64MMFR1_EL1_TIDCP1_IMP,
+	.matches		= has_cpuid_feature,
+	.cpu_enable		= cpu_trap_el0_impdef,
+},
+{
+	.desc			= "Data independent timing control (DIT)",
+	.capability		= ARM64_HAS_DIT,
+	.type			= ARM64_CPUCAP_SYSTEM_FEATURE,
+	.sys_reg		= SYS_ID_AA64PFR0_EL1,
+	.sign			= FTR_UNSIGNED,
+	.field_pos		= ID_AA64PFR0_EL1_DIT_SHIFT,
+	.field_width		= 4,
+	.min_field_value	= ID_AA64PFR0_EL1_DIT_IMP,
+	.matches		= has_cpuid_feature,
+	.cpu_enable		= cpu_enable_dit,
+},
+{},
 };
 
 #define HWCAP_CPUID_MATCH(reg, field, width, s, min_value)			\