
[v4,06/38] arm64: Rework setup_cpu_features()

Message ID 20231016102501.3643901-7-mark.rutland@arm.com
State New, archived
Series [v4,01/38] clocksource/drivers/arm_arch_timer: Initialize evtstrm after finalizing cpucaps

Commit Message

Mark Rutland Oct. 16, 2023, 10:24 a.m. UTC
Currently setup_cpu_features() handles a mixture of one-time kernel
feature setup (e.g. cpucaps) and one-time user feature setup (e.g. ELF
hwcaps). Subsequent patches will rework other one-time setup and expand
the logic currently in setup_cpu_features(), and in preparation for this
it would be helpful to split the kernel and user setup into separate
functions.
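
For orientation, this is roughly the pre-patch shape of setup_cpu_features(), abridged from the removed and context lines in the diff below; the kernel-side/user-side annotations are editorial, not part of the original source:

void __init setup_cpu_features(void)
{
	u32 cwg;

	setup_system_capabilities();		/* kernel: finalise system-wide cpucaps */
	setup_elf_hwcaps(arm64_elf_hwcaps);	/* user: ELF hwcaps */

	if (system_supports_32bit_el0()) {
		/* ... compat hwcap setup ... */
		elf_hwcap_fixup();
	}

	if (system_uses_ttbr0_pan())
		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");

	sve_setup();				/* SVE/SME vector-length setup */
	sme_setup();
	minsigstksz_setup();			/* user: minimum signal stack size */

	/* Check for sane CTR_EL0.CWG value. */
	cwg = cache_type_cwg();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming %d\n",
			ARCH_DMA_MINALIGN);
}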

This patch splits setup_user_features() out of setup_cpu_features(),
with a few additional cleanups of note:

* setup_cpu_features() is renamed to setup_system_features() to make it
  clear that it handles system-wide feature setup rather than cpu-local
  feature setup.

* setup_system_capabilities() is folded into setup_system_features().

* Presence of TTBR0 PAN is logged immediately after
  update_cpu_capabilities(), so that this is guaranteed to appear
  alongside all the other detected system cpucaps.

* The 'cwg' variable is removed as its value is only consumed once and
  it's simpler to use cache_type_cwg() directly without assigning its
  return value to a variable.

* The call to setup_user_features() is moved after alternatives are
  patched, which will allow user feature setup code to depend on
  alternative branches and allow for simplifications in subsequent
  patches (see the sketch after this list).
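
For reference, the resulting boot-time call order, abridged from the smp.c hunk at the end of the diff below (the comments are editorial):

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_system_features();	/* finalise system-wide cpucaps, SVE/SME, CWG check */
	hyp_mode_check();
	apply_alternatives_all();	/* patch alternative branches */
	setup_user_features();		/* ELF hwcaps etc.; may now rely on alternatives */
	mark_linear_text_alias_ro();
}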

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Brown <broonie@kernel.org>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/cpufeature.h |  4 ++-
 arch/arm64/kernel/cpufeature.c      | 43 ++++++++++++++---------------
 arch/arm64/kernel/smp.c             |  3 +-
 3 files changed, 26 insertions(+), 24 deletions(-)

Patch

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index ba7c60c9ac695..1233a8ff96a88 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -649,7 +649,9 @@  static inline bool id_aa64pfr1_mte(u64 pfr1)
 	return val >= ID_AA64PFR1_EL1_MTE_MTE2;
 }
 
-void __init setup_cpu_features(void);
+void __init setup_system_features(void);
+void __init setup_user_features(void);
+
 void check_local_cpu_capabilities(void);
 
 u64 read_sanitised_ftr_reg(u32 id);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 444a73c2e6385..234cf3189bee0 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -3328,23 +3328,35 @@  unsigned long cpu_get_elf_hwcap2(void)
 	return elf_hwcap[1];
 }
 
-static void __init setup_system_capabilities(void)
+void __init setup_system_features(void)
 {
 	/*
-	 * We have finalised the system-wide safe feature
-	 * registers, finalise the capabilities that depend
-	 * on it. Also enable all the available capabilities,
-	 * that are not enabled already.
+	 * The system-wide safe feature register values have been
+	 * finalized. Finalize and log the available system capabilities.
 	 */
 	update_cpu_capabilities(SCOPE_SYSTEM);
+	if (system_uses_ttbr0_pan())
+		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
+
+	/*
+	 * Enable all the available capabilities which have not been enabled
+	 * already.
+	 */
 	enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
+
+	sve_setup();
+	sme_setup();
+
+	/*
+	 * Check for sane CTR_EL0.CWG value.
+	 */
+	if (!cache_type_cwg())
+		pr_warn("No Cache Writeback Granule information, assuming %d\n",
+			ARCH_DMA_MINALIGN);
 }
 
-void __init setup_cpu_features(void)
+void __init setup_user_features(void)
 {
-	u32 cwg;
-
-	setup_system_capabilities();
 	setup_elf_hwcaps(arm64_elf_hwcaps);
 
 	if (system_supports_32bit_el0()) {
@@ -3352,20 +3364,7 @@  void __init setup_cpu_features(void)
 		elf_hwcap_fixup();
 	}
 
-	if (system_uses_ttbr0_pan())
-		pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
-
-	sve_setup();
-	sme_setup();
 	minsigstksz_setup();
-
-	/*
-	 * Check for sane CTR_EL0.CWG value.
-	 */
-	cwg = cache_type_cwg();
-	if (!cwg)
-		pr_warn("No Cache Writeback Granule information, assuming %d\n",
-			ARCH_DMA_MINALIGN);
 }
 
 static int enable_mismatched_32bit_el0(unsigned int cpu)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 960b98b43506d..e0cdf6820f9e9 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -431,9 +431,10 @@  static void __init hyp_mode_check(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	setup_cpu_features();
+	setup_system_features();
 	hyp_mode_check();
 	apply_alternatives_all();
+	setup_user_features();
 	mark_linear_text_alias_ro();
 }