[11/37] arm64: Rename SVE/SME cpu_enable functions

Message ID 20230919092850.1940729-12-mark.rutland@arm.com (mailing list archive)
State New, archived
Series arm64: Remove cpus_have_const_cap()

Commit Message

Mark Rutland Sept. 19, 2023, 9:28 a.m. UTC
The arm64_cpu_capabilities::cpu_enable() callbacks for SVE, SME, SME2,
and FA64 are named with an unusual "${feature}_kernel_enable" pattern
rather than the much more common "cpu_enable_${feature}". Now that we
only use these as cpu_enable() callbacks, it would be nice to have them
match the usual scheme.

This patch renames the cpu_enable() callbacks to match this scheme. At
the same time, the existing comments regarding ordering requirements
are turned into build-time assertions, matching what we do for
can_use_gic_priorities() and has_gic_prio_relaxed_sync(), and the newly
redundant comments are removed.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
---
 arch/arm64/include/asm/fpsimd.h |  8 ++++----
 arch/arm64/kernel/cpufeature.c  |  8 ++++----
 arch/arm64/kernel/fpsimd.c      | 26 ++++++++++----------------
 3 files changed, 18 insertions(+), 24 deletions(-)
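
For context, the build-time assertions in the patch below encode the ordering
requirement ("SME2 and FA64 must be enabled after base SME") as a comparison of
ARM64_* capability numbers, so a violation fails the build instead of relying on
a comment being noticed. A minimal, standalone sketch of the pattern, with
hypothetical enum values standing in for the generated asm/cpucaps.h constants
and static_assert() standing in for the kernel's BUILD_BUG_ON():

#include <assert.h>

/* Hypothetical stand-ins for the generated asm/cpucaps.h constants. */
enum {
	ARM64_SME,
	ARM64_SME2,
	ARM64_SME_FA64,
};

/*
 * Compile-time ordering checks: if either constant were ever numbered
 * at or below ARM64_SME, the build fails instead of silently enabling
 * SME2/FA64 before the base SME support.
 */
static_assert(ARM64_SME2 > ARM64_SME, "SME2 must be enabled after SME");
static_assert(ARM64_SME_FA64 > ARM64_SME, "FA64 must be enabled after SME");

int main(void)
{
	return 0;
}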

Comments

Mark Brown Sept. 19, 2023, 10:52 a.m. UTC | #1
On Tue, Sep 19, 2023 at 10:28:24AM +0100, Mark Rutland wrote:
> The arm64_cpu_capabilities::cpu_enable() callbacks for SVE, SME, SME2,
> and FA64 are named with an unusual "${feature}_kernel_enable" pattern
> rather than the much more common "cpu_enable_${feature}". Now that we
> only use these as cpu_enable() callbacks, it would be nice to have them
> match the usual scheme.

Reviewed-by: Mark Brown <broonie@kernel.org>

However:

> This patch renames the cpu_enable() callbacks to match this scheme. At
> the tsame time, the existing comments regarding ordering requirements

Typo.

> are turned into build-time assertions, matching what we do for
> can_use_gic_priorities() and has_gic_prio_relaxed_sync(), and the newly
> redundant comments are removed.

This should really be a separate commit.

Mark Rutland Sept. 21, 2023, 4:50 p.m. UTC | #2
On Tue, Sep 19, 2023 at 11:52:47AM +0100, Mark Brown wrote:
> On Tue, Sep 19, 2023 at 10:28:24AM +0100, Mark Rutland wrote:
> > The arm64_cpu_capabilities::cpu_enable() callbacks for SVE, SME, SME2,
> > and FA64 are named with an unusual "${feature}_kernel_enable" pattern
> > rather than the much more common "cpu_enable_${feature}". Now that we
> > only use these as cpu_enable() callbacks, it would be nice to have them
> > match the usual scheme.
> 
> Reviewed-by: Mark Brown <broonie@kernel.org>
> 
> However:
> 
> > This patch renames the cpu_enable() callbacks to match this scheme. At
> > the tsame time, the existing comments regarding ordering requirements
> 
> Typo.

Thanks; fixed.

> > are turned into build-time assertions, matching what we do for
> > can_use_gic_priorities() and has_gic_prio_relaxed_sync(), and the newly
> > redundant comments are removed.
> 
> This should really be a separate commit.

Sure -- I've split this into a first patch which adds the assertions, and a
second which does the rename.

Mark.

Patch

diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index bd7bea92dae07..0cbaa06b394a0 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -149,10 +149,10 @@  extern void sme_save_state(void *state, int zt);
 extern void sme_load_state(void const *state, int zt);
 
 struct arm64_cpu_capabilities;
-extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
-extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
 
 extern u64 read_zcr_features(void);
 extern u64 read_smcr_features(void);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d2ed45e2c65a1..4509f1706fdf7 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2424,7 +2424,7 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.desc = "Scalable Vector Extension",
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SVE,
-		.cpu_enable = sve_kernel_enable,
+		.cpu_enable = cpu_enable_sve,
 		.matches = has_cpuid_feature,
 		ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, SVE, IMP)
 	},
@@ -2677,7 +2677,7 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SME,
 		.matches = has_cpuid_feature,
-		.cpu_enable = sme_kernel_enable,
+		.cpu_enable = cpu_enable_sme,
 		ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, IMP)
 	},
 	/* FA64 should be sorted after the base SME capability */
@@ -2686,7 +2686,7 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SME_FA64,
 		.matches = has_cpuid_feature,
-		.cpu_enable = fa64_kernel_enable,
+		.cpu_enable = cpu_enable_fa64,
 		ARM64_CPUID_FIELDS(ID_AA64SMFR0_EL1, FA64, IMP)
 	},
 	{
@@ -2694,7 +2694,7 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.capability = ARM64_SME2,
 		.matches = has_cpuid_feature,
-		.cpu_enable = sme2_kernel_enable,
+		.cpu_enable = cpu_enable_sme2,
 		ARM64_CPUID_FIELDS(ID_AA64PFR1_EL1, SME, SME2)
 	},
 #endif /* CONFIG_ARM64_SME */
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 601b973f90ad2..45ea9cabbaa41 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1160,11 +1160,7 @@  static void __init sve_efi_setup(void)
 	panic("Cannot allocate percpu memory for EFI SVE save/restore");
 }
 
-/*
- * Enable SVE for EL1.
- * Intended for use by the cpufeatures code during CPU boot.
- */
-void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sve(const struct arm64_cpu_capabilities *__always_unused p)
 {
 	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
 	isb();
@@ -1295,7 +1291,7 @@  static void sme_free(struct task_struct *task)
 	task->thread.sme_state = NULL;
 }
 
-void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sme(const struct arm64_cpu_capabilities *__always_unused p)
 {
 	/* Set priority for all PEs to architecturally defined minimum */
 	write_sysreg_s(read_sysreg_s(SYS_SMPRI_EL1) & ~SMPRI_EL1_PRIORITY_MASK,
@@ -1310,23 +1306,21 @@  void sme_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
 	isb();
 }
 
-/*
- * This must be called after sme_kernel_enable(), we rely on the
- * feature table being sorted to ensure this.
- */
-void sme2_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_sme2(const struct arm64_cpu_capabilities *__always_unused p)
 {
+	/* This must be enabled after SME */
+	BUILD_BUG_ON(ARM64_SME2 <= ARM64_SME);
+
 	/* Allow use of ZT0 */
 	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_EZT0_MASK,
 		       SYS_SMCR_EL1);
 }
 
-/*
- * This must be called after sme_kernel_enable(), we rely on the
- * feature table being sorted to ensure this.
- */
-void fa64_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
+void cpu_enable_fa64(const struct arm64_cpu_capabilities *__always_unused p)
 {
+	/* This must be enabled after SME */
+	BUILD_BUG_ON(ARM64_SME_FA64 <= ARM64_SME);
+
 	/* Allow use of FA64 */
 	write_sysreg_s(read_sysreg_s(SYS_SMCR_EL1) | SMCR_ELx_FA64_MASK,
 		       SYS_SMCR_EL1);