
[4/4] arm64: Implement Apple IMPDEF TSO memory model control

Message ID 20240411-tso-v1-4-754f11abfbff@marcan.st (mailing list archive)
State New, archived
Series arm64: Support the TSO memory model

Commit Message

Hector Martin April 11, 2024, 12:51 a.m. UTC
Apple CPUs may implement the TSO memory model as an optional
configurable mode. This allows x86 emulators to simplify their
load/store handling, greatly increasing performance.

Expose this via the prctl PR_SET_MEM_MODEL_TSO mechanism. We use the
Apple IMPDEF AIDR_EL1 register to check for the availability of TSO
mode, and enable this codepath on all CPUs with an Apple implementer.

This relies on the ACTLR_EL1 thread state scaffolding introduced
earlier.
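
For illustration, a userspace consumer of this interface would switch modes
roughly as follows. This is a minimal sketch, not part of the patch; it
assumes the PR_{SET,GET}_MEM_MODEL prctl pair introduced earlier in this
series, and the fallback constant values below should be checked against
that uapi patch rather than taken as authoritative.

  #include <stdio.h>
  #include <sys/prctl.h>

  /* Fallbacks for headers that predate this series (values assumed from the uapi patch) */
  #ifndef PR_SET_MEM_MODEL
  #define PR_GET_MEM_MODEL		0x6d4d444c
  #define PR_SET_MEM_MODEL		0x4d4d444c
  #define PR_SET_MEM_MODEL_DEFAULT	0
  #define PR_SET_MEM_MODEL_TSO		1
  #endif

  int main(void)
  {
      /* Ask for TSO; the prctl fails with EINVAL if neither Apple TSO nor fixed TSO is available */
      if (prctl(PR_SET_MEM_MODEL, PR_SET_MEM_MODEL_TSO, 0, 0, 0))
          perror("PR_SET_MEM_MODEL");

      /* Read back the memory model currently in effect for this thread */
      printf("mem model: %d\n", prctl(PR_GET_MEM_MODEL, 0, 0, 0, 0));

      /* Restore the architectural default when TSO is no longer needed */
      prctl(PR_SET_MEM_MODEL, PR_SET_MEM_MODEL_DEFAULT, 0, 0, 0);
      return 0;
  }

On Apple CPUs the set path simply flips ACTLR_APPLE_TSO in the per-thread
ACTLR_EL1 image, so the selected mode follows the thread across context
switches.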

Signed-off-by: Hector Martin <marcan@marcan.st>
---
 arch/arm64/Kconfig                        |  2 ++
 arch/arm64/include/asm/apple_cpufeature.h | 15 +++++++++++++++
 arch/arm64/include/asm/cpufeature.h       |  3 ++-
 arch/arm64/kernel/cpufeature_impdef.c     | 23 +++++++++++++++++++++++
 arch/arm64/kernel/process.c               | 22 ++++++++++++++++++++++
 arch/arm64/tools/cpucaps                  |  1 +
 6 files changed, 65 insertions(+), 1 deletion(-)

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9b3593b34cce..2f3eedd955c9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2167,6 +2167,8 @@  endif # ARM64_PSEUDO_NMI
 
 config ARM64_MEMORY_MODEL_CONTROL
 	bool "Runtime memory model control"
+	default ARCH_APPLE
+	select ARM64_ACTLR_STATE
 	help
 	  Some ARM64 CPUs support runtime switching of the CPU memory
 	  model, which can be useful to emulate other CPU architectures
diff --git a/arch/arm64/include/asm/apple_cpufeature.h b/arch/arm64/include/asm/apple_cpufeature.h
new file mode 100644
index 000000000000..4370d91ffa3e
--- /dev/null
+++ b/arch/arm64/include/asm/apple_cpufeature.h
@@ -0,0 +1,15 @@ 
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef __ASM_APPLE_CPUFEATURES_H
+#define __ASM_APPLE_CPUFEATURES_H
+
+#include <linux/bits.h>
+#include <asm/sysreg.h>
+
+#define AIDR_APPLE_TSO_SHIFT	9
+#define AIDR_APPLE_TSO		BIT(9)
+
+#define ACTLR_APPLE_TSO_SHIFT	1
+#define ACTLR_APPLE_TSO		BIT(1)
+
+#endif
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 46ab37f8f4d8..a191000d88c2 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -911,7 +911,8 @@  static inline unsigned int get_vmid_bits(u64 mmfr1)
 
 static __always_inline bool system_has_actlr_state(void)
 {
-	return false;
+	return IS_ENABLED(CONFIG_ARM64_ACTLR_STATE) &&
+		alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE);
 }
 
 s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new, s64 cur);
diff --git a/arch/arm64/kernel/cpufeature_impdef.c b/arch/arm64/kernel/cpufeature_impdef.c
index bb04a8e3d79d..9325d1eb12f4 100644
--- a/arch/arm64/kernel/cpufeature_impdef.c
+++ b/arch/arm64/kernel/cpufeature_impdef.c
@@ -4,8 +4,21 @@ 
  */
 
 #include <asm/cpufeature.h>
+#include <asm/apple_cpufeature.h>
 
 #ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+static bool has_apple_feature(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 val;
+	WARN_ON(scope != SCOPE_SYSTEM);
+
+	if (read_cpuid_implementor() != ARM_CPU_IMP_APPLE)
+		return false;
+
+	val = read_sysreg(aidr_el1);
+	return cpufeature_matches(val, entry);
+}
+
 static bool has_tso_fixed(const struct arm64_cpu_capabilities *entry, int scope)
 {
 	/* List of CPUs that always use the TSO memory model */
@@ -22,6 +35,16 @@  static bool has_tso_fixed(const struct arm64_cpu_capabilities *entry, int scope)
 
 static const struct arm64_cpu_capabilities arm64_impdef_features[] = {
 #ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
+	{
+		.desc = "TSO memory model (Apple)",
+		.capability = ARM64_HAS_TSO_APPLE,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_apple_feature,
+		.field_pos = AIDR_APPLE_TSO_SHIFT,
+		.field_width = 1,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
 	{
 		.desc = "TSO memory model (Fixed)",
 		.capability = ARM64_HAS_TSO_FIXED,
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 117f80e16aac..34a19ecfb630 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -44,6 +44,7 @@ 
 #include <linux/memory_ordering_model.h>
 
 #include <asm/alternative.h>
+#include <asm/apple_cpufeature.h>
 #include <asm/compat.h>
 #include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
@@ -522,6 +523,10 @@  void update_sctlr_el1(u64 sctlr)
 #ifdef CONFIG_ARM64_MEMORY_MODEL_CONTROL
 int arch_prctl_mem_model_get(struct task_struct *t)
 {
+	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE) &&
+		t->thread.actlr & ACTLR_APPLE_TSO)
+		return PR_SET_MEM_MODEL_TSO;
+
 	return PR_SET_MEM_MODEL_DEFAULT;
 }
 
@@ -531,6 +536,23 @@  int arch_prctl_mem_model_set(struct task_struct *t, unsigned long val)
 	    val == PR_SET_MEM_MODEL_TSO)
 		return 0;
 
+	if (alternative_has_cap_unlikely(ARM64_HAS_TSO_APPLE)) {
+		WARN_ON(!system_has_actlr_state());
+
+		switch (val) {
+		case PR_SET_MEM_MODEL_TSO:
+			t->thread.actlr |= ACTLR_APPLE_TSO;
+			break;
+		case PR_SET_MEM_MODEL_DEFAULT:
+			t->thread.actlr &= ~ACTLR_APPLE_TSO;
+			break;
+		default:
+			return -EINVAL;
+		}
+		write_sysreg(t->thread.actlr, actlr_el1);
+		return 0;
+	}
+
 	if (val == PR_SET_MEM_MODEL_DEFAULT)
 		return 0;
 
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index daa6b9495402..62f9ca9ce44b 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -52,6 +52,7 @@  HAS_STAGE2_FWB
 HAS_TCR2
 HAS_TIDCP1
 HAS_TLB_RANGE
+HAS_TSO_APPLE
 HAS_TSO_FIXED
 HAS_VA52
 HAS_VIRT_HOST_EXTN