diff mbox series

[v2,1/1] arm64: Implement archrandom.h for ARMv8.5-RNG

Message ID 20191028203254.7152-2-richard.henderson@linaro.org (mailing list archive)
State New, archived
Headers show
Series arm64: Implement archrandom.h for ARMv8.5-RNG | expand

Commit Message

Richard Henderson Oct. 28, 2019, 8:32 p.m. UTC
From: Richard Henderson <richard.henderson@linaro.org>

Expose the ID_AA64ISAR0.RNDR field to userspace, as the
RNG system registers are always available at EL0.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Use __mrs_s and fix missing cc clobber (Mark),
    Log rng failures with pr_warn (Mark),
    Use __must_check; put RNDR in arch_get_random_long and RNDRRS
    in arch_get_random_seed_long (Ard),
    Use ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, and check this_cpu_has_cap
    when reading random data.  Move everything out of line, now that
    there are 5 other function calls involved, and to unify the rate
    limiting on the pr_warn.
---
 Documentation/arm64/cpu-feature-registers.rst |  2 +
 arch/arm64/include/asm/archrandom.h           | 32 +++++++
 arch/arm64/include/asm/cpucaps.h              |  3 +-
 arch/arm64/include/asm/sysreg.h               |  4 +
 arch/arm64/kernel/cpufeature.c                | 13 +++
 arch/arm64/kernel/random.c                    | 95 +++++++++++++++++++
 arch/arm64/Kconfig                            | 12 +++
 arch/arm64/kernel/Makefile                    |  1 +
 drivers/char/Kconfig                          |  4 +-
 9 files changed, 163 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm64/include/asm/archrandom.h
 create mode 100644 arch/arm64/kernel/random.c

Comments

Richard Henderson Oct. 29, 2019, 1:24 p.m. UTC | #1
On 10/28/19 9:32 PM, richard.henderson@linaro.org wrote:
> +bool arch_get_random_long(unsigned long *v)
> +{
> +	bool ok;
> +
> +	preempt_disable_notrace();
> +
> +	ok = this_cpu_has_cap(ARM64_HAS_RNG);
> +	if (ok) {
> +		/*
> +		 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
> +		 * and set PSTATE.NZCV to 0b0100 otherwise.
> +		 */
> +		asm volatile(
> +			__mrs_s("%0", SYS_RNDR_EL0) "\n"
> +		"	cset %w1, ne\n"
> +		: "=r"(*v), "=r"(ok)
> +		:
> +		: "cc");
> +
> +		if (unlikely(!ok)) {
> +			pr_warn_ratelimited("cpu%d: sys_rndr failed\n",
> +					    read_cpuid_id());
> +		}
> +	}
> +
> +	preempt_enable_notrace();
> +	return ok;
> +}
...
> +bool arch_get_random_seed_long(unsigned long *v)
> +{
> +	preempt_disable_notrace();
> +
> +	if (this_cpu_has_cap(ARM64_HAS_RNG)) {
> +		unsigned long ok, val;
> +
> +		/*
> +		 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 on success,
> +		 * and set PSTATE.NZCV to 0b0100 otherwise.
> +		 */
> +		asm volatile(
> +			__mrs_s("%0", SYS_RNDRRS_EL0) "\n"
> +		"	cset %1, ne\n"
> +		: "=r"(val), "=r"(ok)
> +		:
> +		: "cc");
> +
> +		if (likely(ok)) {
> +			*v = val;
> +			preempt_enable_notrace();
> +			return true;
> +		}
> +
> +		pr_warn_ratelimited("cpu%d: sys_rndrrs failed\n",
> +				    read_cpuid_id());
> +	}
> +
> +	preempt_enable_notrace();
> +	return false;
> +}

Ho hum.  The difference in form between these two functions is unintentional.
I had peeked at the assembly for arch_get_random_long, tweaked the structure a
bit, and meant to copy the result to arch_get_random_seed_long, but forgot.

The first form above produces fewer register spills from gcc8.  I'll use that
form for both functions in v3, supposing there are further comments to be addressed in review.


r~
diff mbox series

Patch

diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst
index 2955287e9acc..78d6f5c6e824 100644
--- a/Documentation/arm64/cpu-feature-registers.rst
+++ b/Documentation/arm64/cpu-feature-registers.rst
@@ -117,6 +117,8 @@  infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | RNDR                         | [63-60] |    y    |
+     +------------------------------+---------+---------+
      | TS                           | [55-52] |    y    |
      +------------------------------+---------+---------+
      | FHM                          | [51-48] |    y    |
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
new file mode 100644
index 000000000000..2f166decb7d8
--- /dev/null
+++ b/arch/arm64/include/asm/archrandom.h
@@ -0,0 +1,32 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+bool __must_check arch_get_random_long(unsigned long *v);
+bool __must_check arch_get_random_int(unsigned int *v);
+bool __must_check arch_get_random_seed_long(unsigned long *v);
+bool __must_check arch_get_random_seed_int(unsigned int *v);
+
+/*
+ * These functions are technically part of the linux/random.h interface,
+ * but are not currently used.  For arm64, they're not actually usable
+ * separately from arch_get_random_long, etc, because we have to disable
+ * preemption around the per-cpu test plus the system register read.
+ * Against some future use, pretend success here, deferring failure to
+ * the actual read.
+ */
+
+static inline bool arch_has_random(void)
+{
+	return true;
+}
+
+static inline bool arch_has_random_seed(void)
+{
+	return true;
+}
+
+#endif /* CONFIG_ARCH_RANDOM */
+#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index ac1dbca3d0cd..1dd7644bc59a 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -54,7 +54,8 @@ 
 #define ARM64_WORKAROUND_1463225		44
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
+#define ARM64_HAS_RNG				47
 
-#define ARM64_NCAPS				47
+#define ARM64_NCAPS				48
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6e919fafb43d..5e718f279469 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -365,6 +365,9 @@ 
 #define SYS_CTR_EL0			sys_reg(3, 3, 0, 0, 1)
 #define SYS_DCZID_EL0			sys_reg(3, 3, 0, 0, 7)
 
+#define SYS_RNDR_EL0			sys_reg(3, 3, 2, 4, 0)
+#define SYS_RNDRRS_EL0			sys_reg(3, 3, 2, 4, 1)
+
 #define SYS_PMCR_EL0			sys_reg(3, 3, 9, 12, 0)
 #define SYS_PMCNTENSET_EL0		sys_reg(3, 3, 9, 12, 1)
 #define SYS_PMCNTENCLR_EL0		sys_reg(3, 3, 9, 12, 2)
@@ -539,6 +542,7 @@ 
 			 ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
 
 /* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT		60
 #define ID_AA64ISAR0_TS_SHIFT		52
 #define ID_AA64ISAR0_FHM_SHIFT		48
 #define ID_AA64ISAR0_DP_SHIFT		44
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 80f459ad0190..456d5c461cbf 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -119,6 +119,7 @@  static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
@@ -1565,6 +1566,18 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
 	},
+#endif
+#ifdef CONFIG_ARCH_RANDOM
+	{
+		.desc = "Random Number Generator",
+		.capability = ARM64_HAS_RNG,
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR0_EL1,
+		.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
 #endif
 	{},
 };
diff --git a/arch/arm64/kernel/random.c b/arch/arm64/kernel/random.c
new file mode 100644
index 000000000000..17956d3251c4
--- /dev/null
+++ b/arch/arm64/kernel/random.c
@@ -0,0 +1,95 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Random number generation using ARMv8.5-RNG.
+ */
+
+#include <linux/random.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
+#include <linux/preempt.h>
+#include <asm/cpufeature.h>
+
+bool arch_get_random_long(unsigned long *v)
+{
+	bool ok;
+
+	preempt_disable_notrace();
+
+	ok = this_cpu_has_cap(ARM64_HAS_RNG);
+	if (ok) {
+		/*
+		 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
+		 * and set PSTATE.NZCV to 0b0100 otherwise.
+		 */
+		asm volatile(
+			__mrs_s("%0", SYS_RNDR_EL0) "\n"
+		"	cset %w1, ne\n"
+		: "=r"(*v), "=r"(ok)
+		:
+		: "cc");
+
+		if (unlikely(!ok)) {
+			pr_warn_ratelimited("cpu%d: sys_rndr failed\n",
+					    read_cpuid_id());
+		}
+	}
+
+	preempt_enable_notrace();
+	return ok;
+}
+
+bool arch_get_random_int(unsigned int *v)
+{
+	unsigned long val;
+
+	if (arch_get_random_long(&val)) {
+		*v = val;
+		return true;
+	}
+
+	return false;
+}
+
+bool arch_get_random_seed_long(unsigned long *v)
+{
+	preempt_disable_notrace();
+
+	if (this_cpu_has_cap(ARM64_HAS_RNG)) {
+		unsigned long ok, val;
+
+		/*
+		 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 on success,
+		 * and set PSTATE.NZCV to 0b0100 otherwise.
+		 */
+		asm volatile(
+			__mrs_s("%0", SYS_RNDRRS_EL0) "\n"
+		"	cset %1, ne\n"
+		: "=r"(val), "=r"(ok)
+		:
+		: "cc");
+
+		if (likely(ok)) {
+			*v = val;
+			preempt_enable_notrace();
+			return true;
+		}
+
+		pr_warn_ratelimited("cpu%d: sys_rndrrs failed\n",
+				    read_cpuid_id());
+	}
+
+	preempt_enable_notrace();
+	return false;
+}
+
+bool arch_get_random_seed_int(unsigned int *v)
+{
+	unsigned long val;
+
+	if (arch_get_random_seed_long(&val)) {
+		*v = val;
+		return true;
+	}
+
+	return false;
+}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3f047afb982c..5bc88601f07b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1438,6 +1438,18 @@  config ARM64_PTR_AUTH
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARCH_RANDOM
+	bool "Enable support for random number generation"
+	default y
+	help
+	  Random number generation (part of the ARMv8.5 Extensions)
+	  provides a high bandwidth, cryptographically secure
+	  hardware random number generator.
+
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 478491f07b4f..a47c2b984da7 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -63,6 +63,7 @@  obj-$(CONFIG_CRASH_CORE)		+= crash_core.o
 obj-$(CONFIG_ARM_SDE_INTERFACE)		+= sdei.o
 obj-$(CONFIG_ARM64_SSBD)		+= ssbd.o
 obj-$(CONFIG_ARM64_PTR_AUTH)		+= pointer_auth.o
+obj-$(CONFIG_ARCH_RANDOM)		+= random.o
 
 obj-y					+= vdso/ probes/
 obj-$(CONFIG_COMPAT_VDSO)		+= vdso32/
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index df0fc997dc3e..f26a0a8cc0d0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -539,7 +539,7 @@  endmenu
 
 config RANDOM_TRUST_CPU
 	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
-	depends on X86 || S390 || PPC
+	depends on X86 || S390 || PPC || ARM64
 	default n
 	help
 	Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
@@ -559,4 +559,4 @@  config RANDOM_TRUST_BOOTLOADER
 	device randomness. Say Y here to assume the entropy provided by the
 	booloader is trustworthy so it will be added to the kernel's entropy
 	pool. Otherwise, say N here so it will be regarded as device input that
-	only mixes the entropy pool.
\ No newline at end of file
+	only mixes the entropy pool.