[1/1] arm64: Implement archrandom.h for ARMv8.5-RNG
diff mbox series

Message ID 20191019022048.28065-2-richard.henderson@linaro.org
State New
Headers show
Series
  • arm64: Implement archrandom.h for ARMv8.5-RNG
Related show

Commit Message

Richard Henderson Oct. 19, 2019, 2:20 a.m. UTC
Expose the ID_AA64ISAR0.RNDR field to userspace, as the
RNG system registers are always available at EL0.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 Documentation/arm64/cpu-feature-registers.rst |  2 +
 arch/arm64/include/asm/archrandom.h           | 76 +++++++++++++++++++
 arch/arm64/include/asm/cpucaps.h              |  3 +-
 arch/arm64/include/asm/sysreg.h               |  1 +
 arch/arm64/kernel/cpufeature.c                | 34 +++++++++
 arch/arm64/Kconfig                            | 12 +++
 drivers/char/Kconfig                          |  4 +-
 7 files changed, 129 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm64/include/asm/archrandom.h

Comments

Mark Rutland Oct. 24, 2019, 11:32 a.m. UTC | #1
Hi Richard,

On Fri, Oct 18, 2019 at 07:20:48PM -0700, Richard Henderson wrote:
> Expose the ID_AA64ISAR0.RNDR field to userspace, as the
> RNG system registers are always available at EL0.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  Documentation/arm64/cpu-feature-registers.rst |  2 +
>  arch/arm64/include/asm/archrandom.h           | 76 +++++++++++++++++++
>  arch/arm64/include/asm/cpucaps.h              |  3 +-
>  arch/arm64/include/asm/sysreg.h               |  1 +
>  arch/arm64/kernel/cpufeature.c                | 34 +++++++++
>  arch/arm64/Kconfig                            | 12 +++
>  drivers/char/Kconfig                          |  4 +-

I suspect that we may need KVM changes -- e.g. the ability to hide this
from guests.

>  7 files changed, 129 insertions(+), 3 deletions(-)
>  create mode 100644 arch/arm64/include/asm/archrandom.h
> 
> diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst
> index 2955287e9acc..78d6f5c6e824 100644
> --- a/Documentation/arm64/cpu-feature-registers.rst
> +++ b/Documentation/arm64/cpu-feature-registers.rst
> @@ -117,6 +117,8 @@ infrastructure:
>       +------------------------------+---------+---------+
>       | Name                         |  bits   | visible |
>       +------------------------------+---------+---------+
> +     | RNDR                         | [63-60] |    y    |
> +     +------------------------------+---------+---------+
>       | TS                           | [55-52] |    y    |
>       +------------------------------+---------+---------+
>       | FHM                          | [51-48] |    y    |
> diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
> new file mode 100644
> index 000000000000..80369898e274
> --- /dev/null
> +++ b/arch/arm64/include/asm/archrandom.h
> @@ -0,0 +1,76 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_ARCHRANDOM_H
> +#define _ASM_ARCHRANDOM_H
> +
> +#include <asm/cpufeature.h>
> +
> +/* Unconditional execution of RNDR and RNDRRS.  */
> +
> +static inline bool arm_rndr(unsigned long *v)
> +{
> +	int pass;
> +
> +	asm("mrs %0, s3_3_c2_c4_0\n\t"  /* RNDR */
> +	    "cset %w1, ne"
> +	    : "=r"(*v), "=r"(pass));
> +	return pass != 0;
> +}

Please give this a mnemonic in <asm/sysreg.h>, i.e.

#define SYS_RNDR	sys_reg(3, 3, 2, 4, 0)

... and make this function:

static inline bool read_rndr(unsigned long *v)
{
	unsigned long pass;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 upon success, and set
	 * PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR) "\n"
	"	cset %1, ne\n"
	: "=r" (*v), "=r" (pass)
	:
	: "cc");

	return pass;
}

Note that the cc clobber is important in case this gets inlined!

> +
> +static inline bool arm_rndrrs(unsigned long *v)
> +{
> +	int pass;
> +
> +	asm("mrs %0, s3_3_c2_c4_1\n\t"  /* RNDRRS */
> +	    "cset %w1, ne"
> +	    : "=r"(*v), "=r"(pass));
> +	return pass != 0;
> +}

Likewise, in <asm/sysreg.h>, add:

#define SYS_RNDRRS	sys_reg(3, 3, 2, 4, 1)

...and here have:

static inline bool read_rndrrs(unsigned long *v)
{
	unsigned long pass;

	/*
	 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 upon success, and
	 * set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile (
		__mrs_s("%0", SYS_RNDRRS) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (pass)
	:
	: "cc");

	return pass;
}

> +
> +#ifdef CONFIG_ARCH_RANDOM
> +
> +/*
> + * Note that these two interfaces, random and random_seed, are strongly
> + * tied to the Intel instructions RDRAND and RDSEED.  RDSEED is the
> + * "enhanced" version and has stronger guarantees.  The ARMv8.5-RNG
> + * instruction RNDR corresponds to RDSEED, thus we put our implementation
> + * into the random_seed set of functions.
> + *
> + * Note as well that Intel does not have an instruction that corresponds
> + * to the RNDRRS instruction, so there's no generic interface for that.
> + */
> +static inline bool arch_has_random(void)
> +{
> +	return false;
> +}
> +
> +static inline bool arch_get_random_long(unsigned long *v)
> +{
> +	return false;
> +}
> +
> +static inline bool arch_get_random_int(unsigned int *v)
> +{
> +	return false;
> +}
> +
> +static inline bool arch_has_random_seed(void)
> +{
> +	return cpus_have_const_cap(ARM64_HAS_RNG);
> +}
> +
> +static inline bool arch_get_random_seed_long(unsigned long *v)
> +{
> +	/* If RNDR fails, attempt to re-seed with RNDRRS.  */
> +	return arch_has_random_seed() && (arm_rndr(v) || arm_rndrrs(v));
> +}

Here we clobber the value at v even if the reads of RNDR and RNDRRS
failed. Is that ok?

Maybe we want the accessors to only assign to v upon success.

> +
> +static inline bool arch_get_random_seed_int(unsigned int *v)
> +{
> +	unsigned long vl = 0;
> +	bool ret = arch_get_random_seed_long(&vl);
> +	*v = vl;
> +	return ret;
> +}
> +
> +#endif /* CONFIG_ARCH_RANDOM */
> +#endif /* _ASM_ARCHRANDOM_H */
> diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> index f19fe4b9acc4..2fc15765d25d 100644
> --- a/arch/arm64/include/asm/cpucaps.h
> +++ b/arch/arm64/include/asm/cpucaps.h
> @@ -52,7 +52,8 @@
>  #define ARM64_HAS_IRQ_PRIO_MASKING		42
>  #define ARM64_HAS_DCPODP			43
>  #define ARM64_WORKAROUND_1463225		44
> +#define ARM64_HAS_RNG				45
>  
> -#define ARM64_NCAPS				45
> +#define ARM64_NCAPS				46
>  
>  #endif /* __ASM_CPUCAPS_H */
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index 972d196c7714..7a0c159661cd 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -539,6 +539,7 @@
>  			 ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
>  
>  /* id_aa64isar0 */
> +#define ID_AA64ISAR0_RNDR_SHIFT		60
>  #define ID_AA64ISAR0_TS_SHIFT		52
>  #define ID_AA64ISAR0_FHM_SHIFT		48
>  #define ID_AA64ISAR0_DP_SHIFT		44
> diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> index cabebf1a7976..34b9731e1fab 100644
> --- a/arch/arm64/kernel/cpufeature.c
> +++ b/arch/arm64/kernel/cpufeature.c
> @@ -119,6 +119,7 @@ static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
>   * sync with the documentation of the CPU feature register ABI.
>   */
>  static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
> +	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
>  	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
> @@ -1261,6 +1262,27 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
>  }
>  #endif
>  
> +#ifdef CONFIG_ARCH_RANDOM
> +static bool can_use_rng(const struct arm64_cpu_capabilities *entry, int scope)
> +{
> +	unsigned long tmp;
> +	int i;
> +
> +	if (!has_cpuid_feature(entry, scope))
> +		return false;
> +
> +	/*
> +	 * Re-seed from the true random number source.
> +	 * If this fails, disable the feature.
> +	 */
> +	for (i = 0; i < 10; ++i) {
> +		if (arm_rndrrs(&tmp))
> +			return true;
> +	}

The ARM ARM (ARM DDI 0487E.a) says:

| Reseeded Random Number. Returns a 64-bit random number which is
| reseeded from the True Random Number source at an IMPLEMENTATION
| DEFINED rate.

... and:

| If the instruction cannot return a genuine random number in a
| reasonable period of time, PSTATE.NZCV is set to 0b0100 and the data
| value returned in UNKNOWN.

... so it's not clear to me if the retry logic makes sense. Naively I'd
expect "reasonable period of time" to include 10 attempts.

Given we'll have to handle failure elsewhere, I suspect that it might be
best to assume that this works until we encounter evidence to the
contrary.

> +	return false;
> +}
> +#endif
> +
>  static const struct arm64_cpu_capabilities arm64_features[] = {
>  	{
>  		.desc = "GIC system register CPU interface",
> @@ -1560,6 +1582,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
>  		.sign = FTR_UNSIGNED,
>  		.min_field_value = 1,
>  	},
> +#endif
> +#ifdef CONFIG_ARCH_RANDOM
> +	{
> +		.desc = "Random Number Generator",
> +		.capability = ARM64_HAS_RNG,
> +		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,

I strongly suspect we're going to encounter systems where this feature
is mismatched, such that this can't be a boot CPU feature.

If we need entropy early in boot, we could detect if the boot CPU had
the feature, and seed the pool using it, then later make use of a
system-wide capability.

Thanks,
Mark.

> +		.matches = can_use_rng,
> +		.sys_reg = SYS_ID_AA64ISAR0_EL1,
> +		.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
> +		.sign = FTR_UNSIGNED,
> +		.min_field_value = 1,
> +	},
>  #endif
>  	{},
>  };
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 950a56b71ff0..a035c178102a 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -1421,6 +1421,18 @@ config ARM64_PTR_AUTH
>  
>  endmenu
>  
> +menu "ARMv8.5 architectural features"
> +
> +config ARCH_RANDOM
> +	bool "Enable support for random number generation"
> +	default y
> +	help
> +	  Random number generation (part of the ARMv8.5 Extensions)
> +	  provides a high bandwidth, cryptographically secure
> +	  hardware random number generator.
> +
> +endmenu
> +
>  config ARM64_SVE
>  	bool "ARM Scalable Vector Extension support"
>  	default y
> diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
> index df0fc997dc3e..f26a0a8cc0d0 100644
> --- a/drivers/char/Kconfig
> +++ b/drivers/char/Kconfig
> @@ -539,7 +539,7 @@ endmenu
>  
>  config RANDOM_TRUST_CPU
>  	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
> -	depends on X86 || S390 || PPC
> +	depends on X86 || S390 || PPC || ARM64
>  	default n
>  	help
>  	Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
> @@ -559,4 +559,4 @@ config RANDOM_TRUST_BOOTLOADER
>  	device randomness. Say Y here to assume the entropy provided by the
>  	booloader is trustworthy so it will be added to the kernel's entropy
>  	pool. Otherwise, say N here so it will be regarded as device input that
> -	only mixes the entropy pool.
> \ No newline at end of file
> +	only mixes the entropy pool.
> -- 
> 2.17.1
>
Ard Biesheuvel Oct. 24, 2019, 11:57 a.m. UTC | #2
Hi Richard, Mark,

Thanks for picking this up. I was literally about to hit send on a
series of my own implementing the same (or actually, I did hit send
but ARM FossMail rejected it).

On Thu, 24 Oct 2019 at 13:32, Mark Rutland <mark.rutland@arm.com> wrote:
>
> Hi Richard,
>
> On Fri, Oct 18, 2019 at 07:20:48PM -0700, Richard Henderson wrote:
> > Expose the ID_AA64ISAR0.RNDR field to userspace, as the
> > RNG system registers are always available at EL0.
> >
> > Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> > ---
> >  Documentation/arm64/cpu-feature-registers.rst |  2 +
> >  arch/arm64/include/asm/archrandom.h           | 76 +++++++++++++++++++
> >  arch/arm64/include/asm/cpucaps.h              |  3 +-
> >  arch/arm64/include/asm/sysreg.h               |  1 +
> >  arch/arm64/kernel/cpufeature.c                | 34 +++++++++
> >  arch/arm64/Kconfig                            | 12 +++
> >  drivers/char/Kconfig                          |  4 +-
>
> I suspect that we may need KVM changes -- e.g. the ability to hide this
> from guests.
>

These instructions are unconditionally enabled (if implemented) all
the way down to EL0, so I'm not sure if there is a lot of meaningful
hiding we can do here.

> >  7 files changed, 129 insertions(+), 3 deletions(-)
> >  create mode 100644 arch/arm64/include/asm/archrandom.h
> >
> > diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst
> > index 2955287e9acc..78d6f5c6e824 100644
> > --- a/Documentation/arm64/cpu-feature-registers.rst
> > +++ b/Documentation/arm64/cpu-feature-registers.rst
> > @@ -117,6 +117,8 @@ infrastructure:
> >       +------------------------------+---------+---------+
> >       | Name                         |  bits   | visible |
> >       +------------------------------+---------+---------+
> > +     | RNDR                         | [63-60] |    y    |
> > +     +------------------------------+---------+---------+
> >       | TS                           | [55-52] |    y    |
> >       +------------------------------+---------+---------+
> >       | FHM                          | [51-48] |    y    |
> > diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
> > new file mode 100644
> > index 000000000000..80369898e274
> > --- /dev/null
> > +++ b/arch/arm64/include/asm/archrandom.h
> > @@ -0,0 +1,76 @@
> > +/* SPDX-License-Identifier: GPL-2.0 */
> > +#ifndef _ASM_ARCHRANDOM_H
> > +#define _ASM_ARCHRANDOM_H
> > +
> > +#include <asm/cpufeature.h>
> > +
> > +/* Unconditional execution of RNDR and RNDRRS.  */
> > +
> > +static inline bool arm_rndr(unsigned long *v)
> > +{
> > +     int pass;
> > +
> > +     asm("mrs %0, s3_3_c2_c4_0\n\t"  /* RNDR */
> > +         "cset %w1, ne"
> > +         : "=r"(*v), "=r"(pass));
> > +     return pass != 0;
> > +}
>
> Please give this a mnemonic in <asm/sysreg.h>, i.e.
>
> #define SYS_RNDR        sys_reg(3, 3, 2, 4, 0)
>
> ... and make this function:
>
> static inline bool read_rndr(unsigned long *v)
> {
>         unsigned long pass;
>
>         /*
>          * Reads of RNDR set PSTATE.NZCV to 0b0000 upon success, and set
>          * PSTATE.NZCV to 0b0100 otherwise.
>          */
>         asm volatile(
>                 __mrs_s("%0", SYS_RNDR) "\n"
>         "       cset %1, ne\n"
>         : "=r" (*v), "=r" (pass)
>         :
>         : "cc");
>
>         return pass;
> }
>
> Note that the cc clobber is important in case this gets inlined!
>
> > +
> > +static inline bool arm_rndrrs(unsigned long *v)
> > +{
> > +     int pass;
> > +
> > +     asm("mrs %0, s3_3_c2_c4_1\n\t"  /* RNDRRS */
> > +         "cset %w1, ne"
> > +         : "=r"(*v), "=r"(pass));
> > +     return pass != 0;
> > +}
>
> Likewise, in <asm/sysreg.h>, add:
>
> #define SYS_RNDRRS      sys_reg(3, 3, 2, 4, 1)
>
> ...and here have:
>
> static inline bool read_rndrrs(unsigned long *v)
> {
>         unsigned long pass;
>
>         /*
>          * Reads of RNDRRS set PSTATE.NZCV to 0b0000 upon success, and
>          * set PSTATE.NZCV to 0b0100 otherwise.
>          */
>         asm volatile (
>                 __mrs_s("%0", SYS_RNDRRS) "\n"
>         "       cset %w1, ne\n"
>         : "=r" (*v), "=r" (pass)
>         :
>         : "cc");
>
>         return pass;
> }
>
> > +
> > +#ifdef CONFIG_ARCH_RANDOM
> > +
> > +/*
> > + * Note that these two interfaces, random and random_seed, are strongly
> > + * tied to the Intel instructions RDRAND and RDSEED.  RDSEED is the
> > + * "enhanced" version and has stronger guarantees.  The ARMv8.5-RNG
> > + * instruction RNDR corresponds to RDSEED, thus we put our implementation
> > + * into the random_seed set of functions.
> > + *

Is that accurate? The ARM ARM says that RNDR is backed by a DRBG which

"""
...is reseeded after an IMPLEMENTATION DEFINED number of random
numbers has been generated and read
using the RNDR register.
"""

which means that you cannot rely on this reseeding to take place at all.

So the way I read this, RNDR ~= RDRAND and RNDRRS ~= RDSEED, and we
should wire up the functions below accordingly.

> > + * Note as well that Intel does not have an instruction that corresponds
> > + * to the RNDRRS instruction, so there's no generic interface for that.
> > + */
> > +static inline bool arch_has_random(void)
> > +{
> > +     return false;
> > +}
> > +
> > +static inline bool arch_get_random_long(unsigned long *v)
> > +{
> > +     return false;
> > +}
> > +
> > +static inline bool arch_get_random_int(unsigned int *v)
> > +{
> > +     return false;
> > +}
> > +
> > +static inline bool arch_has_random_seed(void)
> > +{
> > +     return cpus_have_const_cap(ARM64_HAS_RNG);
> > +}
> > +
> > +static inline bool arch_get_random_seed_long(unsigned long *v)
> > +{
> > +     /* If RNDR fails, attempt to re-seed with RNDRRS.  */
> > +     return arch_has_random_seed() && (arm_rndr(v) || arm_rndrrs(v));
> > +}
>
> Here we clobber the value at v even if the reads of RNDR and RNDRRS
> failed. Is that ok?
>
> Maybe we want the accessors to only assign to v upon success.
>

I'd be more comfortable with this if arch_get_random_*() were
annotated as __must_check, and the assignment only done conditionally.

> > +
> > +static inline bool arch_get_random_seed_int(unsigned int *v)
> > +{
> > +     unsigned long vl = 0;
> > +     bool ret = arch_get_random_seed_long(&vl);
> > +     *v = vl;
> > +     return ret;
> > +}
> > +
> > +#endif /* CONFIG_ARCH_RANDOM */
> > +#endif /* _ASM_ARCHRANDOM_H */
> > diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
> > index f19fe4b9acc4..2fc15765d25d 100644
> > --- a/arch/arm64/include/asm/cpucaps.h
> > +++ b/arch/arm64/include/asm/cpucaps.h
> > @@ -52,7 +52,8 @@
> >  #define ARM64_HAS_IRQ_PRIO_MASKING           42
> >  #define ARM64_HAS_DCPODP                     43
> >  #define ARM64_WORKAROUND_1463225             44
> > +#define ARM64_HAS_RNG                                45
> >
> > -#define ARM64_NCAPS                          45
> > +#define ARM64_NCAPS                          46
> >
> >  #endif /* __ASM_CPUCAPS_H */
> > diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> > index 972d196c7714..7a0c159661cd 100644
> > --- a/arch/arm64/include/asm/sysreg.h
> > +++ b/arch/arm64/include/asm/sysreg.h
> > @@ -539,6 +539,7 @@
> >                        ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
> >
> >  /* id_aa64isar0 */
> > +#define ID_AA64ISAR0_RNDR_SHIFT              60
> >  #define ID_AA64ISAR0_TS_SHIFT                52
> >  #define ID_AA64ISAR0_FHM_SHIFT               48
> >  #define ID_AA64ISAR0_DP_SHIFT                44
> > diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
> > index cabebf1a7976..34b9731e1fab 100644
> > --- a/arch/arm64/kernel/cpufeature.c
> > +++ b/arch/arm64/kernel/cpufeature.c
> > @@ -119,6 +119,7 @@ static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
> >   * sync with the documentation of the CPU feature register ABI.
> >   */
> >  static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
> > +     ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
> >       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
> >       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
> >       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
> > @@ -1261,6 +1262,27 @@ static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
> >  }
> >  #endif
> >
> > +#ifdef CONFIG_ARCH_RANDOM
> > +static bool can_use_rng(const struct arm64_cpu_capabilities *entry, int scope)
> > +{
> > +     unsigned long tmp;
> > +     int i;
> > +
> > +     if (!has_cpuid_feature(entry, scope))
> > +             return false;
> > +
> > +     /*
> > +      * Re-seed from the true random number source.
> > +      * If this fails, disable the feature.
> > +      */
> > +     for (i = 0; i < 10; ++i) {
> > +             if (arm_rndrrs(&tmp))
> > +                     return true;
> > +     }
>
> The ARM ARM (ARM DDI 0487E.a) says:
>
> | Reseeded Random Number. Returns a 64-bit random number which is
> | reseeded from the True Random Number source at an IMPLEMENTATION
> | DEFINED rate.
>
> ... and:
>
> | If the instruction cannot return a genuine random number in a
> | reasonable period of time, PSTATE.NZCV is set to 0b0100 and the data
> | value returned in UNKNOWN.
>
> ... so it's not clear to me if the retry logic makes sense. Naively I'd
> expect "reasonable period of time" to include 10 attempts.
>
> Given we'll have to handle failure elsewhere, I suspect that it might be
> best to assume that this works until we encounter evidence to the
> contrary.
>

The architecture isn't very clear about what a reasonable period of
time is, but the fact that it can fail transiently suggests that
trying it only once doesn't make a lot of sense. However, I'm not sure
whether having a go at probe time is helpful in deciding whether we
can use it at all, and I'd be inclined to drop this test altogether.
This is especially true since we cannot stop EL0 or VMs at EL1 using
the instruction.

> > +     return false;
> > +}
> > +#endif
> > +
> >  static const struct arm64_cpu_capabilities arm64_features[] = {
> >       {
> >               .desc = "GIC system register CPU interface",
> > @@ -1560,6 +1582,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
> >               .sign = FTR_UNSIGNED,
> >               .min_field_value = 1,
> >       },
> > +#endif
> > +#ifdef CONFIG_ARCH_RANDOM
> > +     {
> > +             .desc = "Random Number Generator",
> > +             .capability = ARM64_HAS_RNG,
> > +             .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
>
> I strongly suspect we're going to encounter systems where this feature
> is mismatched, such that this can't be a boot CPU feature.
>
> If we need entropy early in boot, we could detect if the boot CPU had
> the feature, and seed the pool using it, then later make use of a
> system-wide capability.
>


> Thanks,
> Mark.
>
> > +             .matches = can_use_rng,
> > +             .sys_reg = SYS_ID_AA64ISAR0_EL1,
> > +             .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
> > +             .sign = FTR_UNSIGNED,
> > +             .min_field_value = 1,
> > +     },
> >  #endif
> >       {},
> >  };
> > diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> > index 950a56b71ff0..a035c178102a 100644
> > --- a/arch/arm64/Kconfig
> > +++ b/arch/arm64/Kconfig
> > @@ -1421,6 +1421,18 @@ config ARM64_PTR_AUTH
> >
> >  endmenu
> >
> > +menu "ARMv8.5 architectural features"
> > +
> > +config ARCH_RANDOM
> > +     bool "Enable support for random number generation"
> > +     default y
> > +     help
> > +       Random number generation (part of the ARMv8.5 Extensions)
> > +       provides a high bandwidth, cryptographically secure
> > +       hardware random number generator.
> > +
> > +endmenu
> > +
> >  config ARM64_SVE
> >       bool "ARM Scalable Vector Extension support"
> >       default y
> > diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
> > index df0fc997dc3e..f26a0a8cc0d0 100644
> > --- a/drivers/char/Kconfig
> > +++ b/drivers/char/Kconfig
> > @@ -539,7 +539,7 @@ endmenu
> >
> >  config RANDOM_TRUST_CPU
> >       bool "Trust the CPU manufacturer to initialize Linux's CRNG"
> > -     depends on X86 || S390 || PPC
> > +     depends on X86 || S390 || PPC || ARM64
> >       default n
> >       help
> >       Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
> > @@ -559,4 +559,4 @@ config RANDOM_TRUST_BOOTLOADER
> >       device randomness. Say Y here to assume the entropy provided by the
> >       booloader is trustworthy so it will be added to the kernel's entropy
> >       pool. Otherwise, say N here so it will be regarded as device input that
> > -     only mixes the entropy pool.
> > \ No newline at end of file
> > +     only mixes the entropy pool.
> > --
> > 2.17.1
> >
Richard Henderson Oct. 27, 2019, 12:14 p.m. UTC | #3
On 10/24/19 1:32 PM, Mark Rutland wrote:
> Please give this a mnemonic in <asm/sysreg.h>, i.e.
> 
> #define SYS_RNDR	sys_reg(3, 3, 2, 4, 0)
> 
> ... and make this function:
> 
> static inline bool read_rndr(unsigned long *v)
> {
> 	unsigned long pass;
> 
> 	/*
> 	 * Reads of RNDR set PSTATE.NZCV to 0b0000 upon success, and set
> 	 * PSTATE.NZCV to 0b0100 otherwise.
> 	 */
> 	asm volatile(
> 		__mrs_s("%0", SYS_RNDR) "\n"
> 	"	cset %1, ne\n"
> 	: "=r" (*v), "=r" (pass)
> 	:
> 	: "cc");
> 
> 	return pass;
> }

Done.

> #define SYS_RNDRRS	sys_reg(3, 3, 2, 4, 1)
> 
> ...and here have:
> 
> static inline bool read_rndrrs(unsigned long *v)
> {
> 	unsigned long pass;
> 
> 	/*
> 	 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 upon success, and
> 	 * set PSTATE.NZCV to 0b0100 otherwise.
> 	 */
> 	asm volatile (
> 		__mrs_s("%0", SYS_RNDRRS) "\n"
> 	"	cset %w1, ne\n"
> 	: "=r" (*v), "=r" (pass)
> 	:
> 	: "cc");
> 
> 	return pass;
> }

Done.

>> +static inline bool arch_get_random_seed_long(unsigned long *v)
>> +{
>> +	/* If RNDR fails, attempt to re-seed with RNDRRS.  */
>> +	return arch_has_random_seed() && (arm_rndr(v) || arm_rndrrs(v));
>> +}
> 
> Here we clobber the value at v even if the reads of RNDR and RNDRRS
> failed. Is that ok?

The x86 inline asm does, so I should think it is ok.

>> +#ifdef CONFIG_ARCH_RANDOM
>> +static bool can_use_rng(const struct arm64_cpu_capabilities *entry, int scope)
>> +{
>> +	unsigned long tmp;
>> +	int i;
>> +
>> +	if (!has_cpuid_feature(entry, scope))
>> +		return false;
>> +
>> +	/*
>> +	 * Re-seed from the true random number source.
>> +	 * If this fails, disable the feature.
>> +	 */
>> +	for (i = 0; i < 10; ++i) {
>> +		if (arm_rndrrs(&tmp))
>> +			return true;
>> +	}
> 
> The ARM ARM (ARM DDI 0487E.a) says:
> 
> | Reseeded Random Number. Returns a 64-bit random number which is
> | reseeded from the True Random Number source at an IMPLEMENTATION
> | DEFINED rate.
> 
> ... and:
> 
> | If the instruction cannot return a genuine random number in a
> | reasonable period of time, PSTATE.NZCV is set to 0b0100 and the data
> | value returned in UNKNOWN.
> 
> ... so it's not clear to me if the retry logic makes sense. Naively I'd
> expect "reasonable period of time" to include 10 attempts.
> 
> Given we'll have to handle failure elsewhere, I suspect that it might be
> best to assume that this works until we encounter evidence to the
> contrary.

Compare arch/x86/kernel/cpu/rdrand.c (x86_init_rdrand) and
arch/powerpc/platforms/powernv/rng.c (initialize_darn).

Both existing implementations have a small loop testing to see of the hardware
passes its own self-check at startup.  Perhaps it's simply paranoia, but it
didn't seem untoward to check.


>> +#ifdef CONFIG_ARCH_RANDOM
>> +	{
>> +		.desc = "Random Number Generator",
>> +		.capability = ARM64_HAS_RNG,
>> +		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
> 
> I strongly suspect we're going to encounter systems where this feature
> is mismatched, such that this can't be a boot CPU feature.
> 
> If we need entropy early in boot, we could detect if the boot CPU had
> the feature, and seed the pool using it, then later make use of a
> system-wide capability.

In the meantime, what do you suggest I place here and in
arch_has_random_seed(), so that it's at least detected early enough for the
boot cpu, but does not require support from all cpus?


r~
Richard Henderson Oct. 27, 2019, 12:38 p.m. UTC | #4
On 10/24/19 1:57 PM, Ard Biesheuvel wrote:
>>> +
>>> +#ifdef CONFIG_ARCH_RANDOM
>>> +
>>> +/*
>>> + * Note that these two interfaces, random and random_seed, are strongly
>>> + * tied to the Intel instructions RDRAND and RDSEED.  RDSEED is the
>>> + * "enhanced" version and has stronger guarantees.  The ARMv8.5-RNG
>>> + * instruction RNDR corresponds to RDSEED, thus we put our implementation
>>> + * into the random_seed set of functions.
>>> + *
> 
> Is that accurate? The ARM ARM says that RNDR is backed by a DRBG which
> 
> ""
> ...is reseeded after an IMPLEMENTATION DEFINED number of random
> numbers has been generated and read
> using the RNDR register.
> """
> 
> which means that you cannot rely on this reseeding to take place at all.
> 
> So the way I read this, RNDR ~= RDRAND and RNDRRS ~= RDSEED, and we
> should wire up the functions below accordingly.

No, that reading is not correct, and is exactly what I was trying to explain in
that paragraph.

RNDR ~= RDSEED.

It's a matter of standards conformance:

RDRAND: NIST SP800-90A.

RDSEED: NIST SP800-90B,
        NIST SP800-90C.

RNDR:   NIST SP800-90A Rev 1,
        NIST SP800-90B,
        NIST SP800-22,
        FIPS 140-2,
        BSI AIS-31,
        NIST SP800-90C.


>>> + * Note as well that Intel does not have an instruction that corresponds
>>> + * to the RNDRRS instruction, so there's no generic interface for that.
>>> + */

...

>>> +static inline bool arch_has_random(void)
>>> +{
>>> +     return false;
>>> +}
>>> +
>>> +static inline bool arch_get_random_long(unsigned long *v)
>>> +{
>>> +     return false;
>>> +}
>>> +
>>> +static inline bool arch_get_random_int(unsigned int *v)
>>> +{
>>> +     return false;
>>> +}
>>> +
>>> +static inline bool arch_has_random_seed(void)
>>> +{
>>> +     return cpus_have_const_cap(ARM64_HAS_RNG);
>>> +}
>>> +
>>> +static inline bool arch_get_random_seed_long(unsigned long *v)
>>> +{
>>> +     /* If RNDR fails, attempt to re-seed with RNDRRS.  */
>>> +     return arch_has_random_seed() && (arm_rndr(v) || arm_rndrrs(v));
>>> +}
>>
>> Here we clobber the value at v even if the reads of RNDR and RNDRRS
>> failed. Is that ok?
>>
>> Maybe we want the accessors to only assign to v upon success.
>>
> 
> I'd be more comfortable with this if arch_get_random_*() were
> annotated as __must_check, and the assignment only done conditionally.

We should probably make that change generically rather than make it arm64 specific.

>>> +static bool can_use_rng(const struct arm64_cpu_capabilities *entry, int scope)
>>> +{
>>> +     unsigned long tmp;
>>> +     int i;
>>> +
>>> +     if (!has_cpuid_feature(entry, scope))
>>> +             return false;
>>> +
>>> +     /*
>>> +      * Re-seed from the true random number source.
>>> +      * If this fails, disable the feature.
>>> +      */
>>> +     for (i = 0; i < 10; ++i) {
>>> +             if (arm_rndrrs(&tmp))
>>> +                     return true;
>>> +     }
>>
>> The ARM ARM (ARM DDI 0487E.a) says:
>>
>> | Reseeded Random Number. Returns a 64-bit random number which is
>> | reseeded from the True Random Number source at an IMPLEMENTATION
>> | DEFINED rate.
>>
>> ... and:
>>
>> | If the instruction cannot return a genuine random number in a
>> | reasonable period of time, PSTATE.NZCV is set to 0b0100 and the data
>> | value returned in UNKNOWN.
>>
>> ... so it's not clear to me if the retry logic makes sense. Naively I'd
>> expect "reasonable period of time" to include 10 attempts.
>>
>> Given we'll have to handle failure elsewhere, I suspect that it might be
>> best to assume that this works until we encounter evidence to the
>> contrary.
>>
> 
> The architecture isn't very clear about what a reasonable period of
> time is, but the fact that it can fail transiently suggests that
> trying it only once doesn't make a lot of sense. However, I'm not sure
> whether having a go at probe time is helpful in deciding whether we
> can use it at all, and I'd be inclined to drop this test altogether.
> This is especially true since we cannot stop EL0 or VMs at EL1 using
> the instruction.

As mentioned in reply to Mark specifically, the other two architectures do the
same thing.  From the comment for x86, I read this as "if something goes wrong,
it's likely completely broken and won't work the first time we check".

Yes, we can't prevent EL0 or VMs from using the insn, but we can avoid having
the kernel itself attempt to use it if the hardware self-check fails.

OTOH, given that every user of arch_get_random() checks the result, and has a
fallback for failure, I suppose we don't need to do anything special.  Just use
the insn and let Z=1 use the fallback failure path.


r~
Ard Biesheuvel Oct. 27, 2019, 6:40 p.m. UTC | #5
On Sun, 27 Oct 2019 at 13:38, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> On 10/24/19 1:57 PM, Ard Biesheuvel wrote:
> >>> +
> >>> +#ifdef CONFIG_ARCH_RANDOM
> >>> +
> >>> +/*
> >>> + * Note that these two interfaces, random and random_seed, are strongly
> >>> + * tied to the Intel instructions RDRAND and RDSEED.  RDSEED is the
> >>> + * "enhanced" version and has stronger guarantees.  The ARMv8.5-RNG
> >>> + * instruction RNDR corresponds to RDSEED, thus we put our implementation
> >>> + * into the random_seed set of functions.
> >>> + *
> >
> > Is that accurate? The ARM ARM says that RNDR is backed by a DRBG which
> >
> > ""
> > ...is reseeded after an IMPLEMENTATION DEFINED number of random
> > numbers has been generated and read
> > using the RNDR register.
> > """
> >
> > which means that you cannot rely on this reseeding to take place at all.
> >
> > So the way I read this, RNDR ~= RDRAND and RNDRRS ~= RDSEED, and we
> > should wire up the functions below accordingly.
>
> No, that reading is not correct, and is exactly what I was trying to explain in
> that paragraph.
>
> RNDR ~= RDSEED.
>
> It's a matter of standards conformance:
>
> RDRAND: NIST SP800-90A.
>
> RDSEED: NIST SP800-90B,
>         NIST SP800-90C.
>
> RNDR:   NIST SP800-90A Rev 1,
>         NIST SP800-90B,
>         NIST SP800-22,
>         FIPS 140-2,
>         BSI AIS-31,
>         NIST SP800-90C.
>

That is not what the ARM ARM says (DDI0487E.a K12.1):

The *TRNG* that seeds the DRBG that backs both RNDR and RNDRRS should conform to

• The NIST SP800-90B standard.
• The NIST SP800-22 standard.
• The FIPS 140-2 standard.
• The BSI AIS-31 standard.

This DRBG itself should conform to NIST SP800-90A Rev 1, and is
reseeded at an implementation defined rate when RNDR is used, or every
time when RNDRRS is used.

So the output of the TRNG itself is not accessible directly, and both
RNDR and RNDRRS return output generated by a DRBG. NIST SP800-90A
suggests a minimum seed size of 440 bits, so using RNDRRS to generate
64-bit seeds is reasonable, even though it comes from a DRBG. But RNDR
is definitely not equivalent to RDSEED.



>
> >>> + * Note as well that Intel does not have an instruction that corresponds
> >>> + * to the RNDRRS instruction, so there's no generic interface for that.
> >>> + */
>
> ...
>
> >>> +static inline bool arch_has_random(void)
> >>> +{
> >>> +     return false;
> >>> +}
> >>> +
> >>> +static inline bool arch_get_random_long(unsigned long *v)
> >>> +{
> >>> +     return false;
> >>> +}
> >>> +
> >>> +static inline bool arch_get_random_int(unsigned int *v)
> >>> +{
> >>> +     return false;
> >>> +}
> >>> +
> >>> +static inline bool arch_has_random_seed(void)
> >>> +{
> >>> +     return cpus_have_const_cap(ARM64_HAS_RNG);
> >>> +}
> >>> +
> >>> +static inline bool arch_get_random_seed_long(unsigned long *v)
> >>> +{
> >>> +     /* If RNDR fails, attempt to re-seed with RNDRRS.  */
> >>> +     return arch_has_random_seed() && (arm_rndr(v) || arm_rndrrs(v));
> >>> +}
> >>
> >> Here we clobber the value at v even if the reads of RNDR and RNDRRS
> >> failed. Is that ok?
> >>
> >> Maybe we want the accessors to only assign to v upon success.
> >>
> >
> > I'd be more comfortable with this if arch_get_random_*() were
> > annotated as __must_check, and the assignment only done conditionally.
>
> We should probably make that change generically rather than make it arm64 specific.
>
> >>> +static bool can_use_rng(const struct arm64_cpu_capabilities *entry, int scope)
> >>> +{
> >>> +     unsigned long tmp;
> >>> +     int i;
> >>> +
> >>> +     if (!has_cpuid_feature(entry, scope))
> >>> +             return false;
> >>> +
> >>> +     /*
> >>> +      * Re-seed from the true random number source.
> >>> +      * If this fails, disable the feature.
> >>> +      */
> >>> +     for (i = 0; i < 10; ++i) {
> >>> +             if (arm_rndrrs(&tmp))
> >>> +                     return true;
> >>> +     }
> >>
> >> The ARM ARM (ARM DDI 0487E.a) says:
> >>
> >> | Reseeded Random Number. Returns a 64-bit random number which is
> >> | reseeded from the True Random Number source at an IMPLEMENTATION
> >> | DEFINED rate.
> >>
> >> ... and:
> >>
> >> | If the instruction cannot return a genuine random number in a
> >> | reasonable period of time, PSTATE.NZCV is set to 0b0100 and the data
> >> | value returned in UNKNOWN.
> >>
> >> ... so it's not clear to me if the retry logic makes sense. Naively I'd
> >> expect "reasonable period of time" to include 10 attempts.
> >>
> >> Given we'll have to handle failure elsewhere, I suspect that it might be
> >> best to assume that this works until we encounter evidence to the
> >> contrary.
> >>
> >
> > The architecture isn't very clear about what a reasonable period of
> > time is, but the fact that it can fail transiently suggests that
> > trying it only once doesn't make a lot of sense. However, I'm not sure
> > whether having a go at probe time is helpful in deciding whether we
> > can use it at all, and I'd be inclined to drop this test altogether.
> > This is especially true since we cannot stop EL0 or VMs at EL1 using
> > the instruction.
>
> As mentioned in reply to Mark specifically, the other two architectures do the
> same thing.  From the comment for x86, I read this as "if something goes wrong,
> it's likely completely broken and won't work the first time we check".
>
> Yes, we can't prevent EL0 or VMs from using the insn, but we can avoid having
> the kernel itself attempt to use it if the hardware self-check fails.
>
> OTOH, given that every user of arch_get_random() checks the result, and has a
> fallback for failure, I suppose we don't need to do anything special.  Just use
> the insn and let Z=1 use the fallback failure path.
>
>
> r~
Ard Biesheuvel Oct. 28, 2019, 7:26 a.m. UTC | #6
On Sun, 27 Oct 2019 at 19:40, Ard Biesheuvel <ard.biesheuvel@linaro.org> wrote:
>
> On Sun, 27 Oct 2019 at 13:38, Richard Henderson
> <richard.henderson@linaro.org> wrote:
> >
> > On 10/24/19 1:57 PM, Ard Biesheuvel wrote:
> > >>> +
> > >>> +#ifdef CONFIG_ARCH_RANDOM
> > >>> +
> > >>> +/*
> > >>> + * Note that these two interfaces, random and random_seed, are strongly
> > >>> + * tied to the Intel instructions RDRAND and RDSEED.  RDSEED is the
> > >>> + * "enhanced" version and has stronger guarantees.  The ARMv8.5-RNG
> > >>> + * instruction RNDR corresponds to RDSEED, thus we put our implementation
> > >>> + * into the random_seed set of functions.
> > >>> + *
> > >
> > > Is that accurate? The ARM ARM says that RNDR is backed by a DRBG which
> > >
> > > ""
> > > ...is reseeded after an IMPLEMENTATION DEFINED number of random
> > > numbers has been generated and read
> > > using the RNDR register.
> > > """
> > >
> > > which means that you cannot rely on this reseeding to take place at all.
> > >
> > > So the way I read this, RNDR ~= RDRAND and RNDRRS ~= RDSEED, and we
> > > should wire up the functions below accordingly.
> >
> > No, that reading is not correct, and is exactly what I was trying to explain in
> > that paragraph.
> >
> > RNDR ~= RDSEED.
> >
> > It's a matter of standards conformance:
> >
> > RDRAND: NIST SP800-90A.
> >
> > RDSEED: NIST SP800-90B,
> >         NIST SP800-90C.
> >
> > RNDR:   NIST SP800-90A Rev 1,
> >         NIST SP800-90B,
> >         NIST SP800-22,
> >         FIPS 140-2,
> >         BSI AIS-31,
> >         NIST SP800-90C.
> >
>
> That is not what the ARM ARM says (DDI0487E.a K12.1):
>
> The *TRNG* that seeds the DRBG that backs both RNDR and RNDRRS should conform to
>
> • The NIST SP800-90B standard.
> • The NIST SP800-22 standard.
> • The FIPS 140-2 standard.
> • The BSI AIS-31 standard.
>
> This DRBG itself should conform to NIST SP800-90A Rev 1, and is
> reseeded at an implementation defined rate when RNDR is used, or every
> time when RNDRRS is used.
>
> So the output of the TRNG itself is not accessible directly, and both
> RNDR and RNDRRS return output generated by a DRBG. NIST SP800-90A
> suggests a minimum seed size of 440 bits, so using RNDRRS to generate
> 64-bit seeds is reasonable,

This isn't 100% accurate, but the point is that NIST SP800-90A defines
seed sizes for all DRBGs that exceed 64 bits, and so taking at most 64
bits of output from a DRBG seeded with 64+ bits of true entropy is a
reasonable approximation of using the seed directly. The downside, of
course, is that you need to call the instruction multiple times to get
a seed of the mandated size, and so from a certification POV, this may
still be problematic.

I brought this up some time ago, and suggested that we should have one
instruction to produce strong entropy, and one instruction to return
the output of the DRBG, with the ability to set the seed explicitly,
which would allow the true entropy from the first instruction to be
mixed with input from another source, in order to mitigate the trust
issues that affect RDRAND/RDSEED.


> even though it comes from a DRBG. But RNDR
> is definitely not equivalent to RDSEED.
>
Mark Rutland Oct. 28, 2019, 12:29 p.m. UTC | #7
On Sun, Oct 27, 2019 at 01:14:15PM +0100, Richard Henderson wrote:
> On 10/24/19 1:32 PM, Mark Rutland wrote:
> >> +static inline bool arch_get_random_seed_long(unsigned long *v)
> >> +{
> >> +	/* If RNDR fails, attempt to re-seed with RNDRRS.  */
> >> +	return arch_has_random_seed() && (arm_rndr(v) || arm_rndrrs(v));
> >> +}
> > 
> > Here we clobber the value at v even if the reads of RNDR and RNDRRS
> > failed. Is that ok?
> 
> The x86 inline asm does, so I should think it is ok.

Ok. Could we please note that in the commit message? That way it
shouldn't be asked again in review. :)

Otherwise, making the assignment conditional would be nice. The compiler
should be able to optimize away the conditional when it would be
clobbered, and we could get a compiler warning for the case where an
uninitialized value would be consumed.

> >> +#ifdef CONFIG_ARCH_RANDOM
> >> +static bool can_use_rng(const struct arm64_cpu_capabilities *entry, int scope)
> >> +{
> >> +	unsigned long tmp;
> >> +	int i;
> >> +
> >> +	if (!has_cpuid_feature(entry, scope))
> >> +		return false;
> >> +
> >> +	/*
> >> +	 * Re-seed from the true random number source.
> >> +	 * If this fails, disable the feature.
> >> +	 */
> >> +	for (i = 0; i < 10; ++i) {
> >> +		if (arm_rndrrs(&tmp))
> >> +			return true;
> >> +	}
> > 
> > The ARM ARM (ARM DDI 0487E.a) says:
> > 
> > | Reseeded Random Number. Returns a 64-bit random number which is
> > | reseeded from the True Random Number source at an IMPLEMENTATION
> > | DEFINED rate.
> > 
> > ... and:
> > 
> > | If the instruction cannot return a genuine random number in a
> > | reasonable period of time, PSTATE.NZCV is set to 0b0100 and the data
> > | value returned in UNKNOWN.
> > 
> > ... so it's not clear to me if the retry logic makes sense. Naively I'd
> > expect "reasonable period of time" to include 10 attempts.
> > 
> > Given we'll have to handle failure elsewhere, I suspect that it might be
> > best to assume that this works until we encounter evidence to the
> > contrary.
> 
> Compare arch/x86/kernel/cpu/rdrand.c (x86_init_rdrand) and
> arch/powerpc/platforms/powernv/rng.c (initialize_darn).

One thing to bear in mind here is that for arm64 we're likely to have a
larger envelope of implementations, and unlike x86 and powerpc we're at
the mercy of second-party integration.

> Both existing implementations have a small loop testing to see of the hardware
> passes its own self-check at startup.  Perhaps it's simply paranoia, but it
> didn't seem untoward to check.

My concern is that whatever loop bound we choose could fall either side
of that "reasonable period" on some implementations, so whether or not
we detect the RNG will be effectively random.

The current wording of the ARM ARM suggests "a reasonable period of
time" could be larger than a few iterations of the loop:

| The definition of “reasonable period of time” is IMPLEMENTATION
| DEFINED. The expectation is that software might use this as an
| opportunity to reschedule or run a different routine, perhaps after a
| small number of retries have failed to return a valid value.

Given that, I'd be happier if we always trusted the ID register to
determine the presence of the RNG, and failing the check only resulted
in a pr_warn() that the RNG might not be producing entropy at a
sufficient rate.

We might need more architectural guidance/clarification here, given that
seems to not be in line with expectations on other architectures.

> >> +#ifdef CONFIG_ARCH_RANDOM
> >> +	{
> >> +		.desc = "Random Number Generator",
> >> +		.capability = ARM64_HAS_RNG,
> >> +		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
> > 
> > I strongly suspect we're going to encounter systems where this feature
> > is mismatched, such that this can't be a boot CPU feature.
> > 
> > If we need entropy early in boot, we could detect if the boot CPU had
> > the feature, and seed the pool using it, then later make use of a
> > system-wide capability.
> 
> In the meantime, what do you suggest I place here and in
> arch_has_random_seed(), so that it's at least detected early enough for the
> boot cpu, but does not require support from all cpus?

I'd suggest first doing the generic all-cpus support, with the boot CPU
being a separate special case.

For the boot CPU we might only need a separate callback to seed that
entropy into the main pool. That can either check a local cap or read
the ID register directly.

How much entropy are we liable to consume before bringing up
secondaries? IIRC that's for things like allocator randomization, and I
presume that seeding the main pool would be sufficient for that.

Thanks,
Mark.

Patch
diff mbox series

diff --git a/Documentation/arm64/cpu-feature-registers.rst b/Documentation/arm64/cpu-feature-registers.rst
index 2955287e9acc..78d6f5c6e824 100644
--- a/Documentation/arm64/cpu-feature-registers.rst
+++ b/Documentation/arm64/cpu-feature-registers.rst
@@ -117,6 +117,8 @@  infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | RNDR                         | [63-60] |    y    |
+     +------------------------------+---------+---------+
      | TS                           | [55-52] |    y    |
      +------------------------------+---------+---------+
      | FHM                          | [51-48] |    y    |
diff --git a/arch/arm64/include/asm/archrandom.h b/arch/arm64/include/asm/archrandom.h
new file mode 100644
index 000000000000..80369898e274
--- /dev/null
+++ b/arch/arm64/include/asm/archrandom.h
@@ -0,0 +1,76 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARCHRANDOM_H
+#define _ASM_ARCHRANDOM_H
+
+#include <asm/cpufeature.h>
+
+/* Unconditional execution of RNDR and RNDRRS.  */
+
+static inline bool arm_rndr(unsigned long *v)
+{
+	int pass;
+
+	asm("mrs %0, s3_3_c2_c4_0\n\t"  /* RNDR */
+	    "cset %w1, ne"
+	    : "=r"(*v), "=r"(pass));
+	return pass != 0;
+}
+
+static inline bool arm_rndrrs(unsigned long *v)
+{
+	int pass;
+
+	asm("mrs %0, s3_3_c2_c4_1\n\t"  /* RNDRRS */
+	    "cset %w1, ne"
+	    : "=r"(*v), "=r"(pass));
+	return pass != 0;
+}
+
+#ifdef CONFIG_ARCH_RANDOM
+
+/*
+ * Note that these two interfaces, random and random_seed, are strongly
+ * tied to the Intel instructions RDRAND and RDSEED.  RDSEED is the
+ * "enhanced" version and has stronger guarantees.  The ARMv8.5-RNG
+ * instruction RNDR corresponds to RDSEED, thus we put our implementation
+ * into the random_seed set of functions.
+ *
+ * Note as well that Intel does not have an instruction that corresponds
+ * to the RNDRRS instruction, so there's no generic interface for that.
+ */
+static inline bool arch_has_random(void)
+{
+	return false;
+}
+
+static inline bool arch_get_random_long(unsigned long *v)
+{
+	return false;
+}
+
+static inline bool arch_get_random_int(unsigned int *v)
+{
+	return false;
+}
+
+static inline bool arch_has_random_seed(void)
+{
+	return cpus_have_const_cap(ARM64_HAS_RNG);
+}
+
+static inline bool arch_get_random_seed_long(unsigned long *v)
+{
+	/* If RNDR fails, attempt to re-seed with RNDRRS.  */
+	return arch_has_random_seed() && (arm_rndr(v) || arm_rndrrs(v));
+}
+
+static inline bool arch_get_random_seed_int(unsigned int *v)
+{
+	unsigned long vl = 0;
+	bool ret = arch_get_random_seed_long(&vl);
+	*v = vl;
+	return ret;
+}
+
+#endif /* CONFIG_ARCH_RANDOM */
+#endif /* _ASM_ARCHRANDOM_H */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index f19fe4b9acc4..2fc15765d25d 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -52,7 +52,8 @@ 
 #define ARM64_HAS_IRQ_PRIO_MASKING		42
 #define ARM64_HAS_DCPODP			43
 #define ARM64_WORKAROUND_1463225		44
+#define ARM64_HAS_RNG				45
 
-#define ARM64_NCAPS				45
+#define ARM64_NCAPS				46
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 972d196c7714..7a0c159661cd 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -539,6 +539,7 @@ 
 			 ENDIAN_SET_EL1 | SCTLR_EL1_UCI  | SCTLR_EL1_RES1)
 
 /* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT		60
 #define ID_AA64ISAR0_TS_SHIFT		52
 #define ID_AA64ISAR0_FHM_SHIFT		48
 #define ID_AA64ISAR0_DP_SHIFT		44
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index cabebf1a7976..34b9731e1fab 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -119,6 +119,7 @@  static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
  * sync with the documentation of the CPU feature register ABI.
  */
 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
@@ -1261,6 +1262,27 @@  static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
 }
 #endif
 
+#ifdef CONFIG_ARCH_RANDOM
+static bool can_use_rng(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	unsigned long tmp;
+	int i;
+
+	if (!has_cpuid_feature(entry, scope))
+		return false;
+
+	/*
+	 * Re-seed from the true random number source.
+	 * If this fails, disable the feature.
+	 */
+	for (i = 0; i < 10; ++i) {
+		if (arm_rndrrs(&tmp))
+			return true;
+	}
+	return false;
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -1560,6 +1582,18 @@  static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
 	},
+#endif
+#ifdef CONFIG_ARCH_RANDOM
+	{
+		.desc = "Random Number Generator",
+		.capability = ARM64_HAS_RNG,
+		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
+		.matches = can_use_rng,
+		.sys_reg = SYS_ID_AA64ISAR0_EL1,
+		.field_pos = ID_AA64ISAR0_RNDR_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
 #endif
 	{},
 };
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 950a56b71ff0..a035c178102a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1421,6 +1421,18 @@  config ARM64_PTR_AUTH
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARCH_RANDOM
+	bool "Enable support for random number generation"
+	default y
+	help
+	  Random number generation (part of the ARMv8.5 Extensions)
+	  provides a high bandwidth, cryptographically secure
+	  hardware random number generator.
+
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index df0fc997dc3e..f26a0a8cc0d0 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -539,7 +539,7 @@  endmenu
 
 config RANDOM_TRUST_CPU
 	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
-	depends on X86 || S390 || PPC
+	depends on X86 || S390 || PPC || ARM64
 	default n
 	help
 	Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
@@ -559,4 +559,4 @@  config RANDOM_TRUST_BOOTLOADER
 	device randomness. Say Y here to assume the entropy provided by the
 	booloader is trustworthy so it will be added to the kernel's entropy
 	pool. Otherwise, say N here so it will be regarded as device input that
-	only mixes the entropy pool.
\ No newline at end of file
+	only mixes the entropy pool.