@@ -9606,6 +9606,13 @@ F: Documentation/core-api/atomic_ops.rst
F: Documentation/core-api/refcount-vs-atomic.rst
F: Documentation/memory-barriers.txt
+LINUX RANDOM NUMBER GENERATOR (LRNG) DRIVER
+M: Stephan Mueller <smueller@chronox.de>
+S: Maintained
+W: https://www.chronox.de/lrng.html
+F: drivers/char/lrng/*
+F: include/linux/lrng.h
+
LIS3LV02D ACCELEROMETER DRIVER
M: Eric Piel <eric.piel@tremplin-utc.net>
S: Maintained
@@ -535,6 +535,8 @@ config ADI
and SSM (Silicon Secured Memory). Intended consumers of this
driver include crash and makedumpfile.
+source "drivers/char/lrng/Kconfig"
+
endmenu
config RANDOM_TRUST_CPU
@@ -3,7 +3,14 @@
# Makefile for the kernel character device drivers.
#
-obj-y += mem.o random.o
+obj-y += mem.o
+
+ifeq ($(CONFIG_LRNG),y)
+ obj-y += lrng/
+else
+ obj-y += random.o
+endif
+
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
new file mode 100644
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Linux Random Number Generator configuration
+#
+
+menuconfig LRNG
+ bool "Linux Random Number Generator"
+ help
+	  The Linux Random Number Generator (LRNG) is a full replacement
+	  for the existing /dev/random implementation provided by
+	  drivers/char/random.c. It collects entropy from different noise
+	  sources and delivers significant entropy during boot.
+
+if LRNG
+
+choice
+ prompt "LRNG Entropy Pool Size"
+ default LRNG_POOL_SIZE_4096
+ help
+	  Select the size of the LRNG entropy pool. The pool size
+	  determines the maximum amount of entropy the LRNG can
+	  maintain. The larger the pool, the more entropy can be
+	  maintained, but the less often older entropy values are
+	  overwritten with new entropy.
+
+ config LRNG_POOL_SIZE_512
+ bool "512 bits"
+
+ config LRNG_POOL_SIZE_1024
+ bool "1024 bits"
+
+ config LRNG_POOL_SIZE_2048
+ bool "2048 bits"
+
+ config LRNG_POOL_SIZE_4096
+ bool "4096 bits (default)"
+
+ config LRNG_POOL_SIZE_8192
+ bool "8192 bits"
+
+ config LRNG_POOL_SIZE_16384
+ bool "16384 bits"
+
+ config LRNG_POOL_SIZE_32768
+ bool "32768 bits"
+
+ config LRNG_POOL_SIZE_65536
+ bool "65536 bits"
+
+ config LRNG_POOL_SIZE_131072
+ bool "131072 bits"
+endchoice
+
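+# The integer value is the left shift applied to the base pool size of
+# 16 words, i.e. LRNG_POOL_SIZE = 16 << CONFIG_LRNG_POOL_SIZE words
+# (see lrng_internal.h).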
+config LRNG_POOL_SIZE
+ int
+ default 0 if LRNG_POOL_SIZE_512
+ default 1 if LRNG_POOL_SIZE_1024
+ default 2 if LRNG_POOL_SIZE_2048
+ default 3 if LRNG_POOL_SIZE_4096
+ default 4 if LRNG_POOL_SIZE_8192
+ default 5 if LRNG_POOL_SIZE_16384
+ default 6 if LRNG_POOL_SIZE_32768
+ default 7 if LRNG_POOL_SIZE_65536
+ default 8 if LRNG_POOL_SIZE_131072
+
+endif # LRNG
new file mode 100644
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux Random Number Generator.
+#
+
+obj-y += lrng_pool.o lrng_aux.o \
+ lrng_sw_noise.o lrng_archrandom.o \
+ lrng_drng.o lrng_chacha20.o \
+		   lrng_interfaces.o
new file mode 100644
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Fast Noise Source: CPU-based noise source
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/random.h>
+
+#include "lrng_internal.h"
+
+/*
+ * The estimated entropy of the data is one 32nd of
+ * LRNG_DRNG_SECURITY_STRENGTH_BITS.
+ * As we have no ability to review the implementation of those noise sources,
+ * it is prudent to have a conservative estimate here.
+ */
+#define LRNG_ARCHRANDOM_DEFAULT_STRENGTH (LRNG_DRNG_SECURITY_STRENGTH_BITS>>5)
+#define LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH LRNG_DRNG_SECURITY_STRENGTH_BITS
+#ifdef CONFIG_RANDOM_TRUST_CPU
+static u32 archrandom = LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH;
+#else
+static u32 archrandom = LRNG_ARCHRANDOM_DEFAULT_STRENGTH;
+#endif
+module_param(archrandom, uint, 0644);
+MODULE_PARM_DESC(archrandom, "Entropy in bits of 256 data bits from CPU noise "
+ "source (e.g. RDRAND)");
+
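+/*
+ * Parse the random.trust_cpu= kernel command line option: if set to true,
+ * credit the CPU noise source with full DRNG security strength, otherwise
+ * fall back to the conservative default estimate.
+ */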
+static int __init lrng_parse_trust_cpu(char *arg)
+{
+ int ret;
+ bool trust_cpu = false;
+
+ ret = kstrtobool(arg, &trust_cpu);
+ if (ret)
+ return ret;
+
+ if (trust_cpu)
+ archrandom = LRNG_ARCHRANDOM_TRUST_CPU_STRENGTH;
+ else
+ archrandom = LRNG_ARCHRANDOM_DEFAULT_STRENGTH;
+
+ return 0;
+}
+early_param("random.trust_cpu", lrng_parse_trust_cpu);
+
+/**
+ * Get CPU noise source entropy
+ *
+ * @outbuf: buffer to store entropy of size LRNG_DRNG_SECURITY_STRENGTH_BYTES
+ * @return: > 0 on success where value provides the added entropy in bits
+ * 0 if no fast source was available
+ */
+u32 lrng_get_arch(u8 *outbuf)
+{
+ u32 i, ent_bits = archrandom;
+
+ /* operate on full blocks */
+ BUILD_BUG_ON(LRNG_DRNG_SECURITY_STRENGTH_BYTES % sizeof(unsigned long));
+ /* ensure we have aligned buffers */
+ BUILD_BUG_ON(LRNG_KCAPI_ALIGN % sizeof(unsigned long));
+
+ if (!ent_bits)
+ return 0;
+
+ for (i = 0; i < LRNG_DRNG_SECURITY_STRENGTH_BYTES;
+ i += sizeof(unsigned long)) {
+ if (!arch_get_random_seed_long((unsigned long *)(outbuf + i)) &&
+ !arch_get_random_long((unsigned long *)(outbuf + i))) {
+ archrandom = 0;
+ return 0;
+ }
+ }
+
+ /* Obtain entropy statement -- cap entropy to buffer size in bits */
+ ent_bits = min_t(u32, ent_bits, LRNG_DRNG_SECURITY_STRENGTH_BITS);
+ pr_debug("obtained %u bits of entropy from CPU RNG noise source\n",
+ ent_bits);
+ return ent_bits;
+}
+
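+/*
+ * Return the amount of entropy the slow noise source still has to provide
+ * after crediting the fast noise sources (CPU RNG and Jitter RNG) with their
+ * respective entropy estimates.
+ */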
+u32 lrng_slow_noise_req_entropy(u32 required_entropy_bits)
+{
+ u32 arch_ent_bits = min_t(u32, archrandom,
+ LRNG_DRNG_SECURITY_STRENGTH_BITS);
+ u32 fast_noise_entropy = arch_ent_bits + lrng_jent_entropylevel();
+
+ if (fast_noise_entropy > required_entropy_bits)
+ return 0;
+ return (required_entropy_bits - fast_noise_entropy);
+}
new file mode 100644
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG auxiliary interfaces
+ *
+ * Copyright (C) 2019 Stephan Mueller <smueller@chronox.de>
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ * Copyright (C) 2016 Jason Cooper <jason@lakedaemon.net>
+ */
+
+#include <linux/mm.h>
+#include <linux/random.h>
+
+#include "lrng_internal.h"
+
+struct batched_entropy {
+ union {
+ u64 entropy_u64[LRNG_DRNG_BLOCKSIZE / sizeof(u64)];
+ u32 entropy_u32[LRNG_DRNG_BLOCKSIZE / sizeof(u32)];
+ };
+ unsigned int position;
+ spinlock_t batch_lock;
+};
+
+/*
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is either as good as RDRAND or as good as /dev/urandom, with the
+ * goal of being quite fast and not depleting entropy.
+ */
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
+u64 get_random_u64(void)
+{
+ u64 ret;
+ unsigned long flags;
+ struct batched_entropy *batch;
+
+#if BITS_PER_LONG == 64
+ if (arch_get_random_long((unsigned long *)&ret))
+ return ret;
+#else
+ if (arch_get_random_long((unsigned long *)&ret) &&
+ arch_get_random_long((unsigned long *)&ret + 1))
+ return ret;
+#endif
+
+ lrng_debug_report_seedlevel("get_random_u64");
+
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+ spin_lock_irqsave(&batch->batch_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ lrng_drng_get_atomic((u8 *)batch->entropy_u64,
+ LRNG_DRNG_BLOCKSIZE);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u64[batch->position++];
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(get_random_u64);
+
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
+
+u32 get_random_u32(void)
+{
+ u32 ret;
+ unsigned long flags;
+ struct batched_entropy *batch;
+
+ if (arch_get_random_int(&ret))
+ return ret;
+
+ lrng_debug_report_seedlevel("get_random_u32");
+
+ batch = raw_cpu_ptr(&batched_entropy_u32);
+ spin_lock_irqsave(&batch->batch_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ lrng_drng_get_atomic((u8 *)batch->entropy_u32,
+ LRNG_DRNG_BLOCKSIZE);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u32[batch->position++];
+ spin_unlock_irqrestore(&batch->batch_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(get_random_u32);
+
+/*
+ * It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage.
+ */
+void invalidate_batched_entropy(void)
+{
+ int cpu;
+ unsigned long flags;
+
+ for_each_possible_cpu(cpu) {
+ struct batched_entropy *batched_entropy;
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+ spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+ batched_entropy->position = 0;
+ spin_unlock(&batched_entropy->batch_lock);
+
+ batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+ spin_lock(&batched_entropy->batch_lock);
+ batched_entropy->position = 0;
+ spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
+ }
+}
+
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
+ * @range: The size of the area, starting at @start, within which the
+ * random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
+ *
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned. We now align it regardless.
+ *
+ * Return: A page aligned address within [start, start + range). On error,
+ * @start is returned.
+ */
+unsigned long randomize_page(unsigned long start, unsigned long range)
+{
+ if (!PAGE_ALIGNED(start)) {
+ range -= PAGE_ALIGN(start) - start;
+ start = PAGE_ALIGN(start);
+ }
+
+ if (start > ULONG_MAX - range)
+ range = ULONG_MAX - start;
+
+ range >>= PAGE_SHIFT;
+
+ if (range == 0)
+ return start;
+
+ return start + (get_random_long() % range << PAGE_SHIFT);
+}
new file mode 100644
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * Backend for the LRNG providing the cryptographic primitives using
+ * ChaCha20 cipher implementations.
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/chacha.h>
+#include <linux/cryptohash.h>
+#include <linux/lrng.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+
+#include "lrng_chacha20.h"
+#include "lrng_internal.h"
+
+/******************************* ChaCha20 DRNG *******************************/
+
+#define CHACHA_BLOCK_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+
+struct chacha20_state {
+ struct chacha20_block block;
+};
+
+/*
+ * Have a static memory block for the ChaCha20 DRNG instance to avoid calling
+ * kmalloc too early in the boot cycle. For subsequent allocation requests,
+ * such as per-NUMA-node DRNG instances, kmalloc will be used.
+ */
+struct chacha20_state chacha20;
+
+/**
+ * Update the ChaCha20 state by either using an unused part of the output
+ * buffer or by generating one ChaCha20 block. The data is XORed into the
+ * key part of the state, i.e. half of the ChaCha20 state. This shall ensure
+ * backtracking resistance as well as a proper mix of the ChaCha20 state once
+ * the key is injected.
+ */
+static void lrng_chacha20_update(struct chacha20_state *chacha20_state,
+ u32 *buf, u32 used_words)
+{
+ struct chacha20_block *chacha20 = &chacha20_state->block;
+ u32 i, tmp[CHACHA_BLOCK_WORDS];
+
+ BUILD_BUG_ON(sizeof(struct chacha20_block) != CHACHA_BLOCK_SIZE);
+ BUILD_BUG_ON(CHACHA_BLOCK_SIZE != 2 * CHACHA_KEY_SIZE);
+
+ if (used_words > CHACHA_KEY_SIZE_WORDS) {
+ chacha20_block(&chacha20->constants[0], (u8 *)tmp);
+ for (i = 0; i < CHACHA_KEY_SIZE_WORDS; i++)
+ chacha20->key.u[i] ^= tmp[i];
+ memzero_explicit(tmp, sizeof(tmp));
+ } else {
+ for (i = 0; i < CHACHA_KEY_SIZE_WORDS; i++)
+ chacha20->key.u[i] ^= buf[i + used_words];
+ }
+
+ /* Deterministic increment of nonce as required in RFC 7539 chapter 4 */
+ chacha20->nonce[0]++;
+ if (chacha20->nonce[0] == 0)
+ chacha20->nonce[1]++;
+ if (chacha20->nonce[1] == 0)
+ chacha20->nonce[2]++;
+
+	/* Leave the counter untouched as its start value is undefined in the RFC */
+}
+
+/**
+ * Seed the ChaCha20 DRNG by injecting the input data into the key part of
+ * the ChaCha20 state. If the input data is longer than the ChaCha20 key size,
+ * perform a ChaCha20 operation after processing of key size input data.
+ * This operation shall spread out the entropy into the ChaCha20 state before
+ * new entropy is injected into the key part.
+ */
+static int lrng_cc20_drng_seed_helper(void *drng, const u8 *inbuf, u32 inbuflen)
+{
+ struct chacha20_state *chacha20_state = (struct chacha20_state *)drng;
+ struct chacha20_block *chacha20 = &chacha20_state->block;
+
+ while (inbuflen) {
+ u32 i, todo = min_t(u32, inbuflen, CHACHA_KEY_SIZE);
+
+ for (i = 0; i < todo; i++)
+ chacha20->key.b[i] ^= inbuf[i];
+
+ /* Break potential dependencies between the inbuf key blocks */
+ lrng_chacha20_update(chacha20_state, NULL,
+ CHACHA_BLOCK_WORDS);
+ inbuf += todo;
+ inbuflen -= todo;
+ }
+
+ return 0;
+}
+
+/**
+ * ChaCha20 DRNG generation of random numbers: the stream output of ChaCha20
+ * is the random number. After the completion of the generation of the
+ * stream, the entire ChaCha20 state is updated.
+ *
+ * Note, as ChaCha20 implements a 32-bit counter, we must ensure
+ * that this function is only invoked for at most 2^32 - 1 ChaCha20 blocks
+ * before a reseed or an update happens. This is ensured by the variable
+ * outbuflen, which is a 32-bit integer defining the number of bytes to be
+ * generated by the ChaCha20 DRNG. At the end of this function, an update
+ * operation is invoked, which implies that the 32-bit counter can never
+ * overflow in this implementation.
+ */
+static int lrng_cc20_drng_generate_helper(void *drng, u8 *outbuf, u32 outbuflen)
+{
+ struct chacha20_state *chacha20_state = (struct chacha20_state *)drng;
+ struct chacha20_block *chacha20 = &chacha20_state->block;
+ u32 aligned_buf[CHACHA_BLOCK_WORDS], ret = outbuflen,
+ used = CHACHA_BLOCK_WORDS;
+ int zeroize_buf = 0;
+
+ while (outbuflen >= CHACHA_BLOCK_SIZE) {
+ chacha20_block(&chacha20->constants[0], outbuf);
+ outbuf += CHACHA_BLOCK_SIZE;
+ outbuflen -= CHACHA_BLOCK_SIZE;
+ }
+
+ if (outbuflen) {
+ chacha20_block(&chacha20->constants[0], (u8 *)aligned_buf);
+ memcpy(outbuf, aligned_buf, outbuflen);
+ used = ((outbuflen + sizeof(aligned_buf[0]) - 1) /
+ sizeof(aligned_buf[0]));
+ zeroize_buf = 1;
+ }
+
+ lrng_chacha20_update(chacha20_state, aligned_buf, used);
+
+ if (zeroize_buf)
+ memzero_explicit(aligned_buf, sizeof(aligned_buf));
+
+ return ret;
+}
+
+/**
+ * ChaCha20 DRNG that provides full strength, i.e. the output is capable
+ * of transporting 1 bit of entropy per data bit, provided the DRNG was
+ * seeded with 256 bits of entropy. This is achieved by folding the ChaCha20
+ * block output of 512 bits in half using XOR.
+ *
+ * Other than the output handling, the implementation is conceptually
+ * identical to lrng_cc20_drng_generate_helper.
+ */
+static int lrng_cc20_drng_generate_helper_full(void *drng, u8 *outbuf,
+ u32 outbuflen)
+{
+ struct chacha20_state *chacha20_state = (struct chacha20_state *)drng;
+ struct chacha20_block *chacha20 = &chacha20_state->block;
+ u32 aligned_buf[CHACHA_BLOCK_WORDS];
+ u32 ret = outbuflen;
+
+ while (outbuflen >= CHACHA_BLOCK_SIZE) {
+ u32 i;
+
+ chacha20_block(&chacha20->constants[0], outbuf);
+
+ /* fold output in half */
+		for (i = 0; i < (CHACHA_BLOCK_SIZE / 2); i++)
+			outbuf[i] ^= outbuf[i + (CHACHA_BLOCK_SIZE / 2)];
+
+ outbuf += CHACHA_BLOCK_SIZE / 2;
+ outbuflen -= CHACHA_BLOCK_SIZE / 2;
+ }
+
+ while (outbuflen) {
+ u32 i, todo = min_t(u32, CHACHA_BLOCK_SIZE / 2, outbuflen);
+
+ chacha20_block(&chacha20->constants[0], (u8 *)aligned_buf);
+
+ /* fold output in half */
+ for (i = 0; i < (CHACHA_BLOCK_WORDS / 2); i++)
+ aligned_buf[i] ^=
+ aligned_buf[i + (CHACHA_BLOCK_WORDS / 2)];
+
+ memcpy(outbuf, aligned_buf, todo);
+ outbuflen -= todo;
+ outbuf += todo;
+ }
+ memzero_explicit(aligned_buf, sizeof(aligned_buf));
+
+ lrng_chacha20_update(chacha20_state, NULL, CHACHA_BLOCK_WORDS);
+
+ return ret;
+}
+
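+/*
+ * Initialize the ChaCha20 state with the RFC 7539 constants and mix jiffies,
+ * the high-resolution time stamp and, if available, data from the CPU RNG
+ * into the key and nonce parts of the state.
+ */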
+void lrng_cc20_init_state(struct chacha20_state *state)
+{
+ struct chacha20_block *chacha20 = &state->block;
+ unsigned long v;
+ u32 i;
+
+ lrng_cc20_init_rfc7539(chacha20);
+
+ for (i = 0; i < CHACHA_KEY_SIZE_WORDS; i++) {
+ chacha20->key.u[i] ^= jiffies;
+ chacha20->key.u[i] ^= random_get_entropy();
+ if (arch_get_random_seed_long(&v) || arch_get_random_long(&v))
+ chacha20->key.u[i] ^= v;
+ }
+
+ for (i = 0; i < 3; i++) {
+ chacha20->nonce[i] ^= jiffies;
+ chacha20->nonce[i] ^= random_get_entropy();
+ if (arch_get_random_seed_long(&v) || arch_get_random_long(&v))
+ chacha20->nonce[i] ^= v;
+ }
+
+ pr_info("ChaCha20 core initialized\n");
+}
+
+/**
+ * Allocation of the DRNG state
+ */
+static void *lrng_cc20_drng_alloc(u32 sec_strength)
+{
+ struct chacha20_state *state = NULL;
+
+ if (sec_strength > CHACHA_KEY_SIZE) {
+ pr_err("Security strength of ChaCha20 DRNG (%u bits) lower "
+ "than requested by LRNG (%u bits)\n",
+ CHACHA_KEY_SIZE * 8, sec_strength * 8);
+ return ERR_PTR(-EINVAL);
+ }
+ if (sec_strength < CHACHA_KEY_SIZE)
+ pr_warn("Security strength of ChaCha20 DRNG (%u bits) higher "
+ "than requested by LRNG (%u bits)\n",
+ CHACHA_KEY_SIZE * 8, sec_strength * 8);
+
+ state = kmalloc(sizeof(struct chacha20_state), GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+ pr_debug("memory for ChaCha20 core allocated\n");
+
+ lrng_cc20_init_state(state);
+
+ return state;
+}
+
+static void lrng_cc20_drng_dealloc(void *drng)
+{
+ struct chacha20_state *chacha20_state = (struct chacha20_state *)drng;
+
+ if (drng == &chacha20) {
+ memzero_explicit(chacha20_state, sizeof(*chacha20_state));
+ pr_debug("static ChaCha20 core zeroized\n");
+ return;
+ }
+
+ pr_debug("ChaCha20 core zeroized and freed\n");
+ kzfree(chacha20_state);
+}
+
+/******************************* Hash Operation *******************************/
+
+static void *lrng_cc20_hash_alloc(const u8 *key, u32 keylen)
+{
+ pr_info("Hash SHA-1 allocated\n");
+ return NULL;
+}
+
+static void lrng_cc20_hash_dealloc(void *hash)
+{
+}
+
+static u32 lrng_cc20_hash_digestsize(void *hash)
+{
+ return (SHA_DIGEST_WORDS * sizeof(u32));
+}
+
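+/*
+ * Hash the input buffer with the built-in SHA-1 compression function. The
+ * input length must be a multiple of 64 bytes as the data is processed in
+ * SHA-1 block increments.
+ */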
+static int lrng_cc20_hash_buffer(void *hash, const u8 *inbuf, u32 inbuflen,
+ u8 *digest)
+{
+ u32 i;
+ u32 workspace[SHA_WORKSPACE_WORDS];
+
+ WARN_ON(inbuflen % (SHA_WORKSPACE_WORDS * sizeof(u32)));
+
+ sha_init((u32 *)digest);
+ for (i = 0; i < inbuflen; i += (SHA_WORKSPACE_WORDS * sizeof(u32)))
+ sha_transform((u32 *)digest, (inbuf + i), workspace);
+ memzero_explicit(workspace, sizeof(workspace));
+
+ return 0;
+}
+
+static const char *lrng_cc20_drng_name(void)
+{
+	const char *cc20_drng_name = "ChaCha20 DRNG";
+
+	return cc20_drng_name;
+}
+
+static const char *lrng_cc20_hash_name(void)
+{
+	const char *cc20_hash_name = "SHA-1";
+
+	return cc20_hash_name;
+}
+
+const struct lrng_crypto_cb lrng_cc20_crypto_cb = {
+ .lrng_drng_name = lrng_cc20_drng_name,
+ .lrng_hash_name = lrng_cc20_hash_name,
+ .lrng_drng_alloc = lrng_cc20_drng_alloc,
+ .lrng_drng_dealloc = lrng_cc20_drng_dealloc,
+ .lrng_drng_seed_helper = lrng_cc20_drng_seed_helper,
+ .lrng_drng_generate_helper = lrng_cc20_drng_generate_helper,
+ .lrng_drng_generate_helper_full = lrng_cc20_drng_generate_helper_full,
+ .lrng_hash_alloc = lrng_cc20_hash_alloc,
+ .lrng_hash_dealloc = lrng_cc20_hash_dealloc,
+ .lrng_hash_digestsize = lrng_cc20_hash_digestsize,
+ .lrng_hash_buffer = lrng_cc20_hash_buffer,
+};
new file mode 100644
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * LRNG ChaCha20 definitions
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <crypto/chacha.h>
+
+/* State according to RFC 7539 section 2.3 */
+struct chacha20_block {
+ u32 constants[4];
+ union {
+#define CHACHA_KEY_SIZE_WORDS (CHACHA_KEY_SIZE / sizeof(u32))
+ u32 u[CHACHA_KEY_SIZE_WORDS];
+ u8 b[CHACHA_KEY_SIZE];
+ } key;
+ u32 counter;
+ u32 nonce[3];
+};
+
+static inline void lrng_cc20_init_rfc7539(struct chacha20_block *chacha20)
+{
+ memcpy(&chacha20->constants[0], "expand 32-byte k", 16);
+}
new file mode 100644
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG DRNG processing
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+
+#include "lrng_internal.h"
+
+/*
+ * Maximum number of seconds between DRNG reseed operations. Note,
+ * this is enforced with the next request of random numbers from the
+ * DRNG. Setting this value to zero implies a reseeding attempt before every
+ * generated random number.
+ */
+int lrng_drng_reseed_max_time = 600;
+
+static atomic_t lrng_avail = ATOMIC_INIT(0);
+
+DEFINE_MUTEX(lrng_crypto_cb_update);
+
+/* DRNG for /dev/urandom, getrandom(2), get_random_bytes */
+static struct lrng_drng lrng_drng_init = {
+ .drng = &chacha20,
+ .crypto_cb = &lrng_cc20_crypto_cb,
+ .lock = __MUTEX_INITIALIZER(lrng_drng_init.lock),
+ .spin_lock = __SPIN_LOCK_UNLOCKED(lrng_drng_init.spin_lock)
+};
+
+/*
+ * DRNG for get_random_bytes when called in atomic context. This
+ * DRNG will always use the ChaCha20 DRNG. It will never benefit from a
+ * DRNG switch like the "regular" DRNG. If there was no DRNG switch, the atomic
+ * DRNG is identical to the "regular" DRNG.
+ *
+ * The reason for having this is due to the fact that DRNGs other than
+ * the ChaCha20 DRNG may sleep.
+ */
+static struct lrng_drng lrng_drng_atomic = {
+ .drng = &chacha20,
+ .crypto_cb = &lrng_cc20_crypto_cb,
+ .spin_lock = __SPIN_LOCK_UNLOCKED(lrng_drng_atomic.spin_lock)
+};
+
+/********************************** Helper ************************************/
+
+bool lrng_get_available(void)
+{
+ return likely(atomic_read(&lrng_avail));
+}
+
+void lrng_set_available(void)
+{
+ atomic_set(&lrng_avail, 1);
+}
+
+struct lrng_drng *lrng_drng_init_instance(void)
+{
+ return &lrng_drng_init;
+}
+
+struct lrng_drng *lrng_drng_atomic_instance(void)
+{
+ return &lrng_drng_atomic;
+}
+
+void lrng_drng_reset(struct lrng_drng *drng)
+{
+ atomic_set(&drng->requests, LRNG_DRNG_RESEED_THRESH);
+ drng->last_seeded = jiffies;
+ drng->fully_seeded = false;
+ drng->force_reseed = true;
+ pr_debug("reset DRNG\n");
+}
+
+/************************* Random Number Generation ***************************/
+
+/* Inject a data buffer into the DRNG */
+static void lrng_drng_inject(struct lrng_drng *drng,
+ const u8 *inbuf, u32 inbuflen)
+{
+ const char *drng_type = unlikely(drng == &lrng_drng_atomic) ?
+ "atomic" : "regular";
+ unsigned long flags = 0;
+
+ BUILD_BUG_ON(LRNG_DRNG_RESEED_THRESH > INT_MAX);
+ pr_debug("seeding %s DRNG with %u bytes\n", drng_type, inbuflen);
+ lrng_drng_lock(drng, &flags);
+ if (drng->crypto_cb->lrng_drng_seed_helper(drng->drng,
+ inbuf, inbuflen) < 0) {
+ pr_warn("seeding of %s DRNG failed\n", drng_type);
+ atomic_set(&drng->requests, 1);
+ } else {
+ pr_debug("%s DRNG stats since last seeding: %lu secs; "
+ "generate calls: %d\n", drng_type,
+ (time_after(jiffies, drng->last_seeded) ?
+ (jiffies - drng->last_seeded) : 0) / HZ,
+ (LRNG_DRNG_RESEED_THRESH -
+ atomic_read(&drng->requests)));
+ drng->last_seeded = jiffies;
+ atomic_set(&drng->requests, LRNG_DRNG_RESEED_THRESH);
+ drng->force_reseed = false;
+
+ if (drng->drng == lrng_drng_atomic.drng) {
+ lrng_drng_atomic.last_seeded = jiffies;
+ atomic_set(&lrng_drng_atomic.requests,
+ LRNG_DRNG_RESEED_THRESH);
+ lrng_drng_atomic.force_reseed = false;
+ }
+ }
+ lrng_drng_unlock(drng, &flags);
+}
+
+/**
+ * Perform the seeding of the DRNG with data from the noise sources
+ */
+static inline int _lrng_drng_seed(struct lrng_drng *drng)
+{
+ struct entropy_buf seedbuf __aligned(LRNG_KCAPI_ALIGN);
+ unsigned long flags = 0;
+ u32 total_entropy_bits;
+ int ret;
+
+ lrng_drng_lock(drng, &flags);
+ total_entropy_bits = lrng_fill_seed_buffer(drng->crypto_cb, drng->hash,
+ &seedbuf, 0);
+ lrng_drng_unlock(drng, &flags);
+
+ /* Allow the seeding operation to be called again */
+ lrng_pool_unlock();
+ lrng_init_ops(total_entropy_bits);
+ ret = total_entropy_bits >> 3;
+
+ lrng_drng_inject(drng, (u8 *)&seedbuf, sizeof(seedbuf));
+ memzero_explicit(&seedbuf, sizeof(seedbuf));
+
+ return ret;
+}
+
+static int lrng_drng_get(struct lrng_drng *drng, u8 *outbuf, u32 outbuflen);
+static void lrng_drng_seed(struct lrng_drng *drng)
+{
+ int ret = _lrng_drng_seed(drng);
+
+ if (ret >= LRNG_DRNG_SECURITY_STRENGTH_BYTES)
+ drng->fully_seeded = true;
+
+ BUILD_BUG_ON(LRNG_MIN_SEED_ENTROPY_BITS >
+ LRNG_DRNG_SECURITY_STRENGTH_BITS);
+
+ /*
+	 * Reseed the atomic DRNG from the current DRNG.
+	 *
+	 * We can obtain random numbers from the DRNG as the lock type
+	 * chosen by lrng_drng_get is usable with the current caller.
+ */
+ if ((drng->drng != lrng_drng_atomic.drng) &&
+ (lrng_drng_atomic.force_reseed ||
+ atomic_read(&lrng_drng_atomic.requests) <= 0 ||
+ time_after(jiffies, lrng_drng_atomic.last_seeded +
+ lrng_drng_reseed_max_time * HZ))) {
+ u8 seedbuf[LRNG_DRNG_SECURITY_STRENGTH_BYTES]
+ __aligned(LRNG_KCAPI_ALIGN);
+
+ ret = lrng_drng_get(drng, seedbuf, sizeof(seedbuf));
+
+ if (ret < 0) {
+ pr_warn("Error generating random numbers for atomic "
+ "DRNG: %d\n", ret);
+ } else {
+ lrng_drng_inject(&lrng_drng_atomic, seedbuf, ret);
+ }
+ memzero_explicit(&seedbuf, sizeof(seedbuf));
+ }
+}
+
+static inline void _lrng_drng_seed_work(struct lrng_drng *drng, u32 node)
+{
+ pr_debug("reseed triggered by interrupt noise source for DRNG on NUMA "
+ "node %d\n", node);
+ lrng_drng_seed(drng);
+ if (drng->fully_seeded) {
+ /* Prevent reseed storm */
+ drng->last_seeded += node * 100 * HZ;
+ /* Prevent draining of pool on idle systems */
+ lrng_drng_reseed_max_time += 100;
+ }
+}
+
+/**
+ * DRNG reseed trigger: Kernel thread handler triggered by schedule_work()
+ */
+void lrng_drng_seed_work(struct work_struct *dummy)
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ u32 node;
+
+ if (lrng_drng) {
+ for_each_online_node(node) {
+ struct lrng_drng *drng = lrng_drng[node];
+
+ if (drng && !drng->fully_seeded) {
+ _lrng_drng_seed_work(drng, node);
+ goto out;
+ }
+ }
+ lrng_pool_all_numa_nodes_seeded();
+ } else {
+ if (!lrng_drng_init.fully_seeded)
+ _lrng_drng_seed_work(&lrng_drng_init, 0);
+ }
+
+out:
+ /* Allow the seeding operation to be called again */
+ lrng_pool_unlock();
+}
+
+/* Force all DRNGs to reseed before next generation */
+void lrng_drng_force_reseed(void)
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ u32 node;
+
+ if (!lrng_drng) {
+ lrng_drng_init.force_reseed = true;
+ pr_debug("force reseed of initial DRNG\n");
+ return;
+ }
+ for_each_online_node(node) {
+ struct lrng_drng *drng = lrng_drng[node];
+
+ if (!drng)
+ continue;
+
+ drng->force_reseed = true;
+ pr_debug("force reseed of DRNG on node %u\n", node);
+ }
+ lrng_drng_atomic.force_reseed = true;
+}
+
+/**
+ * Get random data out of the DRNG which is reseeded frequently.
+ *
+ * @outbuf: buffer for storing random data
+ * @outbuflen: length of outbuf
+ * @return: < 0 in error case (DRNG generation or update failed)
+ *	    >= 0 the number of generated bytes
+ */
+static int lrng_drng_get(struct lrng_drng *drng, u8 *outbuf, u32 outbuflen)
+{
+ unsigned long flags = 0;
+ u32 processed = 0;
+
+ if (!outbuf || !outbuflen)
+ return 0;
+
+ outbuflen = min_t(size_t, outbuflen, INT_MAX);
+
+ lrng_drngs_init_cc20();
+
+ while (outbuflen) {
+ u32 todo = min_t(u32, outbuflen, LRNG_DRNG_MAX_REQSIZE);
+ int ret;
+
+ /* All but the atomic DRNG are seeded during generation */
+ if (atomic_dec_and_test(&drng->requests) ||
+ drng->force_reseed ||
+ time_after(jiffies, drng->last_seeded +
+ lrng_drng_reseed_max_time * HZ)) {
+ if (likely(drng != &lrng_drng_atomic)) {
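+				/*
+				 * A non-zero lrng_pool_trylock() indicates
+				 * that a seeding operation is already in
+				 * progress -- retry with the next request
+				 * instead of seeding now.
+				 */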
+ if (lrng_pool_trylock())
+ atomic_set(&drng->requests, 1);
+ else
+ lrng_drng_seed(drng);
+ }
+ }
+
+ lrng_drng_lock(drng, &flags);
+ ret = drng->crypto_cb->lrng_drng_generate_helper(
+ drng->drng, outbuf + processed, todo);
+ lrng_drng_unlock(drng, &flags);
+ if (ret <= 0) {
+ pr_warn("getting random data from DRNG failed (%d)\n",
+ ret);
+ return -EFAULT;
+ }
+ processed += ret;
+ outbuflen -= ret;
+ }
+
+ return processed;
+}
+
+int lrng_drng_get_atomic(u8 *outbuf, u32 outbuflen)
+{
+ return lrng_drng_get(&lrng_drng_atomic, outbuf, outbuflen);
+}
+
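+/*
+ * Obtain random data from the DRNG assigned to the current NUMA node if it
+ * is fully seeded; otherwise fall back to the initial DRNG. This function
+ * may sleep and thus must not be used in atomic context.
+ */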
+int lrng_drng_get_sleep(u8 *outbuf, u32 outbuflen)
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ struct lrng_drng *drng = &lrng_drng_init;
+ int node = numa_node_id();
+
+ might_sleep();
+
+ if (lrng_drng && lrng_drng[node] && lrng_drng[node]->fully_seeded)
+ drng = lrng_drng[node];
+
+ return lrng_drng_get(drng, outbuf, outbuflen);
+}
+
+/* Initialize the default DRNG during boot */
+void lrng_drngs_init_cc20(void)
+{
+ unsigned long flags = 0;
+
+ if (lrng_get_available())
+ return;
+
+ lrng_drng_lock(&lrng_drng_init, &flags);
+ if (lrng_get_available()) {
+ lrng_drng_unlock(&lrng_drng_init, &flags);
+ return;
+ }
+
+ lrng_drng_reset(&lrng_drng_init);
+ lrng_cc20_init_state(&chacha20);
+ lrng_state_init_seed_work();
+ lrng_drng_unlock(&lrng_drng_init, &flags);
+
+ lrng_drng_lock(&lrng_drng_atomic, &flags);
+ lrng_drng_reset(&lrng_drng_atomic);
+ /*
+ * We do not initialize the state of the atomic DRNG as it is identical
+ * to the DRNG at this point.
+ */
+ lrng_drng_unlock(&lrng_drng_atomic, &flags);
+
+ lrng_set_available();
+}
+
+/* Reset LRNG such that all existing entropy is gone */
+static void _lrng_reset(struct work_struct *work)
+{
+ struct lrng_drng **lrng_drng = lrng_drng_instances();
+ unsigned long flags = 0;
+
+ if (!lrng_drng) {
+ lrng_drng_lock(&lrng_drng_init, &flags);
+ lrng_drng_reset(&lrng_drng_init);
+ lrng_drng_unlock(&lrng_drng_init, &flags);
+ } else {
+ u32 node;
+
+ for_each_online_node(node) {
+ struct lrng_drng *drng = lrng_drng[node];
+
+ if (!drng)
+ continue;
+ lrng_drng_lock(drng, &flags);
+ lrng_drng_reset(drng);
+ lrng_drng_unlock(drng, &flags);
+ }
+ }
+ lrng_set_entropy_thresh(LRNG_INIT_ENTROPY_BITS +
+ LRNG_CONDITIONING_ENTROPY_LOSS);
+
+ lrng_reset_state();
+}
+
+static DECLARE_WORK(lrng_reset_work, _lrng_reset);
+
+void lrng_reset(void)
+{
+ schedule_work(&lrng_reset_work);
+}
+
+/***************************** Initialize LRNG *******************************/
+
+static int __init lrng_init(void)
+{
+ lrng_drngs_init_cc20();
+
+ lrng_drngs_numa_alloc();
+ return 0;
+}
+
+late_initcall(lrng_init);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("Linux Random Number Generator");
new file mode 100644
@@ -0,0 +1,623 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG User and kernel space interfaces
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kthread.h>
+#include <linux/poll.h>
+#include <linux/preempt.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/timex.h>
+
+#include "lrng_internal.h"
+
+/*
+ * If the entropy count falls under this number of bits, then we
+ * should wake up processes which are selecting or polling on write
+ * access to /dev/random.
+ */
+u32 lrng_write_wakeup_bits = LRNG_WRITE_WAKEUP_ENTROPY;
+
+static LIST_HEAD(lrng_ready_list);
+static DEFINE_SPINLOCK(lrng_ready_list_lock);
+
+static DECLARE_WAIT_QUEUE_HEAD(lrng_write_wait);
+static DECLARE_WAIT_QUEUE_HEAD(lrng_init_wait);
+static struct fasync_struct *fasync;
+
+extern struct ctl_table random_table[];
+/********************************** Helper ***********************************/
+
+/* Is the DRNG seed level too low? */
+static inline bool lrng_need_entropy(void)
+{
+ return (lrng_avail_entropy() < lrng_write_wakeup_bits);
+}
+
+void lrng_writer_wakeup(void)
+{
+ if (lrng_need_entropy() && wq_has_sleeper(&lrng_write_wait)) {
+ wake_up_interruptible(&lrng_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
+}
+
+void lrng_init_wakeup(void)
+{
+ wake_up_all(&lrng_init_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+}
+
+/**
+ * Ping all kernel-internal callers waiting until the DRNG is at least
+ * minimally seeded to inform them that the DRNG reached that seed level.
+ *
+ * When the SP800-90B testing is enabled, the ping only happens if the SP800-90B
+ * startup health tests are completed. This implies that kernel internal
+ * callers always have an SP800-90B compliant noise source when being
+ * pinged.
+ */
+void lrng_process_ready_list(void)
+{
+ unsigned long flags;
+ struct random_ready_callback *rdy, *tmp;
+
+ if (!lrng_sp80090b_startup_complete())
+ return;
+
+ spin_lock_irqsave(&lrng_ready_list_lock, flags);
+ list_for_each_entry_safe(rdy, tmp, &lrng_ready_list, list) {
+ struct module *owner = rdy->owner;
+
+ list_del_init(&rdy->list);
+ rdy->func(rdy);
+ module_put(owner);
+ }
+ spin_unlock_irqrestore(&lrng_ready_list_lock, flags);
+}
+
+void lrng_debug_report_seedlevel(const char *name)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ static void *previous = NULL;
+ void *caller = (void *) _RET_IP_;
+
+ if (READ_ONCE(previous) == caller)
+ return;
+
+ if (!lrng_state_min_seeded())
+		pr_notice("%pS %s called without reaching minimally seeded "
+ "level (available entropy %u)\n", caller, name,
+ lrng_avail_entropy());
+
+ WRITE_ONCE(previous, caller);
+#endif
+}
+
+/************************ LRNG kernel input interfaces ************************/
+
+/**
+ * Interface for in-kernel drivers of true hardware RNGs.
+ * Those devices may produce endless random bits and will be throttled
+ * when our pool is full.
+ *
+ * @buffer: buffer holding the entropic data from the HW noise source to be
+ *	    inserted into the entropy pool.
+ * @count: length of buffer
+ * @entropy_bits: amount of entropy in buffer (value is in bits)
+ */
+void add_hwgenerator_randomness(const char *buffer, size_t count,
+ size_t entropy_bits)
+{
+ /*
+ * Suspend writing if we are fully loaded with entropy.
+	 * We'll be woken up again once below lrng_write_wakeup_bits,
+ * or when the calling thread is about to terminate.
+ */
+ wait_event_interruptible(lrng_write_wait,
+ lrng_need_entropy() ||
+ lrng_state_exseed_allow(lrng_noise_source_hw) ||
+ kthread_should_stop());
+ lrng_state_exseed_set(lrng_noise_source_hw, false);
+ lrng_pool_lfsr_nonaligned(buffer, count);
+ lrng_pool_add_entropy(entropy_bits);
+}
+EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
+
+/**
+ * Handle random seed passed by bootloader.
+ * If the seed is trustworthy, it is treated like input from a hardware RNG.
+ * Otherwise it is treated as device data.
+ * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
+ *
+ * @buf: buffer holding the entropic data from the HW noise source to be
+ *	 inserted into the entropy pool.
+ * @size: length of buffer
+ */
+void add_bootloader_randomness(const void *buf, unsigned int size)
+{
+ if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
+ add_hwgenerator_randomness(buf, size, size * 8);
+ else
+ add_device_randomness(buf, size);
+}
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
+
+/**
+ * Callback for HID layer -- use the HID event values to stir the entropy pool
+ */
+void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value)
+{
+ static unsigned char last_value;
+
+ /* ignore autorepeat and the like */
+ if (value == last_value)
+ return;
+
+ last_value = value;
+
+ lrng_pool_lfsr_u32((type << 4) ^ code ^ (code >> 4) ^ value);
+}
+EXPORT_SYMBOL_GPL(add_input_randomness);
+
+/**
+ * Add device- or boot-specific data to the entropy pool to help
+ * initialize it.
+ *
+ * None of this adds any entropy; it is meant to avoid the problem of
+ * the entropy pool having similar initial state across largely
+ * identical devices.
+ *
+ * @buf: buffer holding the device- or boot-specific data to be inserted into
+ *	 the entropy pool.
+ * @size: length of buffer
+ */
+void add_device_randomness(const void *buf, unsigned int size)
+{
+ lrng_pool_lfsr_nonaligned((u8 *)buf, size);
+ lrng_pool_lfsr_u32(random_get_entropy());
+ lrng_pool_lfsr_u32(jiffies);
+}
+EXPORT_SYMBOL(add_device_randomness);
+
+#ifdef CONFIG_BLOCK
+void rand_initialize_disk(struct gendisk *disk) { }
+void add_disk_randomness(struct gendisk *disk) { }
+EXPORT_SYMBOL(add_disk_randomness);
+#endif
+
+/**
+ * Delete a previously registered readiness callback function.
+ *
+ * @rdy: callback definition that was registered initially
+ */
+void del_random_ready_callback(struct random_ready_callback *rdy)
+{
+ unsigned long flags;
+ struct module *owner = NULL;
+
+ spin_lock_irqsave(&lrng_ready_list_lock, flags);
+ if (!list_empty(&rdy->list)) {
+ list_del_init(&rdy->list);
+ owner = rdy->owner;
+ }
+ spin_unlock_irqrestore(&lrng_ready_list_lock, flags);
+
+ module_put(owner);
+}
+EXPORT_SYMBOL(del_random_ready_callback);
+
+/**
+ * Add a callback function that will be invoked when the DRNG is minimally
+ * seeded.
+ *
+ * @rdy: callback definition to be invoked when the LRNG is seeded
+ * @return: 0 if callback is successfully added
+ * -EALREADY if pool is already initialised (callback not called)
+ * -ENOENT if module for callback is not alive
+ */
+int add_random_ready_callback(struct random_ready_callback *rdy)
+{
+ struct module *owner;
+ unsigned long flags;
+ int err = -EALREADY;
+
+ if (likely(lrng_state_min_seeded()))
+ return err;
+
+ owner = rdy->owner;
+ if (!try_module_get(owner))
+ return -ENOENT;
+
+ spin_lock_irqsave(&lrng_ready_list_lock, flags);
+ if (lrng_state_min_seeded())
+ goto out;
+
+ owner = NULL;
+
+ list_add(&rdy->list, &lrng_ready_list);
+ err = 0;
+
+out:
+ spin_unlock_irqrestore(&lrng_ready_list_lock, flags);
+
+ module_put(owner);
+
+ return err;
+}
+EXPORT_SYMBOL(add_random_ready_callback);
+
+/*********************** LRNG kernel output interfaces ************************/
+
+/**
+ * Provider of cryptographically strong random numbers for kernel-internal
+ * usage. This function is appropriate for all in-kernel use cases. However,
+ * it will always use the ChaCha20 DRNG.
+ *
+ * @buf: buffer to store the random bytes
+ * @nbytes: size of the buffer
+ */
+void get_random_bytes(void *buf, int nbytes)
+{
+ lrng_drng_get_atomic((u8 *)buf, (u32)nbytes);
+ lrng_debug_report_seedlevel("get_random_bytes");
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+/**
+ * Provider of cryptographically strong random numbers for kernel-internal
+ * usage. This function is appropriate only for non-atomic use cases as it
+ * may sleep. In return, it provides access to the full functionality of the
+ * LRNG including the switchable DRNG support, which may offer other DRNGs
+ * such as the SP800-90A DRBG.
+ *
+ * @buf: buffer to store the random bytes
+ * @nbytes: size of the buffer
+ */
+void get_random_bytes_full(void *buf, int nbytes)
+{
+ lrng_drng_get_sleep((u8 *)buf, (u32)nbytes);
+ lrng_debug_report_seedlevel("get_random_bytes_full");
+}
+EXPORT_SYMBOL(get_random_bytes_full);
+
+/**
+ * Wait for the LRNG to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the LRNG has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ if (likely(lrng_state_min_seeded()))
+ return 0;
+ return wait_event_interruptible(lrng_init_wait,
+ lrng_state_min_seeded());
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
+
+/**
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. The arch-specific hw RNG will
+ * almost certainly be faster than what we can do in software, but it
+ * is impossible to verify that it is implemented securely (as
+ * opposed to, say, the AES encryption of a sequence number using a
+ * key known by the NSA). So it's useful if we need the speed, but
+ * only if we're willing to trust the hardware manufacturer not to
+ * have put in a back door.
+ *
+ * @buf: buffer allocated by caller to store the random data in
+ * @nbytes: length of outbuf
+ *
+ * Return: the number of bytes that could not be obtained from the CPU RNG
+ *	   and were instead filled in by the DRNG.
+ */
+int __must_check get_random_bytes_arch(void *buf, int nbytes)
+{
+ u8 *p = buf;
+
+ while (nbytes) {
+ unsigned long v;
+ int chunk = min_t(int, nbytes, sizeof(unsigned long));
+
+ if (!arch_get_random_long(&v))
+ break;
+
+ memcpy(p, &v, chunk);
+ p += chunk;
+ nbytes -= chunk;
+ }
+
+ if (nbytes)
+ lrng_drng_get_atomic((u8 *)p, (u32)nbytes);
+
+ return nbytes;
+}
+EXPORT_SYMBOL(get_random_bytes_arch);
+
+/************************ LRNG user output interfaces *************************/
+
+static ssize_t lrng_read_common(char __user *buf, size_t nbytes)
+{
+ ssize_t ret = 0;
+ u8 tmpbuf[LRNG_DRNG_BLOCKSIZE] __aligned(LRNG_KCAPI_ALIGN);
+ u8 *tmp_large = NULL, *tmp = tmpbuf;
+ u32 tmplen = sizeof(tmpbuf);
+
+ if (nbytes == 0)
+ return 0;
+
+ /*
+ * Satisfy large read requests -- as the common case are smaller
+ * request sizes, such as 16 or 32 bytes, avoid a kmalloc overhead for
+ * those by using the stack variable of tmpbuf.
+ */
+ if (!IS_ENABLED(CONFIG_BASE_SMALL) && (nbytes > sizeof(tmpbuf))) {
+ tmplen = min_t(u32, nbytes, LRNG_DRNG_MAX_REQSIZE);
+ tmp_large = kmalloc(tmplen + LRNG_KCAPI_ALIGN, GFP_KERNEL);
+ if (!tmp_large)
+ tmplen = sizeof(tmpbuf);
+ else
+ tmp = PTR_ALIGN(tmp_large, LRNG_KCAPI_ALIGN);
+ }
+
+ while (nbytes) {
+ u32 todo = min_t(u32, nbytes, tmplen);
+ int rc = 0;
+
+ /* Reschedule if we received a large request. */
+ if ((tmp_large) && need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+
+ rc = lrng_drng_get_sleep(tmp, todo);
+ if (rc <= 0) {
+ if (rc < 0)
+ ret = rc;
+ break;
+ }
+ if (copy_to_user(buf, tmp, rc)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= rc;
+ buf += rc;
+ ret += rc;
+ }
+
+ /* Wipe data just returned from memory */
+ if (tmp_large)
+ kzfree(tmp_large);
+ else
+ memzero_explicit(tmpbuf, sizeof(tmpbuf));
+
+ return ret;
+}
+
+static ssize_t
+lrng_read_common_block(int nonblock, char __user *buf, size_t nbytes)
+{
+ if (nbytes == 0)
+ return 0;
+
+ if (unlikely(!lrng_state_operational())) {
+ int ret;
+
+ if (nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(lrng_init_wait,
+ lrng_state_operational());
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return lrng_read_common(buf, nbytes);
+}
+
+static ssize_t lrng_drng_read_block(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return lrng_read_common_block(file->f_flags & O_NONBLOCK, buf, nbytes);
+}
+
+static __poll_t lrng_random_poll(struct file *file, poll_table *wait)
+{
+ __poll_t mask;
+
+ poll_wait(file, &lrng_init_wait, wait);
+ poll_wait(file, &lrng_write_wait, wait);
+ mask = 0;
+ if (lrng_state_operational())
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (lrng_need_entropy() ||
+ lrng_state_exseed_allow(lrng_noise_source_user))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+ return mask;
+}
+
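+/*
+ * Inject user-provided data into the entropy pool and credit it with the
+ * given amount of entropy. If no entropy is credited, force a reseed of the
+ * DRNGs with the next request.
+ */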
+static ssize_t lrng_drng_write_common(const char __user *buffer, size_t count,
+ u32 entropy_bits)
+{
+ ssize_t ret = 0;
+ u8 buf[64] __aligned(LRNG_KCAPI_ALIGN);
+ const char __user *p = buffer;
+ u32 orig_entropy_bits = entropy_bits;
+
+ if (!lrng_get_available())
+ return -EAGAIN;
+
+ count = min_t(size_t, count, INT_MAX);
+ while (count > 0) {
+ size_t bytes = min_t(size_t, count, sizeof(buf));
+ u32 ent = min_t(u32, bytes<<3, entropy_bits);
+
+ if (copy_from_user(&buf, p, bytes))
+ return -EFAULT;
+ /* Inject data into entropy pool */
+ lrng_pool_lfsr(buf, bytes);
+ lrng_pool_add_entropy(ent);
+
+ count -= bytes;
+ p += bytes;
+ ret += bytes;
+ entropy_bits -= ent;
+
+ cond_resched();
+ }
+
+ /* Force reseed of DRNG during next data request. */
+ if (!orig_entropy_bits)
+ lrng_drng_force_reseed();
+
+ return ret;
+}
+
+static ssize_t lrng_drng_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ if (!lrng_state_min_seeded())
+ pr_notice_ratelimited("%s - use of insufficiently seeded DRNG "
+ "(%zu bytes read)\n", current->comm,
+ nbytes);
+ else if (!lrng_state_operational())
+ pr_debug_ratelimited("%s - use of not fully seeded DRNG (%zu "
+ "bytes read)\n", current->comm, nbytes);
+
+ return lrng_read_common(buf, nbytes);
+}
+
+static ssize_t lrng_drng_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ return lrng_drng_write_common(buffer, count, 0);
+}
+
+static long lrng_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int size, ent_count_bits;
+ int __user *p = (int __user *)arg;
+
+ switch (cmd) {
+ case RNDGETENTCNT:
+ ent_count_bits = lrng_avail_entropy();
+ if (put_user(ent_count_bits, p))
+ return -EFAULT;
+ return 0;
+ case RNDADDTOENTCNT:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count_bits, p))
+ return -EFAULT;
+ ent_count_bits = (int)lrng_avail_entropy() + ent_count_bits;
+ if (ent_count_bits < 0)
+ ent_count_bits = 0;
+ if (ent_count_bits > LRNG_POOL_SIZE_BITS)
+ ent_count_bits = LRNG_POOL_SIZE_BITS;
+ lrng_pool_set_entropy(ent_count_bits);
+ return 0;
+ case RNDADDENTROPY:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count_bits, p++))
+ return -EFAULT;
+ if (ent_count_bits < 0)
+ return -EINVAL;
+ if (get_user(size, p++))
+ return -EFAULT;
+ if (size < 0)
+ return -EINVAL;
+ lrng_state_exseed_set(lrng_noise_source_user, false);
+ /* there cannot be more entropy than data */
+ ent_count_bits = min(ent_count_bits, size<<3);
+ return lrng_drng_write_common((const char __user *)p, size,
+ ent_count_bits);
+ case RNDZAPENTCNT:
+ case RNDCLEARPOOL:
+ /* Clear the entropy pool counter. */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ lrng_pool_set_entropy(0);
+ return 0;
+ case RNDRESEEDCRNG:
+ /*
+ * We leave the capability check here since it is present
+ * in the upstream's RNG implementation. Yet, user space
+		 * can trigger a reseed as easily as by writing into /dev/random
+ * or /dev/urandom where no privilege is needed.
+ */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* Force a reseed of all DRNGs */
+ lrng_drng_force_reseed();
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lrng_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &fasync);
+}
+
+const struct file_operations random_fops = {
+ .read = lrng_drng_read_block,
+ .write = lrng_drng_write,
+ .poll = lrng_random_poll,
+ .unlocked_ioctl = lrng_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = lrng_fasync,
+ .llseek = noop_llseek,
+};
+
+const struct file_operations urandom_fops = {
+ .read = lrng_drng_read,
+ .write = lrng_drng_write,
+ .unlocked_ioctl = lrng_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .fasync = lrng_fasync,
+ .llseek = noop_llseek,
+};
+
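+/*
+ * getrandom(2) syscall handler: GRND_INSECURE returns data from the DRNG
+ * without waiting for it to be fully seeded, while all other requests block
+ * (or return -EAGAIN with GRND_NONBLOCK) until the LRNG is operational.
+ */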
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
+ unsigned int, flags)
+{
+ if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
+ return -EINVAL;
+
+ /*
+ * Requesting insecure and blocking randomness at the same time makes
+ * no sense.
+ */
+ if ((flags &
+ (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
+ return -EINVAL;
+
+ if (count > INT_MAX)
+ count = INT_MAX;
+
+ if (flags & GRND_INSECURE)
+ return lrng_drng_read(NULL, buf, count, NULL);
+
+ return lrng_read_common_block(flags & GRND_NONBLOCK, buf, count);
+}
new file mode 100644
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2018 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_INTERNAL_H
+#define _LRNG_INTERNAL_H
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+/*************************** General LRNG parameter ***************************/
+
+/* Entropy pool parameter
+ *
+ * The entropy pool must not be smaller than 64 bytes as the SHA-1 operation
+ * in lrng_chacha20.c processes data in multiples of 64 bytes.
+ */
+#define LRNG_POOL_SIZE (16 << CONFIG_LRNG_POOL_SIZE)
+#define LRNG_POOL_WORD_BYTES (4) /* (sizeof(atomic_t)) */
+#define LRNG_POOL_SIZE_BYTES (LRNG_POOL_SIZE * LRNG_POOL_WORD_BYTES)
+#define LRNG_POOL_SIZE_BITS (LRNG_POOL_SIZE_BYTES * 8)
+#define LRNG_POOL_WORD_BITS (LRNG_POOL_WORD_BYTES * 8)
+
+/* Security strength of LRNG -- this must match DRNG security strength */
+#define LRNG_DRNG_SECURITY_STRENGTH_BYTES 32
+#define LRNG_DRNG_SECURITY_STRENGTH_BITS (LRNG_DRNG_SECURITY_STRENGTH_BYTES * 8)
+#define LRNG_DRNG_BLOCKSIZE 64 /* Maximum of DRNG block sizes */
+
+/*
+ * SP800-90A defines a maximum request size of 1<<16 bytes. The given value is
+ * considered a safer margin.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_DRNG_MAX_REQSIZE (1<<12)
+
+/*
+ * SP800-90A defines a maximum number of requests between reseeds of 2^48.
+ * The given value is considered a much safer margin, balancing requests for
+ * frequent reseeds with the need to conserve entropy. This value MUST NOT be
+ * larger than INT_MAX because it is used in an atomic_t.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_DRNG_RESEED_THRESH (1<<20)
+
+/*
+ * Number of interrupts to be recorded to assume that DRNG security strength
+ * bits of entropy are received.
+ * Note: a value below the DRNG security strength should not be defined as this
+ * may imply the DRNG can never be fully seeded in case other noise
+ * sources are unavailable.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_IRQ_ENTROPY_BITS LRNG_DRNG_SECURITY_STRENGTH_BITS
+
+/*
+ * Min required seed entropy is 128 bits covering the minimum entropy
+ * requirement of SP800-131A and the German BSI's TR02102.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_FULL_SEED_ENTROPY_BITS LRNG_DRNG_SECURITY_STRENGTH_BITS
+#define LRNG_MIN_SEED_ENTROPY_BITS 128
+#define LRNG_INIT_ENTROPY_BITS 32
+
+/*
+ * Amount of entropy that is lost with the conditioning functions of LFSR and
+ * hash_df as shown with the entropy analysis compliant to SP800-90B.
+ */
+#define LRNG_CONDITIONING_ENTROPY_LOSS 1
+
+/*
+ * Wakeup value
+ *
+ * This value is allowed to be changed.
+ */
+#if (LRNG_POOL_SIZE_BITS <= (LRNG_DRNG_SECURITY_STRENGTH_BITS * 2))
+# define LRNG_WRITE_WAKEUP_ENTROPY (LRNG_DRNG_SECURITY_STRENGTH_BITS + \
+ LRNG_CONDITIONING_ENTROPY_LOSS)
+#else
+# define LRNG_WRITE_WAKEUP_ENTROPY (LRNG_DRNG_SECURITY_STRENGTH_BITS * 2)
+#endif
+
+/*
+ * Oversampling factor of IRQ events to obtain
+ * LRNG_DRNG_SECURITY_STRENGTH_BYTES. This factor is used when a
+ * high-resolution time stamp is not available. In this case, jiffies and
+ * register contents are used to fill the entropy pool. These noise sources
+ * are much less entropic than the high-resolution timer. The entropy content
+ * is the entropy content assumed with LRNG_IRQ_ENTROPY_BITS divided by
+ * LRNG_IRQ_OVERSAMPLING_FACTOR.
+ *
+ * This value is allowed to be changed.
+ */
+#define LRNG_IRQ_OVERSAMPLING_FACTOR 10
+
+/*
+ * Alignmask which should cover all cipher implementations
+ * WARNING: If this is changed to a value larger than 8, manual
+ * alignment is necessary as older versions of GCC may not be capable
+ * of aligning stack variables at boundaries greater than 8.
+ * In this case, PTR_ALIGN must be used.
+ */
+#define LRNG_KCAPI_ALIGN 8
+
+/************************ Default DRNG implementation *************************/
+
+extern struct chacha20_state chacha20;
+extern const struct lrng_crypto_cb lrng_cc20_crypto_cb;
+void lrng_cc20_init_state(struct chacha20_state *state);
+
+/********************************** /proc *************************************/
+
+static inline void lrng_pool_inc_numa_node(void) { }
+
+/****************************** LRNG interfaces *******************************/
+
+extern u32 lrng_write_wakeup_bits;
+extern int lrng_drng_reseed_max_time;
+
+void lrng_writer_wakeup(void);
+void lrng_init_wakeup(void);
+void lrng_debug_report_seedlevel(const char *name);
+void lrng_process_ready_list(void);
+
+/************************** Entropy pool management ***************************/
+
+enum lrng_external_noise_source {
+ lrng_noise_source_hw,
+ lrng_noise_source_user
+};
+
+bool lrng_state_exseed_allow(enum lrng_external_noise_source source);
+void lrng_state_exseed_set(enum lrng_external_noise_source source, bool type);
+void lrng_state_init_seed_work(void);
+u32 lrng_avail_entropy(void);
+void lrng_set_entropy_thresh(u32 new);
+int lrng_pool_trylock(void);
+void lrng_pool_unlock(void);
+void lrng_reset_state(void);
+void lrng_pool_all_numa_nodes_seeded(void);
+bool lrng_state_min_seeded(void);
+bool lrng_state_fully_seeded(void);
+bool lrng_state_operational(void);
+bool lrng_pool_highres_timer(void);
+void lrng_pool_set_entropy(u32 entropy_bits);
+void lrng_pool_lfsr(const u8 *buf, u32 buflen);
+void lrng_pool_lfsr_nonaligned(const u8 *buf, u32 buflen);
+void lrng_pool_lfsr_u32(u32 value);
+void lrng_pool_add_irq(u32 irq_num);
+void lrng_pool_add_entropy(u32 entropy_bits);
+
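+/*
+ * Buffer for collecting seed data: it holds up to three blocks of
+ * LRNG_DRNG_SECURITY_STRENGTH_BYTES gathered from the available noise
+ * sources, plus a time stamp ('now').
+ */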
+struct entropy_buf {
+ u8 a[LRNG_DRNG_SECURITY_STRENGTH_BYTES];
+ u8 b[LRNG_DRNG_SECURITY_STRENGTH_BYTES];
+ u8 c[LRNG_DRNG_SECURITY_STRENGTH_BYTES];
+ u32 now;
+};
+
+int lrng_fill_seed_buffer(const struct lrng_crypto_cb *crypto_cb, void *hash,
+ struct entropy_buf *entropy_buf, u32 entropy_retain);
+void lrng_init_ops(u32 seed_bits);
+
+/************************** Jitter RNG Noise Source ***************************/
+
+#ifdef CONFIG_LRNG_JENT
+u32 lrng_get_jent(u8 *outbuf, unsigned int outbuflen);
+u32 lrng_jent_entropylevel(void);
+#else /* CONFIG_LRNG_JENT */
+static inline u32 lrng_get_jent(u8 *outbuf, unsigned int outbuflen) { return 0; }
+static inline u32 lrng_jent_entropylevel(void) { return 0; }
+#endif /* CONFIG_LRNG_JENT */
+
+/*************************** CPU-based Noise Source ***************************/
+
+u32 lrng_get_arch(u8 *outbuf);
+u32 lrng_slow_noise_req_entropy(u32 required_entropy_bits);
+
+/****************************** DRNG processing *******************************/
+
+/* DRNG state handle */
+struct lrng_drng {
+ void *drng; /* DRNG handle */
+ void *hash; /* Hash handle */
+ const struct lrng_crypto_cb *crypto_cb; /* Crypto callbacks */
+ atomic_t requests; /* Number of DRNG requests */
+ unsigned long last_seeded; /* Last time it was seeded */
+ bool fully_seeded; /* Is DRNG fully seeded? */
+ bool force_reseed; /* Force a reseed */
+ struct mutex lock;
+ spinlock_t spin_lock;
+};
+
+extern struct mutex lrng_crypto_cb_update;
+
+struct lrng_drng *lrng_drng_init_instance(void);
+struct lrng_drng *lrng_drng_atomic_instance(void);
+
+static __always_inline bool lrng_drng_is_atomic(struct lrng_drng *drng)
+{
+ return (drng->drng == lrng_drng_atomic_instance()->drng);
+}
+
+/* Lock the DRNG */
+static __always_inline void lrng_drng_lock(struct lrng_drng *drng,
+ unsigned long *flags)
+{
+ /* Use spin lock in case the atomic DRNG context is used */
+ if (lrng_drng_is_atomic(drng)) {
+ spin_lock_irqsave(&drng->spin_lock, *flags);
+
+ /*
+ * In case a lock transition happened while we were spinning,
+ * catch this case and use the new lock type.
+ */
+ if (unlikely(!lrng_drng_is_atomic(drng))) {
+ spin_unlock_irqrestore(&drng->spin_lock, *flags);
+ mutex_lock(&drng->lock);
+ }
+ } else {
+ mutex_lock(&drng->lock);
+ }
+}
+
+/* Unlock the DRNG */
+static __always_inline void lrng_drng_unlock(struct lrng_drng *drng,
+ unsigned long *flags)
+{
+ if (lrng_drng_is_atomic(drng))
+ spin_unlock_irqrestore(&drng->spin_lock, *flags);
+ else
+ mutex_unlock(&drng->lock);
+}
+
+bool lrng_get_available(void);
+void lrng_set_available(void);
+void lrng_drngs_init_cc20(void);
+void lrng_drng_reset(struct lrng_drng *drng);
+int lrng_drng_get_atomic(u8 *outbuf, u32 outbuflen);
+int lrng_drng_get_sleep(u8 *outbuf, u32 outbuflen);
+void lrng_drng_force_reseed(void);
+void lrng_drng_seed_work(struct work_struct *dummy);
+
+static inline struct lrng_drng **lrng_drng_instances(void) { return NULL; }
+static inline void lrng_drngs_numa_alloc(void) { return; }
+
+/************************** Health Test linking code **************************/
+
+enum lrng_health_res {
+ lrng_health_pass, /* Health test passes on time stamp */
+ lrng_health_fail_use, /* Time stamp unhealthy, but mix in */
+ lrng_health_fail_drop /* Time stamp unhealthy, drop it */
+};
+
+#ifdef CONFIG_LRNG_HEALTH_TESTS
+bool lrng_sp80090b_startup_complete(void);
+bool lrng_sp80090b_compliant(void);
+
+enum lrng_health_res lrng_health_test(u32 now_time);
+void lrng_health_disable(void);
+
+void lrng_reset(void);
+#else /* CONFIG_LRNG_HEALTH_TESTS */
+static inline bool lrng_sp80090b_startup_complete(void) { return true; }
+static inline bool lrng_sp80090b_compliant(void) { return false; }
+
+static inline enum lrng_health_res
+lrng_health_test(u32 now_time) { return lrng_health_pass; }
+static inline void lrng_health_disable(void) { }
+#endif /* CONFIG_LRNG_HEALTH_TESTS */
+
+/****************************** Helper code ***********************************/
+
+static inline u32 atomic_read_u32(atomic_t *v)
+{
+ return (u32)atomic_read(v);
+}
+
+/*************************** Auxiliary functions ******************************/
+
+void invalidate_batched_entropy(void);
+
+/***************************** Testing code ***********************************/
+
+#ifdef CONFIG_LRNG_TESTING
+bool lrng_raw_entropy_store(u32 value);
+#else /* CONFIG_LRNG_TESTING */
+static inline bool lrng_raw_entropy_store(u32 value) { return false; }
+#endif /* CONFIG_LRNG_TESTING */
+
+#endif /* _LRNG_INTERNAL_H */
new file mode 100644
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * LRNG Linear Feedback Shift Register
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+/* Status information about IRQ noise source */
+struct lrng_irq_info {
+ atomic_t num_events; /* Number of healthy IRQs since last read */
+ atomic_t num_events_thresh; /* Reseed threshold */
+ atomic_t reseed_in_progress; /* Flag whether a reseed is in progress */
+ bool irq_highres_timer; /* Is high-resolution timer available? */
+ u32 irq_entropy_bits; /* Number of IRQs needed for full security strength */
+};
+
+/*
+ * This is the entropy pool used by the slow noise source. Its size should
+ * be at least as large as LRNG_DRNG_SECURITY_STRENGTH_BITS.
+ *
+ * The pool array is aligned to 8 bytes to accommodate the kernel crypto API
+ * implementations of the hash functions used to read the pool: some
+ * accelerated implementations require an alignment to avoid a realignment
+ * which involves memcpy(). The alignment to 8 bytes should satisfy all crypto
+ * implementations.
+ *
+ * LRNG_POOL_SIZE is allowed to be changed only if the taps of the polynomial
+ * used for the LFSR are changed as well. The size must be a power of 2 due
+ * to the mask handling in lrng_pool_lfsr_u32 which uses AND instead of modulo.
+ */
+struct lrng_pool {
+ union {
+ struct {
+ /*
+ * hash_df implementation: counter, requested_bits and
+ * pool form a linear buffer that is used in the
+ * hash_df function specified in SP800-90A section
+ * 10.3.1
+ */
+ unsigned char counter;
+ __be32 requested_bits;
+
+ /* Pool */
+ atomic_t pool[LRNG_POOL_SIZE];
+ /* Ptr into pool for next IRQ word injection */
+ atomic_t pool_ptr;
+ /* rotate for LFSR */
+ atomic_t input_rotate;
+ /* All NUMA DRNGs seeded? */
+ bool all_online_numa_node_seeded;
+ /* IRQ noise source status info */
+ struct lrng_irq_info irq_info;
+ /* Serialize read of entropy pool */
+ spinlock_t lock;
+ };
+ /*
+ * The static SHA-1 implementation in lrng_cc20_hash_buffer
+ * processes data in 64-byte blocks. Hence, ensure that the
+ * LRNG entropy pool data structure is sized accordingly.
+ */
+ u8 hash_input_buf[LRNG_POOL_SIZE_BYTES + 64];
+ };
+};
+
+/*
+ * Implement a (modified) twisted Generalized Feedback Shift Register. (See M.
+ * Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM Transactions on
+ * Modeling and Computer Simulation 2(3):179-194. Also see M. Matsumoto & Y.
+ * Kurita, 1994. Twisted GFSR generators II. ACM Transactions on Modeling and
+ * Computer Simulation 4:254-266).
+ */
+static u32 const lrng_twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
+/*
+ * The polynomials for the LFSR are taken from the document "Table of Linear
+ * Feedback Shift Registers" by Roy Ward, Tim Molteno, October 26, 2007.
+ * The first polynomial is from "Primitive Binary Polynomials" by Wayne
+ * Stahnke (1973) and is primitive as well as irreducible.
+ *
+ * Note, the tap values are smaller by one compared to the documentation because
+ * they are used as an index into an array whose index starts at zero.
+ *
+ * All polynomials were also verified to be primitive and irreducible with magma,
+ * which guarantees the key property of the LFSR: it acts as a compression
+ * function for entropy.
+ */
+static u32 const lrng_lfsr_polynomial[][4] = {
+ { 15, 13, 12, 10 }, /* 16 words */
+ { 31, 29, 25, 24 }, /* 32 words */
+ { 63, 62, 60, 59 }, /* 64 words */
+ { 127, 28, 26, 1 }, /* 128 words by Stahnke */
+ { 255, 253, 250, 245 }, /* 256 words */
+ { 511, 509, 506, 503 }, /* 512 words */
+ { 1023, 1014, 1001, 1000 }, /* 1024 words */
+ { 2047, 2034, 2033, 2028 }, /* 2048 words */
+ { 4095, 4094, 4080, 4068 }, /* 4096 words */
+};
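+
+/*
+ * Illustrative example (assuming the default pool size of 128 32-bit words,
+ * i.e. CONFIG_LRNG_POOL_SIZE == 3): the row { 127, 28, 26, 1 } is selected.
+ * For an injection at index ptr, the current word at ptr and the words at
+ * (ptr + 127) & 127, (ptr + 28) & 127, (ptr + 26) & 127 and (ptr + 1) & 127
+ * are XORed into the rotated input value before the twist step below.
+ */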
+
+static inline void _lrng_pool_lfsr_u32(struct lrng_pool *pool, u32 value)
+{
+ /*
+ * Process the LFSR by altering not adjacent words but rather
+ * words spaced further apart. Using a prime number ensures that all
+ * words are processed evenly. As some of the LFSR polynomial's taps
+ * are close together, processing adjacent words with the LFSR taps
+ * may be inappropriate as the data just mixed in at these taps may
+ * not be independent from the current data to be mixed in.
+ */
+ u32 ptr = (u32)atomic_add_return_relaxed(67, &pool->pool_ptr) &
+ (LRNG_POOL_SIZE - 1);
+ /*
+ * Add 7 bits of rotation to the pool. At the beginning of the
+ * pool, add an extra 7 bits rotation, so that successive passes
+ * spread the input bits across the pool evenly.
+ *
+ * Note, there is a race between getting ptr and calculating
+ * input_rotate when ptr is obtained on two or more CPUs at the
+ * same time. This race is irrelevant as it may only come into effect
+ * if 3 or more CPUs race at the same time, which is very unlikely. If
+ * the race happens, it applies to one event only. As this rotation
+ * supports the LFSR without being strictly needed, we accept this
+ * race.
+ */
+ u32 input_rotate = (u32)atomic_add_return_relaxed((ptr ? 7 : 14),
+ &pool->input_rotate) & 31;
+ u32 word = rol32(value, input_rotate);
+
+ BUILD_BUG_ON(LRNG_POOL_WORD_BYTES != sizeof(pool->pool[0]));
+ BUILD_BUG_ON(LRNG_POOL_SIZE - 1 !=
+ lrng_lfsr_polynomial[CONFIG_LRNG_POOL_SIZE][0]);
+ word ^= atomic_read_u32(&pool->pool[ptr]);
+ word ^= atomic_read_u32(&pool->pool[
+ (ptr + lrng_lfsr_polynomial[CONFIG_LRNG_POOL_SIZE][0]) &
+ (LRNG_POOL_SIZE - 1)]);
+ word ^= atomic_read_u32(&pool->pool[
+ (ptr + lrng_lfsr_polynomial[CONFIG_LRNG_POOL_SIZE][1]) &
+ (LRNG_POOL_SIZE - 1)]);
+ word ^= atomic_read_u32(&pool->pool[
+ (ptr + lrng_lfsr_polynomial[CONFIG_LRNG_POOL_SIZE][2]) &
+ (LRNG_POOL_SIZE - 1)]);
+ word ^= atomic_read_u32(&pool->pool[
+ (ptr + lrng_lfsr_polynomial[CONFIG_LRNG_POOL_SIZE][3]) &
+ (LRNG_POOL_SIZE - 1)]);
+
+ word = (word >> 3) ^ lrng_twist_table[word & 7];
+ atomic_set(&pool->pool[ptr], word);
+}
+
+u32 __lrng_pool_hash_df(const struct lrng_crypto_cb *crypto_cb, void *hash,
+ struct lrng_pool *pool, u8 *outbuf, u32 requested_bits);
new file mode 100644
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Entropy pool management
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/irq_regs.h>
+#include <linux/lrng.h>
+#include <linux/percpu.h>
+#include <linux/random.h>
+#include <linux/utsname.h>
+#include <linux/workqueue.h>
+
+#include "lrng_internal.h"
+#include "lrng_lfsr.h"
+
+struct lrng_state {
+ bool lrng_operational; /* Is DRNG operational? */
+ bool lrng_fully_seeded; /* Is DRNG fully seeded? */
+ bool lrng_min_seeded; /* Is DRNG minimally seeded? */
+
+ /*
+ * To ensure that external entropy providers cannot dominate the
+ * internal noise sources, yet are not dominated by them either, the
+ * following booleans allow an external provider to deliver seed data
+ * once per DRNG reseed. This triggering of the external noise sources
+ * is performed even when the entropy pool has sufficient entropy.
+ */
+ bool lrng_seed_hw; /* Allow HW to provide seed */
+ bool lrng_seed_user; /* Allow user space to provide seed */
+
+ struct work_struct lrng_seed_work; /* (re)seed work queue */
+};
+
+static struct lrng_pool lrng_pool __aligned(LRNG_KCAPI_ALIGN) = {
+ .irq_info = {
+ .irq_entropy_bits = LRNG_IRQ_ENTROPY_BITS,
+ .num_events_thresh = ATOMIC_INIT(LRNG_INIT_ENTROPY_BITS +
+ LRNG_CONDITIONING_ENTROPY_LOSS),
+ /* Sample IRQ pointer data at least during boot */
+ .irq_highres_timer = false },
+ .lock = __SPIN_LOCK_UNLOCKED(lrng_pool.lock)
+};
+
+static struct lrng_state lrng_state = {
+ .lrng_operational = false,
+ .lrng_fully_seeded = false,
+ .lrng_min_seeded = false,
+ .lrng_seed_hw = true,
+ .lrng_seed_user = true,
+};
+
+/********************************** Helper ***********************************/
+
+/* External entropy provider is allowed to provide seed data */
+bool lrng_state_exseed_allow(enum lrng_external_noise_source source)
+{
+ if (source == lrng_noise_source_hw)
+ return lrng_state.lrng_seed_hw;
+ return lrng_state.lrng_seed_user;
+}
+
+/* Enable / disable external entropy provider to furnish seed */
+void lrng_state_exseed_set(enum lrng_external_noise_source source, bool type)
+{
+ if (source == lrng_noise_source_hw)
+ lrng_state.lrng_seed_hw = type;
+ else
+ lrng_state.lrng_seed_user = type;
+}
+
+static inline void lrng_state_exseed_allow_all(void)
+{
+ lrng_state_exseed_set(lrng_noise_source_hw, true);
+ lrng_state_exseed_set(lrng_noise_source_user, true);
+}
+
+void lrng_state_init_seed_work(void)
+{
+ INIT_WORK(&lrng_state.lrng_seed_work, lrng_drng_seed_work);
+}
+
+static inline u32 lrng_entropy_to_data(u32 entropy_bits)
+{
+ return ((entropy_bits * lrng_pool.irq_info.irq_entropy_bits) /
+ LRNG_DRNG_SECURITY_STRENGTH_BITS);
+}
+
+static inline u32 lrng_data_to_entropy(u32 irqnum)
+{
+ return ((irqnum * LRNG_DRNG_SECURITY_STRENGTH_BITS) /
+ lrng_pool.irq_info.irq_entropy_bits);
+}
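+
+/*
+ * Worked example for the two conversion helpers above (assuming, purely for
+ * illustration, a security strength of 256 bits and irq_entropy_bits == 256):
+ * one healthy interrupt is credited with one bit of entropy, so
+ * lrng_entropy_to_data(128) == 128 interrupts and
+ * lrng_data_to_entropy(256) == 256 bits. On systems without a
+ * high-resolution timer, irq_entropy_bits is multiplied by the oversampling
+ * factor and correspondingly more interrupts are needed per credited bit.
+ */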
+
+u32 lrng_avail_entropy(void)
+{
+ return min_t(u32, LRNG_POOL_SIZE_BITS, lrng_data_to_entropy(
+ atomic_read_u32(&lrng_pool.irq_info.num_events)));
+}
+
+void lrng_set_entropy_thresh(u32 new)
+{
+ atomic_set(&lrng_pool.irq_info.num_events_thresh,
+ lrng_entropy_to_data(new));
+}
+
+/*
+ * Reading of the LRNG pool is only allowed by one caller. The reading is
+ * only performed to (re)seed DRNGs. Thus, if this "lock" is already taken,
+ * the reseeding operation is in progress. The caller is not intended to wait
+ * but continue with its other operation.
+ */
+int lrng_pool_trylock(void)
+{
+ return atomic_cmpxchg(&lrng_pool.irq_info.reseed_in_progress, 0, 1);
+}
+
+void lrng_pool_unlock(void)
+{
+ atomic_set(&lrng_pool.irq_info.reseed_in_progress, 0);
+}
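+
+/*
+ * Illustrative caller pattern (a sketch, not a prescribed contract): the
+ * return value of lrng_pool_trylock() is the previous lock state, so a
+ * non-zero value means another reseed is already running and the caller
+ * simply moves on:
+ *
+ *	if (lrng_pool_trylock())
+ *		return;
+ *	... read the entropy pool and (re)seed the DRNG ...
+ *	lrng_pool_unlock();
+ */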
+
+void lrng_reset_state(void)
+{
+ struct lrng_irq_info *irq_info = &lrng_pool.irq_info;
+
+ atomic_set(&irq_info->num_events, 0);
+ lrng_state.lrng_operational = false;
+ lrng_state.lrng_fully_seeded = false;
+ lrng_state.lrng_min_seeded = false;
+ lrng_pool.all_online_numa_node_seeded = false;
+ pr_debug("reset LRNG\n");
+}
+
+void lrng_pool_all_numa_nodes_seeded(void)
+{
+ lrng_pool.all_online_numa_node_seeded = true;
+}
+
+bool lrng_state_min_seeded(void)
+{
+ return lrng_state.lrng_min_seeded;
+}
+
+bool lrng_state_fully_seeded(void)
+{
+ return lrng_state.lrng_fully_seeded;
+}
+
+bool lrng_state_operational(void)
+{
+ return lrng_state.lrng_operational;
+}
+
+bool lrng_pool_highres_timer(void)
+{
+ return lrng_pool.irq_info.irq_highres_timer;
+}
+
+void lrng_pool_set_entropy(u32 entropy_bits)
+{
+ atomic_set(&lrng_pool.irq_info.num_events,
+ lrng_entropy_to_data(entropy_bits));
+}
+
+static void lrng_pool_configure(bool highres_timer, u32 irq_entropy_bits)
+{
+ struct lrng_irq_info *irq_info = &lrng_pool.irq_info;
+
+ irq_info->irq_highres_timer = highres_timer;
+ if (irq_info->irq_entropy_bits != irq_entropy_bits) {
+ irq_info->irq_entropy_bits = irq_entropy_bits;
+ /* Reset the threshold based on new oversampling factor. */
+ lrng_set_entropy_thresh(atomic_read_u32(
+ &irq_info->num_events_thresh));
+ }
+}
+
+static int __init lrng_init_time_source(void)
+{
+ if (random_get_entropy() || random_get_entropy()) {
+ /*
+ * As the highres timer is only identified here, previous
+ * interrupts obtained during boot time are treated as if only
+ * a lowres timer had been present.
+ */
+ lrng_pool_configure(true, LRNG_IRQ_ENTROPY_BITS);
+ } else {
+ lrng_health_disable();
+ lrng_pool_configure(false, LRNG_IRQ_ENTROPY_BITS *
+ LRNG_IRQ_OVERSAMPLING_FACTOR);
+ pr_warn("operating without high-resolution timer and applying IRQ oversampling factor %u\n",
+ LRNG_IRQ_OVERSAMPLING_FACTOR);
+ }
+
+ return 0;
+}
+
+core_initcall(lrng_init_time_source);
+
+/* Invoke this function with a buffer aligned to 4 bytes */
+void lrng_pool_lfsr(const u8 *buf, u32 buflen)
+{
+ u32 *p_buf = (u32 *)buf;
+
+ for (; buflen >= 4; buflen -= 4)
+ lrng_pool_lfsr_u32(*p_buf++);
+
+ buf = (u8 *)p_buf;
+ while (buflen--)
+ lrng_pool_lfsr_u32(*buf++);
+}
+
+void lrng_pool_lfsr_nonaligned(const u8 *buf, u32 buflen)
+{
+ while (buflen) {
+ if (!((unsigned long)buf & (sizeof(u32) - 1))) {
+ lrng_pool_lfsr(buf, buflen);
+ return;
+ }
+
+ lrng_pool_lfsr_u32(*buf++);
+ buflen--;
+ }
+}
+
+/**************************** Interrupt processing ****************************/
+
+/**
+ * Hot code path - inject data into entropy pool using LFSR
+ */
+void lrng_pool_lfsr_u32(u32 value)
+{
+ _lrng_pool_lfsr_u32(&lrng_pool, value);
+}
+
+/**
+ * Hot code path - mix data into entropy pool
+ */
+void lrng_pool_add_irq(u32 irq_num)
+{
+ struct lrng_irq_info *irq_info = &lrng_pool.irq_info;
+
+ atomic_add(irq_num, &irq_info->num_events);
+
+ /*
+ * Once all DRNGs are fully seeded, the interrupt noise
+ * sources will not trigger any reseeding any more.
+ */
+ if (likely(lrng_pool.all_online_numa_node_seeded))
+ return;
+
+ /* Only try to reseed if the DRNG is alive. */
+ if (!lrng_get_available())
+ return;
+
+ /* Only trigger the DRNG reseed if we have collected enough IRQs. */
+ if (atomic_read_u32(&lrng_pool.irq_info.num_events) <
+ atomic_read_u32(&lrng_pool.irq_info.num_events_thresh))
+ return;
+
+ /* Ensure that the seeding only occurs once at any given time. */
+ if (lrng_pool_trylock())
+ return;
+
+ /* Seed the DRNG with IRQ noise. */
+ schedule_work(&lrng_state.lrng_seed_work);
+}
+
+void lrng_pool_add_entropy(u32 entropy_bits)
+{
+ lrng_pool_add_irq(lrng_entropy_to_data(entropy_bits));
+}
+
+/**
+ * Generate a hashed output of pool using the SP800-90A section 10.3.1 hash_df
+ * function
+ */
+u32 __lrng_pool_hash_df(const struct lrng_crypto_cb *crypto_cb, void *hash,
+ struct lrng_pool *pool, u8 *outbuf, u32 requested_bits)
+{
+ u32 digestsize, requested_bytes = requested_bits >> 3,
+ generated_bytes = 0;
+ u8 digest[64] __aligned(LRNG_KCAPI_ALIGN);
+
+ digestsize = crypto_cb->lrng_hash_digestsize(hash);
+ if (digestsize > sizeof(digest)) {
+ pr_err("Digest buffer too small\n");
+ return 0;
+ }
+
+ pool->counter = 1;
+ pool->requested_bits = cpu_to_be32(requested_bytes << 3);
+
+ while (requested_bytes) {
+ u32 tocopy = min_t(u32, requested_bytes, digestsize);
+
+ /* The counter must not wrap */
+ if (pool->counter == 0)
+ goto out;
+
+ if (crypto_cb->lrng_hash_buffer(hash, (u8 *)pool,
+ LRNG_POOL_SIZE_BYTES + 64,
+ digest))
+ goto out;
+
+ /* Copy the data out to the caller */
+ memcpy(outbuf + generated_bytes, digest, tocopy);
+ requested_bytes -= tocopy;
+ generated_bytes += tocopy;
+ pool->counter++;
+ }
+
+out:
+ /* Mix read data back into pool for backtracking resistance */
+ if (generated_bytes)
+ lrng_pool_lfsr(outbuf, generated_bytes);
+ memzero_explicit(digest, digestsize);
+ return (generated_bytes << 3);
+}
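+
+/*
+ * Worked example (assuming a 20-byte digest such as SHA-1): a request for
+ * 256 bits records requested_bits == 256 and requires two hash invocations.
+ * The first iteration copies 20 bytes, the second the remaining 12 bytes,
+ * with pool->counter stepping from 1 to 2 between the invocations as
+ * required by the SP800-90A hash_df construction.
+ */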
+
+static inline u32 lrng_pool_hash_df(const struct lrng_crypto_cb *crypto_cb,
+ void *hash, u8 *outbuf, u32 requested_bits)
+{
+ return __lrng_pool_hash_df(crypto_cb, hash, &lrng_pool, outbuf,
+ requested_bits);
+}
+
+/**
+ * Read the entropy pool out for use.
+ *
+ * This function handles the translation from the number of received interrupts
+ * into an entropy statement. The conversion depends on LRNG_IRQ_ENTROPY_BITS
+ * which defines how many interrupts must be received to obtain 256 bits of
+ * entropy. With this value, the function lrng_data_to_entropy converts a given
+ * data size (received interrupts, requested amount of data, etc.) into an
+ * entropy statement. lrng_entropy_to_data does the reverse.
+ *
+ * Both functions are agnostic about the type of data: when the number of
+ * interrupts is processed by these functions, the resulting entropy value is in
+ * bits as we assume the entropy of interrupts is measured in bits. When data is
+ * processed, the entropy value is in bytes as the data is measured in bytes.
+ *
+ * @outbuf: buffer to store data in with size LRNG_DRNG_SECURITY_STRENGTH_BYTES
+ * @requested_entropy_bits: requested bits of entropy -- the function will
+ * return at least this amount of entropy if available
+ * @entropy_retain: amount of entropy in bits that should be left in the pool
+ * @return: estimated entropy obtained from the IRQs
+ */
+static u32 lrng_get_pool(const struct lrng_crypto_cb *crypto_cb, void *hash,
+ u8 *outbuf, u32 requested_entropy_bits,
+ u32 entropy_retain)
+{
+ struct lrng_pool *pool = &lrng_pool;
+ struct lrng_state *state = &lrng_state;
+ struct lrng_irq_info *irq_info = &pool->irq_info;
+ unsigned long flags;
+
+ u32 irq_num_events_used, irq_num_events, avail_entropy_bits;
+
+ /* Only one get_pool operation may be in progress at any given time! */
+ spin_lock_irqsave(&pool->lock, flags);
+
+ /* How many unused interrupts are in entropy pool? */
+ irq_num_events = atomic_read_u32(&irq_info->num_events);
+ /* Convert available interrupts into entropy statement */
+ avail_entropy_bits = lrng_data_to_entropy(irq_num_events);
+
+ /* Cap available entropy to pool size */
+ avail_entropy_bits =
+ min_t(u32, avail_entropy_bits, LRNG_POOL_SIZE_BITS);
+
+ /* How much entropy do we need and how much can we use? */
+ if (unlikely(!state->lrng_fully_seeded)) {
+ /*
+ * During boot time, we read 256 bits of data carrying
+ * avail_entropy_bits of entropy. In case our conservative
+ * entropy estimate underestimates the available entropy,
+ * we can transport as much available entropy as
+ * possible. The entropy pool does not yet operate compliant
+ * with the German AIS 21/31 NTG.1 requirements.
+ */
+ requested_entropy_bits =
+ LRNG_DRNG_SECURITY_STRENGTH_BITS;
+ } else {
+ /* Provide all entropy above retaining level */
+ if (avail_entropy_bits < entropy_retain) {
+ requested_entropy_bits = 0;
+ goto out;
+ }
+ avail_entropy_bits -= entropy_retain;
+ requested_entropy_bits = min_t(u32, avail_entropy_bits,
+ requested_entropy_bits);
+ }
+
+ /* Hash is a compression function: we generate entropy amount of data */
+ requested_entropy_bits = round_down(requested_entropy_bits, 8);
+
+ requested_entropy_bits = lrng_pool_hash_df(crypto_cb, hash, outbuf,
+ requested_entropy_bits);
+
+ /* Boot time: After getting the full buffer adjust the entropy value. */
+ requested_entropy_bits = min_t(u32, avail_entropy_bits,
+ requested_entropy_bits);
+
+out:
+ /* Convert used entropy into interrupt number for subtraction */
+ irq_num_events_used = lrng_entropy_to_data(requested_entropy_bits);
+
+ /*
+ * The hash_df operation entropy assessment shows that the output
+ * entropy is one bit smaller than the input entropy. Therefore we
+ * account for this one bit of entropy here: if we have sufficient
+ * entropy in the LFSR, we say we used one bit of entropy more.
+ * Otherwise we reduce the amount of entropy we say we generated with
+ * the hash_df.
+ */
+ if (irq_num_events_used) {
+ if ((irq_num_events_used + LRNG_CONDITIONING_ENTROPY_LOSS) <=
+ lrng_entropy_to_data(avail_entropy_bits)) {
+ irq_num_events_used += LRNG_CONDITIONING_ENTROPY_LOSS;
+ } else {
+ if (unlikely(requested_entropy_bits <
+ LRNG_CONDITIONING_ENTROPY_LOSS))
+ requested_entropy_bits = 0;
+ else
+ requested_entropy_bits -=
+ LRNG_CONDITIONING_ENTROPY_LOSS;
+ }
+ }
+
+ /*
+ * New events might have arrived in the meanwhile and we don't
+ * want to throw them away unconditionally. On the other hand,
+ * these new events might have been mixed in before
+ * lrng_pool_hash_df() had been able to draw any entropy
+ * from the pool and thus, the pool capacity might have been
+ * exceeded at some point. Note that in theory, some events
+ * might get lost in between the atomic_read() and
+ * atomic_set() below. But that's fine, because it's no real
+ * concern while code preventing this would come at the cost of
+ * additional complexity. Likewise, some events which arrived
+ * after full or partial completion of the lrng_pool_hash_df()
+ * above might get unnecessarily thrown away by the min()
+ * operation below; the same argument applies there.
+ */
+ irq_num_events = atomic_read_u32(&irq_info->num_events);
+ irq_num_events = min_t(u32, irq_num_events,
+ lrng_entropy_to_data(LRNG_POOL_SIZE_BITS));
+ irq_num_events -= irq_num_events_used;
+ atomic_set(&irq_info->num_events, irq_num_events);
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ /* Obtain entropy statement in bits from the used entropy */
+ pr_debug("obtained %u bits of entropy from %u newly collected interrupts - not using %u interrupts\n",
+ requested_entropy_bits, irq_num_events_used, irq_num_events);
+
+ return requested_entropy_bits;
+}
+
+/* Fill the seed buffer with data from the noise sources */
+int lrng_fill_seed_buffer(const struct lrng_crypto_cb *crypto_cb, void *hash,
+ struct entropy_buf *entropy_buf, u32 entropy_retain)
+{
+ struct lrng_state *state = &lrng_state;
+ u32 total_entropy_bits = 0;
+
+ /* Require at least 128 bits of entropy for any reseed. */
+ if (state->lrng_fully_seeded &&
+ (lrng_avail_entropy() <
+ lrng_slow_noise_req_entropy(LRNG_MIN_SEED_ENTROPY_BITS +
+ LRNG_CONDITIONING_ENTROPY_LOSS) +
+ entropy_retain))
+ goto wakeup;
+
+ /*
+ * Concatenate the output of the noise sources. This would be the
+ * spot to add an entropy extractor logic if desired. Note, this
+ * has the ability to collect entropy equal or larger than the DRNG
+ * strength.
+ */
+ total_entropy_bits = lrng_get_pool(crypto_cb, hash, entropy_buf->a,
+ LRNG_DRNG_SECURITY_STRENGTH_BITS,
+ entropy_retain);
+ total_entropy_bits += lrng_get_arch(entropy_buf->b);
+ total_entropy_bits += lrng_get_jent(entropy_buf->c,
+ LRNG_DRNG_SECURITY_STRENGTH_BYTES);
+
+ /* also reseed the DRNG with the current time stamp */
+ entropy_buf->now = random_get_entropy();
+
+ /* allow external entropy provider to provide seed */
+ lrng_state_exseed_allow_all();
+
+wakeup:
+ /*
+ * Shall we wake up user space writers? This location ensures that
+ * the user space provider does not dominate the internal noise
+ * sources: if the first call of this function finds sufficient
+ * entropy in the entropy pool, it will not trigger the wakeup. This
+ * implies that when the next /dev/urandom read happens, the entropy
+ * pool is drained.
+ */
+ lrng_writer_wakeup();
+
+ return total_entropy_bits;
+}
+
+/**
+ * Set the slow noise source reseed trigger threshold. The initial threshold
+ * is set to the minimum data size that can be read from the pool: a word. Upon
+ * reaching this value, the next seed threshold of 128 bits is set followed
+ * by 256 bits.
+ *
+ * @seed_bits: amount of entropy in bits just injected into the DRNG
+ */
+void lrng_init_ops(u32 seed_bits)
+{
+ struct lrng_state *state = &lrng_state;
+
+ if (state->lrng_operational)
+ return;
+
+ /* DRNG is seeded with full security strength */
+ if (state->lrng_fully_seeded) {
+ state->lrng_operational = lrng_sp80090b_startup_complete();
+ lrng_process_ready_list();
+ lrng_init_wakeup();
+ } else if (seed_bits >= LRNG_FULL_SEED_ENTROPY_BITS) {
+ invalidate_batched_entropy();
+ state->lrng_fully_seeded = true;
+ state->lrng_operational = lrng_sp80090b_startup_complete();
+ state->lrng_min_seeded = true;
+ pr_info("LRNG fully seeded with %u bits of entropy\n",
+ seed_bits);
+ lrng_set_entropy_thresh(LRNG_FULL_SEED_ENTROPY_BITS +
+ LRNG_CONDITIONING_ENTROPY_LOSS);
+ lrng_process_ready_list();
+ lrng_init_wakeup();
+
+ } else if (!state->lrng_min_seeded) {
+
+ /* DRNG is seeded with at least 128 bits of entropy */
+ if (seed_bits >= LRNG_MIN_SEED_ENTROPY_BITS) {
+ invalidate_batched_entropy();
+ state->lrng_min_seeded = true;
+ pr_info("LRNG minimally seeded with %u bits of entropy\n",
+ seed_bits);
+ lrng_set_entropy_thresh(
+ lrng_slow_noise_req_entropy(
+ LRNG_FULL_SEED_ENTROPY_BITS +
+ LRNG_CONDITIONING_ENTROPY_LOSS));
+ lrng_process_ready_list();
+ lrng_init_wakeup();
+
+ /* DRNG is seeded with at least LRNG_INIT_ENTROPY_BITS bits */
+ } else if (seed_bits >= LRNG_INIT_ENTROPY_BITS) {
+ pr_info("LRNG initial entropy level %u bits of entropy\n",
+ seed_bits);
+ lrng_set_entropy_thresh(
+ lrng_slow_noise_req_entropy(
+ LRNG_MIN_SEED_ENTROPY_BITS +
+ LRNG_CONDITIONING_ENTROPY_LOSS));
+ }
+ }
+}
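+
+/*
+ * Illustrative progression through lrng_init_ops() (assuming a minimum seed
+ * threshold of 128 bits and a full seed threshold of 256 bits): a first seed
+ * of at least the initial entropy level raises the reseed threshold towards
+ * 128 bits plus the conditioning loss; reaching 128 bits marks the LRNG as
+ * minimally seeded and raises the threshold towards 256 bits; reaching 256
+ * bits marks it as fully seeded and, once the SP800-90B startup testing has
+ * completed, as operational.
+ */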
+
+int __init rand_initialize(void)
+{
+ ktime_t now_time = ktime_get_real();
+ unsigned int i, rand;
+
+ lrng_pool_lfsr_u32(now_time);
+ for (i = 0; i < LRNG_POOL_SIZE; i++) {
+ if (!arch_get_random_seed_int(&rand) &&
+ !arch_get_random_int(&rand))
+ rand = random_get_entropy();
+ lrng_pool_lfsr_u32(rand);
+ }
+ lrng_pool_lfsr_nonaligned((u8 *)utsname(), sizeof(*(utsname())));
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG Slow Noise Source: Interrupt data collection
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#include <asm/irq_regs.h>
+#include <asm/ptrace.h>
+#include <linux/random.h>
+
+#include "lrng_internal.h"
+#include "lrng_sw_noise.h"
+
+/* Holder of time stamps before mixing them into the entropy pool */
+static DEFINE_PER_CPU(u32 [LRNG_TIME_ARRAY_SIZE], lrng_time);
+static DEFINE_PER_CPU(u32, lrng_time_ptr);
+static DEFINE_PER_CPU(u8, lrng_time_irqs);
+
+/**
+ * Batching up of entropy in per-CPU array before injecting into entropy pool.
+ */
+static inline void lrng_time_process(void)
+{
+ u32 i, ptr, now_time = random_get_entropy() &
+ (likely(lrng_state_fully_seeded()) ?
+ LRNG_TIME_SLOTSIZE_MASK : (u32)-1);
+ enum lrng_health_res health_test;
+
+ /* Ensure sufficient space in lrng_time_irqs */
+ BUILD_BUG_ON(LRNG_TIME_NUM_VALUES >= (1 << (sizeof(u8) << 3)));
+ BUILD_BUG_ON(LRNG_TIME_ARRAY_MEMBER_BITS % LRNG_TIME_SLOTSIZE_BITS);
+ /* Ensure consistency of values */
+ BUILD_BUG_ON(LRNG_TIME_ARRAY_MEMBER_BITS != sizeof(lrng_time[0]) << 3);
+
+ if (lrng_raw_entropy_store(now_time))
+ return;
+
+ health_test = lrng_health_test(now_time);
+ if (health_test > lrng_health_fail_use)
+ return;
+
+ /* During boot time, we mix the full time stamp directly into LFSR */
+ if (unlikely(!lrng_state_fully_seeded())) {
+ lrng_pool_lfsr_u32(now_time);
+ if (health_test == lrng_health_pass)
+ lrng_pool_add_irq(1);
+ return;
+ }
+
+ ptr = this_cpu_inc_return(lrng_time_ptr) & LRNG_TIME_WORD_MASK;
+ this_cpu_or(lrng_time[lrng_time_idx2array(ptr)],
+ lrng_time_slot_val(now_time & LRNG_TIME_SLOTSIZE_MASK,
+ lrng_time_idx2slot(ptr)));
+
+ /* Interrupt delivers entropy if health test passes */
+ if (health_test == lrng_health_pass)
+ this_cpu_inc(lrng_time_irqs);
+
+ /* Only mix the buffer of time stamps into LFSR when wrapping */
+ if (ptr < LRNG_TIME_WORD_MASK)
+ return;
+
+ for (i = 0; i < LRNG_TIME_ARRAY_SIZE; i++) {
+ lrng_pool_lfsr_u32(this_cpu_read(lrng_time[i]));
+ this_cpu_write(lrng_time[i], 0);
+ }
+ lrng_pool_add_irq(this_cpu_read(lrng_time_irqs));
+ this_cpu_write(lrng_time_irqs, 0);
+}
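+
+/*
+ * Worked example (assuming !CONFIG_BASE_SMALL, i.e. 64 time stamp slots held
+ * in 16 32-bit array members): once the LRNG is fully seeded, each interrupt
+ * only stores the 8 LSBs of its time stamp into the per-CPU array. Only
+ * every 64th interrupt wraps the pointer and flushes the 16 array words plus
+ * the accumulated count of interrupts that passed the health test into the
+ * entropy pool.
+ */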
+
+/**
+ * Hot code path - Callback for interrupt handler
+ */
+void add_interrupt_randomness(int irq, int irq_flags)
+{
+ lrng_time_process();
+
+ if (!lrng_pool_highres_timer()) {
+ struct pt_regs *regs = get_irq_regs();
+ static atomic_t reg_idx = ATOMIC_INIT(0);
+ u64 ip;
+
+ lrng_pool_lfsr_u32(jiffies);
+ lrng_pool_lfsr_u32(irq);
+ lrng_pool_lfsr_u32(irq_flags);
+
+ if (regs) {
+ u32 *ptr = (u32 *)regs;
+ int reg_ptr = atomic_add_return_relaxed(1, &reg_idx);
+ size_t n = (sizeof(struct pt_regs) / sizeof(u32));
+
+ ip = instruction_pointer(regs);
+ lrng_pool_lfsr_u32(*(ptr + (reg_ptr % n)));
+ } else {
+ ip = _RET_IP_;
+ }
+
+ lrng_pool_lfsr_u32(ip >> 32);
+ lrng_pool_lfsr_u32(ip);
+ }
+}
+EXPORT_SYMBOL(add_interrupt_randomness);
new file mode 100644
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * LRNG Slow Noise Source: Time stamp array handling
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+/*
+ * To limit the impact on the interrupt handling, the LRNG concatenates
+ * entropic LSB parts of the time stamps in a per-CPU array and only
+ * injects them into the entropy pool when the array is full.
+ */
+
+/* Store multiple integers in one u32 */
+#define LRNG_TIME_SLOTSIZE_BITS (8)
+#define LRNG_TIME_SLOTSIZE_MASK ((1 << LRNG_TIME_SLOTSIZE_BITS) - 1)
+#define LRNG_TIME_ARRAY_MEMBER_BITS (4 << 3)
+#define LRNG_TIME_SLOTS_PER_UINT (LRNG_TIME_ARRAY_MEMBER_BITS / \
+ LRNG_TIME_SLOTSIZE_BITS)
+
+/*
+ * Number of time values to store in the array - in small environments
+ * only one atomic_t variable per CPU is used.
+ */
+#define LRNG_TIME_NUM_VALUES (CONFIG_BASE_SMALL ? \
+ LRNG_TIME_SLOTS_PER_UINT : 64)
+/* Mask to wrap the per-CPU index into the time stamp array */
+#define LRNG_TIME_WORD_MASK (LRNG_TIME_NUM_VALUES - 1)
+
+#define LRNG_TIME_SLOTS_MASK (LRNG_TIME_SLOTS_PER_UINT - 1)
+#define LRNG_TIME_ARRAY_SIZE (LRNG_TIME_NUM_VALUES / \
+ LRNG_TIME_SLOTS_PER_UINT)
+
+/* Starting bit index of slot */
+static inline unsigned int lrng_time_slot2bitindex(unsigned int slot)
+{
+ return (LRNG_TIME_SLOTSIZE_BITS * slot);
+}
+
+/* Convert index into the array index */
+static inline unsigned int lrng_time_idx2array(unsigned int idx)
+{
+ return idx / LRNG_TIME_SLOTS_PER_UINT;
+}
+
+/* Convert index into the slot of a given array index */
+static inline unsigned int lrng_time_idx2slot(unsigned int idx)
+{
+ return idx & LRNG_TIME_SLOTS_MASK;
+}
+
+/* Convert value into slot value */
+static inline unsigned int lrng_time_slot_val(unsigned int val,
+ unsigned int slot)
+{
+ return val << lrng_time_slot2bitindex(slot);
+}
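+
+/*
+ * Worked example for the helpers above (values follow directly from the
+ * macros and are shown for illustration only): with 8-bit slots in 32-bit
+ * array members, LRNG_TIME_SLOTS_PER_UINT is 4. An index of 5 addresses
+ * array member 1 (lrng_time_idx2array) and slot 1 within it
+ * (lrng_time_idx2slot); storing the value 0xab there yields
+ * lrng_time_slot_val(0xab, 1) == 0xab00, which is ORed into the array word.
+ */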
new file mode 100644
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/*
+ * Copyright (C) 2018 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#ifndef _LRNG_H
+#define _LRNG_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+/**
+ * struct lrng_crypto_cb - cryptographic callback functions
+ * @lrng_drng_name: Name of DRNG
+ * @lrng_hash_name: Name of Hash used for reading entropy pool
+ * @lrng_drng_alloc: Allocate DRNG -- the provided integer should be
+ * used for sanity checks.
+ * return: allocated data structure or ERR_PTR on
+ * error
+ * @lrng_drng_dealloc: Deallocate DRNG
+ * @lrng_drng_seed_helper: Seed the DRNG with data of arbitrary length
+ * drng: is pointer to data structure allocated
+ * with lrng_drng_alloc
+ * return: >= 0 on success, < 0 on error
+ * @lrng_drng_generate_helper: Generate random numbers from the DRNG with
+ * arbitrary length
+ * @lrng_drng_generate_helper_full: Generate random numbers from the DRNG with
+ * arbitrary length where the output is
+ * capable of providing 1 bit of entropy per
+ * data bit.
+ * return: generated number of bytes,
+ * < 0 on error
+ * @lrng_hash_alloc: Allocate the hash for reading the entropy pool
+ * return: allocated data structure (NULL is
+ * also a valid success return) or ERR_PTR on error
+ * @lrng_hash_dealloc: Deallocate Hash
+ * @lrng_hash_digestsize: Return the digestsize for the used hash to read
+ * out entropy pool
+ * hash: is pointer to data structure allocated
+ * with lrng_hash_alloc
+ * return: size of digest of hash in bytes
+ * @lrng_hash_buffer: Generate hash
+ * hash: is pointer to data structure allocated
+ * with lrng_hash_alloc
+ * return: 0 on success, < 0 on error
+ */
+struct lrng_crypto_cb {
+ const char *(*lrng_drng_name)(void);
+ const char *(*lrng_hash_name)(void);
+ void *(*lrng_drng_alloc)(u32 sec_strength);
+ void (*lrng_drng_dealloc)(void *drng);
+ int (*lrng_drng_seed_helper)(void *drng, const u8 *inbuf, u32 inbuflen);
+ int (*lrng_drng_generate_helper)(void *drng, u8 *outbuf, u32 outbuflen);
+ int (*lrng_drng_generate_helper_full)(void *drng, u8 *outbuf,
+ u32 outbuflen);
+ void *(*lrng_hash_alloc)(const u8 *key, u32 keylen);
+ void (*lrng_hash_dealloc)(void *hash);
+ u32 (*lrng_hash_digestsize)(void *hash);
+ int (*lrng_hash_buffer)(void *hash, const u8 *inbuf, u32 inbuflen,
+ u8 *digest);
+};
+
+/* Register cryptographic backend */
+#ifdef CONFIG_LRNG_DRNG_SWITCH
+int lrng_set_drng_cb(const struct lrng_crypto_cb *cb);
+#else /* CONFIG_LRNG_DRNG_SWITCH */
+static inline int
+lrng_set_drng_cb(const struct lrng_crypto_cb *cb) { return -EOPNOTSUPP; }
+#endif /* CONFIG_LRNG_DRNG_SWITCH */
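+
+/*
+ * Illustrative registration sketch using hypothetical backend names (the
+ * callbacks merely have to match the prototypes of struct lrng_crypto_cb):
+ *
+ *	static const struct lrng_crypto_cb my_crypto_cb = {
+ *		.lrng_drng_name			= my_drng_name,
+ *		.lrng_hash_name			= my_hash_name,
+ *		.lrng_drng_alloc		= my_drng_alloc,
+ *		.lrng_drng_dealloc		= my_drng_dealloc,
+ *		.lrng_drng_seed_helper		= my_drng_seed,
+ *		.lrng_drng_generate_helper	= my_drng_generate,
+ *		.lrng_drng_generate_helper_full	= my_drng_generate_full,
+ *		.lrng_hash_alloc		= my_hash_alloc,
+ *		.lrng_hash_dealloc		= my_hash_dealloc,
+ *		.lrng_hash_digestsize		= my_hash_digestsize,
+ *		.lrng_hash_buffer		= my_hash_buffer,
+ *	};
+ *
+ *	int ret = lrng_set_drng_cb(&my_crypto_cb);
+ *
+ * lrng_set_drng_cb() is only available with CONFIG_LRNG_DRNG_SWITCH; the
+ * stub above returns -EOPNOTSUPP otherwise.
+ */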
+
+#endif /* _LRNG_H */