@@ -610,6 +610,11 @@ u64 get_random_u64(void)
 
 	warn_unseeded_randomness(&previous);
 
+	if (!crng_ready()) {
+		_get_random_bytes(&ret, sizeof(ret));
+		return ret;
+	}
+
 	local_lock_irqsave(&batched_entropy_u64.lock, flags);
 	batch = raw_cpu_ptr(&batched_entropy_u64);
 
@@ -644,6 +649,11 @@ u32 get_random_u32(void)
 
 	warn_unseeded_randomness(&previous);
 
+	if (!crng_ready()) {
+		_get_random_bytes(&ret, sizeof(ret));
+		return ret;
+	}
+
 	local_lock_irqsave(&batched_entropy_u32.lock, flags);
 	batch = raw_cpu_ptr(&batched_entropy_u32);
 
@@ -820,10 +830,8 @@ static void credit_entropy_bits(size_t nbits)
 	if (unlikely(crng_init == 0 && entropy_count >= POOL_FAST_INIT_BITS)) {
 		spin_lock_irqsave(&base_crng.lock, flags);
-		if (crng_init == 0) {
-			++base_crng.generation;
+		if (crng_init == 0)
 			crng_init = 1;
-		}
 		spin_unlock_irqrestore(&base_crng.lock, flags);
 	}
@@ -1029,7 +1037,6 @@ int __init rand_initialize(void)
 	_mix_pool_bytes(utsname(), sizeof(*(utsname())));
 	extract_entropy(base_crng.key, sizeof(base_crng.key));
-	++base_crng.generation;
 	if (arch_init && trust_cpu && !crng_ready()) {
 		crng_init = 2;

It's too hard to keep the batches synchronized, and pointless anyway,
since in !crng_ready(), we're updating the base_crng key really often,
where batching only hurts. So instead, if the crng isn't ready, just
call into get_random_bytes(). At this stage nothing is performance
critical anyhow.

Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/char/random.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)
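
For illustration, here is a minimal userspace sketch of the control flow
this patch produces. It is not the kernel code: crng_seeded, fill_random()
and the per-thread batch below are simplified stand-ins for crng_ready(),
_get_random_bytes() and the per-CPU batched entropy, and the kernel's
locking and generation tracking are omitted.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static bool crng_seeded; /* stand-in for crng_ready() */

/* Stand-in for _get_random_bytes(): always draws fresh output. */
static void fill_random(void *buf, size_t len)
{
	unsigned char *p = buf;

	while (len--)
		*p++ = (unsigned char)rand();
}

struct batch_u64 {
	uint64_t entropy[16];
	unsigned int position; /* next unused slot; 16 means empty */
};

static struct batch_u64 batch = { .position = 16 };

static uint64_t get_random_u64_sketch(void)
{
	uint64_t ret;

	/*
	 * The point of the patch: while unseeded, bypass the batch
	 * entirely and draw fresh bytes, so no stale pre-seed words
	 * sit in a per-CPU cache while the base key changes often.
	 */
	if (!crng_seeded) {
		fill_random(&ret, sizeof(ret));
		return ret;
	}

	/* Seeded path: refill the batch when empty, then consume it. */
	if (batch.position >= 16) {
		fill_random(batch.entropy, sizeof(batch.entropy));
		batch.position = 0;
	}
	ret = batch.entropy[batch.position];
	batch.entropy[batch.position++] = 0; /* erase the consumed word */
	return ret;
}

int main(void)
{
	printf("unseeded: %016llx\n",
	       (unsigned long long)get_random_u64_sketch());
	crng_seeded = true;
	printf("seeded:   %016llx\n",
	       (unsigned long long)get_random_u64_sketch());
	return 0;
}

The sketch also suggests why the ++base_crng.generation bumps can be
dropped in the hunks above: those early bumps only served to invalidate
batches filled before the crng was ready, and once the unseeded path stops
consuming batches there is nothing left to invalidate; as the message
says, keeping the batches synchronized was both hard and pointless.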