
[v4,01/13] random: invalidate batched entropy after crng init

Message ID 20170606174804.31124-2-Jason@zx2c4.com (mailing list archive)
State: New, archived

Commit Message

Jason A. Donenfeld June 6, 2017, 5:47 p.m. UTC
It's possible that get_random_{u32,u64} is used before the crng has
initialized, in which case its output might not be cryptographically
secure. To address that problem directly, this patch set introduces the
*_wait variety of functions, but even with those there's a subtle issue:
what happens to the batched entropy that was generated before
initialization? Prior to this commit, it would stick around, supplying bad
numbers. After this commit, we force the entropy to be re-extracted
after each phase of crng initialization.

To avoid a race condition on the position counter, we introduce a
simple rwlock for this invalidation. The lock is only taken during the
awkward transition period; once things are all set up, we stop using
it, so it has no impact on steady-state performance.

This should probably be backported to 4.11.

(Also: adding my copyright to the top. Between the patch series from
January, this patch, and the ones that come after, I think there's a
relevant amount of my code in here to justify adding my name to the top.)

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/char/random.c | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

Comments

Theodore Ts'o June 7, 2017, 11:58 p.m. UTC | #1
On Tue, Jun 06, 2017 at 07:47:52PM +0200, Jason A. Donenfeld wrote:
> It's possible that get_random_{u32,u64} is used before the crng has
> initialized, in which case its output might not be cryptographically
> secure. [...]
> 
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Thanks, applied.  This will be on the for_stable branch that I will be
sending to Linus sometime during 4.12-rcX.

					- Ted
Jason A. Donenfeld June 8, 2017, 12:52 a.m. UTC | #2
On Thu, Jun 8, 2017 at 1:58 AM, Theodore Ts'o <tytso@mit.edu> wrote:
> Thanks, applied.  This will be on the for_stable branch that I will be
> sending to Linus sometime during 4.12-rcX.

I think you might have just missed the kbuild test robot complaining
about a spurious compiler warning when using an ancient gcc for the
sh platform. I fixed that bug in v5, which I posted about an hour
ago. The only change was to a single commit, so you can just
cherry-pick it as you wish:

https://git.kernel.org/pub/scm/linux/kernel/git/zx2c4/linux.git/patch/?id=2f390fd961d1f97738d7af4f4797542e05f4607e

Other than that one commit, the series is the same.

Or, if it's easier, the tag for-random-4.12 in
git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random.git is for you.

Patch

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0ab024918907..2291e6224ed3 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@ 
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
@@ -762,6 +765,8 @@  static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int		i;
@@ -799,6 +804,7 @@  static int crng_fast_load(const char *cp, size_t len)
 		cp++; crng_init_cnt++; len--;
 	}
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@  static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -2019,6 +2026,7 @@  struct batched_entropy {
 	};
 	unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2029,6 +2037,8 @@  static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2041,11 +2051,15 @@  u64 get_random_u64(void)
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2055,22 +2069,45 @@  static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu (cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.