
[RFC,v1] random: use static branch for crng_ready()

Message ID 20220503134052.646325-1-Jason@zx2c4.com (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series [RFC,v1] random: use static branch for crng_ready()

Commit Message

Jason A. Donenfeld May 3, 2022, 1:40 p.m. UTC
Since crng_ready() is only false briefly during initialization and then
forever after becomes true, we don't need to keep evaluating it
afterwards, making it a prime candidate for a static branch.

One complication, however, is that the state change happens in a
particular call to credit_entropy_bits(), which might be made from
atomic context, where a static branch cannot be flipped (enabling it
takes locks and may sleep while patching code). So rather than flipping
it there, we keep around the same state variable we had before, and the
next time crng_ready() is evaluated from non-atomic context, we lazily
flip the static branch then.
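
For reference, a minimal sketch of the static key API involved; the
names (my_feature_key and friends) are illustrative only and not part
of this patch:

#include <linux/jump_label.h>

/* Starts "false": the test below compiles down to a patchable branch. */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

static int my_hot_path(void)
{
	/* No load or compare at runtime, just a nop/jump that gets patched. */
	if (static_branch_likely(&my_feature_key))
		return 1;	/* "ready" path, taken forever after */
	return 0;		/* "not ready" path, only early in boot */
}

static void my_enable(void)
{
	/*
	 * Rewrites the branch sites. This takes locks and may sleep, so
	 * it must not be called from atomic context -- which is exactly
	 * the complication described above.
	 */
	static_branch_enable(&my_feature_key);
}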

This is an RFC for now because it seems a bit complicated and fiddly,
and potentially not worth the complexity. I'm interested to hear if
people have opinions about this or if there's a better way to do it.

Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
 drivers/char/random.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

Comments

Jason A. Donenfeld May 11, 2022, 11:41 a.m. UTC | #1
On Tue, May 03, 2022 at 03:40:52PM +0200, Jason A. Donenfeld wrote:
> +static bool crng_ready_slowpath(void)
> +{
> +	if (crng_init <= 1)
> +		return false;
> +	if (in_atomic() || irqs_disabled() || cmpxchg(&crng_init, 2, 3) != 2)
> +		return true;

Nobody chimed in here, but for posterity I thought I should point out
that this approach actually won't work, since in_atomic() doesn't work
with CONFIG_PREEMPT_COUNT=n kernels.

So back to the drawing board in trying to figure out the best way to do
this...
 
Jason
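
For reference, the reason is roughly the following, paraphrased and
simplified from include/linux/preempt.h (exact definitions vary by
kernel version and configuration):

#define in_atomic()	(preempt_count() != 0)

#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)
#else
/*
 * With CONFIG_PREEMPT_COUNT=n the counter is never maintained, so
 * in_atomic() can report "not atomic" even while a spinlock is held.
 */
#define preempt_disable()	barrier()
#endif
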
Herbert Xu May 12, 2022, 4:35 a.m. UTC | #2
Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> On Tue, May 03, 2022 at 03:40:52PM +0200, Jason A. Donenfeld wrote:
>> +static bool crng_ready_slowpath(void)
>> +{
>> +     if (crng_init <= 1)
>> +             return false;
>> +     if (in_atomic() || irqs_disabled() || cmpxchg(&crng_init, 2, 3) != 2)
>> +             return true;
> 
> Nobody chimed in here, but for posterity I thought I should point out
> that this approach actually won't work, since in_atomic() doesn't work
> with CONFIG_PREEMPT_COUNT=n kernels.
> 
> So back to the drawing board in trying to figure out the best way to do
> this...

Well the standard solution to code paths that require sleeping is
to use a work queue.  So any reason why you can't just schedule
a work to do the static_branch_enable?

Cheers,
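
A minimal sketch of that suggestion, with placeholder names (the reply
below works the same idea into random.c): schedule_work() is safe to
call from atomic context, and the work item then runs in process
context, where static_branch_enable() is allowed to sleep.

#include <linux/jump_label.h>
#include <linux/workqueue.h>

static DEFINE_STATIC_KEY_FALSE(my_key);

static void my_set_ready(struct work_struct *work)
{
	/* Runs in process context, so patching the branch is fine here. */
	static_branch_enable(&my_key);
}
static DECLARE_WORK(my_set_ready_work, my_set_ready);

static void my_mark_ready(void)	/* may be called from atomic context */
{
	/* Queuing is cheap and atomic-safe; the actual flip happens later. */
	schedule_work(&my_set_ready_work);
}
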
Jason A. Donenfeld May 12, 2022, 11:18 a.m. UTC | #3
Hi Herbert,

On Thu, May 12, 2022 at 12:35:51PM +0800, Herbert Xu wrote:
> Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> > On Tue, May 03, 2022 at 03:40:52PM +0200, Jason A. Donenfeld wrote:
> >> +static bool crng_ready_slowpath(void)
> >> +{
> >> +     if (crng_init <= 1)
> >> +             return false;
> >> +     if (in_atomic() || irqs_disabled() || cmpxchg(&crng_init, 2, 3) != 2)
> >> +             return true;
> > 
> > Nobody chimed in here, but for posterity I thought I should point out
> > that this approach actually won't work, since in_atomic() doesn't work
> > with CONFIG_PREEMPT_COUNT=n kernels.
> > 
> > So back to the drawing board in trying to figure out the best way to do
> > this...
> 
> Well the standard solution to code paths that require sleeping is
> to use a work queue.  So any reason why you can't just schedule
> a work to do the static_branch_enable?

Yea, that would work, but it ends up messier than most cases, because
the transition from not-init'd to init'd can happen before workqueues
are even initialized, which means some logic is needed to detect this
and defer the static key change until later in initialization. I
thought this was pretty "meh", hence looking for other solutions. But
maybe you're right and it's the best we can do here.

For the sake of discussion, below is approximately what the code for
that would look like, modulo whatever details I'm missing. I guess it's
not _that_ bad if I just rely on the obscure property that
rand_initialize() is called before IRQs are enabled system-wide but
after system_wq has been initialized, though I hate having the deferred
state be implicit like that. If we go that route, it'll need comments I
suppose.

Jason

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8fa7a5b9aa93..809ee3c1cf78 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -79,7 +79,8 @@ static enum {
 	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
 	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
 } crng_init = CRNG_EMPTY;
-#define crng_ready() (likely(crng_init >= CRNG_READY))
+static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
+#define crng_ready() (static_branch_likely(&crng_is_ready) || unlikely(crng_init >= CRNG_READY))
 /* Various types of waiters for crng_init->CRNG_READY transition. */
 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct fasync_struct *fasync;
@@ -109,6 +110,12 @@ bool rng_is_initialized(void)
 }
 EXPORT_SYMBOL(rng_is_initialized);

+static void crng_set_ready(struct work_struct *work)
+{
+	static_branch_enable(&crng_is_ready);
+}
+static DECLARE_WORK(crng_set_ready_work, crng_set_ready);
+
 /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
 static void try_to_generate_entropy(void);

@@ -268,6 +275,8 @@ static void crng_reseed(void)
 		++next_gen;
 	WRITE_ONCE(base_crng.generation, next_gen);
 	WRITE_ONCE(base_crng.birth, jiffies);
+	if (!crng_ready() && system_wq)
+		schedule_work(&crng_set_ready_work);
 	crng_init = CRNG_READY;
 	spin_unlock_irqrestore(&base_crng.lock, flags);
 	memzero_explicit(key, sizeof(key));
@@ -945,9 +954,18 @@ int __init rand_initialize(void)
 	_mix_pool_bytes(&now, sizeof(now));
 	_mix_pool_bytes(utsname(), sizeof(*(utsname())));

-	if (crng_ready())
+	if (crng_ready()) {
+		/*
+		 * If rand_initialize() is called with the crng already
+		 * initialized, then it means it was done so prior to
+		 * system_wq being available, which means we should now
+		 * schedule the work to change the static key.
+		 */
+		schedule_work(&crng_set_ready_work);
+
+		/* Immediately use the above architectural contributions. */
 		crng_reseed();
-	else if (arch_init && trust_cpu)
+	} else if (arch_init && trust_cpu)
 		credit_init_bits(BLAKE2S_BLOCK_SIZE * 8);

 	WARN_ON(register_pm_notifier(&pm_notifier));

Patch

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 845f610b6611..977093022430 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -75,12 +75,13 @@ 
  * crng_init =  0 --> Uninitialized
  *		1 --> Initialized
  *		2 --> Initialized from input_pool
+ *		3 --> Initialized from input_pool and static key set
  *
  * crng_init is protected by base_crng->lock, and only increases
- * its value (from 0->1->2).
+ * its value (from 0->1->2->3).
  */
 static int crng_init = 0;
-#define crng_ready() (likely(crng_init > 1))
+static DEFINE_STATIC_KEY_FALSE(crng_ready_static);
 /* Various types of waiters for crng_init->2 transition. */
 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct fasync_struct *fasync;
@@ -96,6 +97,17 @@  static int ratelimit_disable __read_mostly;
 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
 
+static bool crng_ready_slowpath(void)
+{
+	if (crng_init <= 1)
+		return false;
+	if (in_atomic() || irqs_disabled() || cmpxchg(&crng_init, 2, 3) != 2)
+		return true;
+	static_branch_enable(&crng_ready_static);
+	return true;
+}
+#define crng_ready() (static_branch_likely(&crng_ready_static) || unlikely(crng_ready_slowpath()))
+
 /*
  * Returns whether or not the input pool has been seeded and thus guaranteed
  * to supply cryptographically secure random numbers. This applies to: the