@@ -1162,10 +1162,12 @@ EXPORT_SYMBOL_GPL(add_bootloader_randomness);
struct fast_pool {
unsigned long pool[16 / sizeof(long)];
+ struct work_struct mix;
unsigned long last;
+ atomic_t count;
u16 reg_idx;
- u8 count;
};
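+/*
+ * The high bit of ->count is set while a mixing worker is queued, so that
+ * the hard irq path does not schedule mix_interrupt_randomness() twice.
+ */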
+#define FAST_POOL_MIX_INFLIGHT (1U << 31)
/*
* This is a fast mixing routine used by the interrupt randomness
@@ -1214,12 +1216,57 @@ static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
return *ptr;
}
+static void mix_interrupt_randomness(struct work_struct *work)
+{
+ struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
+ unsigned long pool[ARRAY_SIZE(fast_pool->pool)];
+ unsigned int count_snapshot;
+ size_t i;
+
+ /* Check to see if we're running on the wrong CPU due to hotplug. */
+ migrate_disable();
+ if (fast_pool != this_cpu_ptr(&irq_randomness)) {
+ migrate_enable();
+ /*
+ * If we are unlucky enough to have been moved to another CPU
+ * during CPU hotplug, then we set our count to zero atomically
+ * so that when the original CPU comes back online, it can
+ * enqueue work again. The _release here pairs with the
+ * atomic_inc_return_acquire in add_interrupt_randomness().
+ */
+ atomic_set_release(&fast_pool->count, 0);
+ return;
+ }
+
+ /*
+ * Copy the pool to the stack so that the mixer always has a
+ * consistent view. It's extremely unlikely but possible that
+ * this 2- or 4-word read is interrupted by an irq, but in case
+ * it is, we double check that count stays the same.
+ */
+ do {
+ count_snapshot = (unsigned int)atomic_read(&fast_pool->count);
+ for (i = 0; i < ARRAY_SIZE(pool); ++i)
+ pool[i] = READ_ONCE(fast_pool->pool[i]);
+ } while (count_snapshot != (unsigned int)atomic_read(&fast_pool->count));
+
+ /* We take care to zero out the count only after we're done reading the pool. */
+ atomic_set(&fast_pool->count, 0);
+ fast_pool->last = jiffies;
+ migrate_enable();
+
+ mix_pool_bytes(pool, sizeof(pool));
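+ /* Award one bit for the contents of the fast pool. */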
+ credit_entropy_bits(1);
+ memzero_explicit(pool, sizeof(pool));
+}
+
void add_interrupt_randomness(int irq)
{
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
cycles_t cycles = random_get_entropy();
+ unsigned int new_count;
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
@@ -1235,12 +1282,13 @@ void add_interrupt_randomness(int irq)
}
fast_mix((u32 *)fast_pool->pool);
- ++fast_pool->count;
+ /* The _acquire here pairs with the atomic_set_release in mix_interrupt_randomness(). */
+ new_count = (unsigned int)atomic_inc_return_acquire(&fast_pool->count);
if (unlikely(crng_init == 0)) {
- if (fast_pool->count >= 64 &&
+ if (new_count >= 64 &&
crng_fast_load(fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
- fast_pool->count = 0;
+ atomic_set(&fast_pool->count, 0);
fast_pool->last = now;
/*
@@ -1254,20 +1302,16 @@ void add_interrupt_randomness(int irq)
return;
}
- if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
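+ /* A worker is already queued to mix and credit this pool; nothing to do. */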
+ if (new_count & FAST_POOL_MIX_INFLIGHT)
return;
- if (!spin_trylock(&input_pool.lock))
+ if (new_count < 64 && !time_after(now, fast_pool->last + HZ))
return;
- fast_pool->last = now;
- _mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&input_pool.lock);
-
- fast_pool->count = 0;
-
- /* Award one bit for the contents of the fast pool. */
- credit_entropy_bits(1);
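+ /*
+ * The work item is initialized lazily on first use and queued on the
+ * current CPU, so that the worker normally consumes this CPU's pool.
+ */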
+ if (unlikely(!fast_pool->mix.func))
+ INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
+ atomic_or(FAST_POOL_MIX_INFLIGHT, &fast_pool->count);
+ queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
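The lockless handshake in this patch is compact, so here is a minimal userspace sketch of the same idea, written with C11 atomics rather than the kernel's atomic_t helpers. All names (fake_pool, produce, consume, MIX_INFLIGHT) are invented for illustration and are not part of the patch; the HZ-based timeout, the crng_init bootstrap path, and the CPU-hotplug handling are omitted. The point is only the shape of the protocol: the hard-irq side bumps the counter with acquire semantics and sets an in-flight bit before scheduling deferred work, while the worker snapshots the pool, re-checks the counter to detect a racing update, and only then resets the counter with release semantics.

/* Illustrative sketch only, not kernel code. Build with: cc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define MIX_INFLIGHT (1U << 31)

struct fake_pool {
	unsigned long data[2];
	atomic_uint count;
};

static struct fake_pool pool;

/* Producer side: roughly the shape of add_interrupt_randomness(). */
static void produce(unsigned long sample)
{
	/* Stand-in for fast_mix(): any cheap mixing of the sample will do. */
	pool.data[0] ^= sample;
	pool.data[1] += sample * 0x9e3779b9UL;

	/*
	 * Pairs with the release store in consume(), like the kernel's
	 * atomic_inc_return_acquire() / atomic_set_release() pair.
	 */
	unsigned int new_count =
		atomic_fetch_add_explicit(&pool.count, 1, memory_order_acquire) + 1;

	if (new_count & MIX_INFLIGHT)
		return;	/* a consumer is already scheduled */

	if (new_count >= 64) {	/* the HZ-based timeout is omitted here */
		atomic_fetch_or_explicit(&pool.count, MIX_INFLIGHT,
					 memory_order_relaxed);
		/* the kernel would queue_work_on() the worker at this point */
	}
}

/* Consumer side: roughly the shape of mix_interrupt_randomness(). */
static void consume(void)
{
	unsigned long snapshot[2];
	unsigned int before, after;

	/*
	 * Snapshot the counter, copy the pool, then re-read the counter: if a
	 * producer ran in between, the counts differ and we copy again. The
	 * kernel reads each word with READ_ONCE(); plain memcpy() keeps this
	 * sketch short.
	 */
	do {
		before = atomic_load_explicit(&pool.count, memory_order_relaxed);
		memcpy(snapshot, pool.data, sizeof(snapshot));
		after = atomic_load_explicit(&pool.count, memory_order_relaxed);
	} while (before != after);

	/* Zero the count (and the in-flight bit) only after the pool is copied. */
	atomic_store_explicit(&pool.count, 0, memory_order_release);

	printf("consumed %lx %lx\n", snapshot[0], snapshot[1]);
}

int main(void)
{
	for (unsigned long i = 1; i <= 70; i++)
		produce(i);
	consume();
	return 0;
}

Built as an ordinary C11 program, produce() runs from main() rather than from interrupt context, so the re-check loop never actually retries; it exists to show where the kernel tolerates being interrupted in the middle of the copy.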