@@ -7,3 +7,5 @@ obj-y += lrng_pool.o lrng_aux.o \
lrng_sw_noise.o lrng_archrandom.o \
lrng_drng.o lrng_chacha20.o \
lrng_interfaces.o \
+
+obj-$(CONFIG_NUMA) += lrng_numa.o
@@ -256,8 +256,13 @@ int lrng_drng_get_sleep(u8 *outbuf, u32 outbuflen);
void lrng_drng_force_reseed(void);
void lrng_drng_seed_work(struct work_struct *dummy);
+#ifdef CONFIG_NUMA
+struct lrng_drng **lrng_drng_instances(void);
+void lrng_drngs_numa_alloc(void);
+#else /* CONFIG_NUMA */
static inline struct lrng_drng **lrng_drng_instances(void) { return NULL; }
static inline void lrng_drngs_numa_alloc(void) { return; }
+#endif /* CONFIG_NUMA */
/************************** Health Test linking code **************************/
new file mode 100644
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/*
+ * LRNG NUMA support
+ *
+ * Copyright (C) 2016 - 2020, Stephan Mueller <smueller@chronox.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/lrng.h>
+#include <linux/slab.h>
+
+#include "lrng_internal.h"
+
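+/* Per-NUMA-node DRNG instances; NULL until the allocation work has run. */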
+static struct lrng_drng **lrng_drng __read_mostly;
+
+struct lrng_drng **lrng_drng_instances(void)
+{
+ return lrng_drng;
+}
+
+/* Allocate the data structures for the per-NUMA node DRNGs */
+static void _lrng_drngs_numa_alloc(struct work_struct *work)
+{
+ struct lrng_drng **drngs;
+ struct lrng_drng *lrng_drng_init = lrng_drng_init_instance();
+ u32 node;
+ bool init_drng_used = false;
+
+ mutex_lock(&lrng_crypto_cb_update);
+
+ /* per-NUMA-node DRNGs are already present */
+ if (lrng_drng)
+ goto unlock;
+
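+ /* __GFP_NOFAIL: the allocation of the pointer array cannot fail. */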
+ drngs = kcalloc(nr_node_ids, sizeof(void *), GFP_KERNEL|__GFP_NOFAIL);
+ for_each_online_node(node) {
+ struct lrng_drng *drng;
+
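+ /* Use the already existing initial DRNG for the first node. */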
+ if (!init_drng_used) {
+ drngs[node] = lrng_drng_init;
+ init_drng_used = true;
+ continue;
+ }
+
+ drng = kmalloc_node(sizeof(struct lrng_drng),
+ GFP_KERNEL|__GFP_NOFAIL, node);
+ memset(drng, 0, sizeof(*drng));
+
+ drng->crypto_cb = lrng_drng_init->crypto_cb;
+ drng->drng = drng->crypto_cb->lrng_drng_alloc(
+ LRNG_DRNG_SECURITY_STRENGTH_BYTES);
+ if (IS_ERR(drng->drng)) {
+ kfree(drng);
+ goto err;
+ }
+
+ mutex_init(&drng->lock);
+ spin_lock_init(&drng->spin_lock);
+
+ /*
+ * The new DRNG is not seeded from the existing init DRNG as this
+ * would complicate the code. Simply let it reseed itself.
+ */
+ lrng_drng_reset(drng);
+ drngs[node] = drng;
+
+ lrng_pool_inc_numa_node();
+ pr_info("DRNG for NUMA node %d allocated\n", node);
+ }
+
+ /*
+ * Ensure that the fully initialized DRNG structures are visible to
+ * all CPUs before the pointer array is published below.
+ */
+ mb();
+
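+ /* Publish the array; if it was already set, release our allocation below. */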
+ if (!cmpxchg(&lrng_drng, NULL, drngs))
+ goto unlock;
+
+err:
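+ /* Release everything allocated so far, except the init DRNG. */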
+ for_each_online_node(node) {
+ struct lrng_drng *drng = drngs[node];
+
+ if (drng == lrng_drng_init)
+ continue;
+
+ if (drng) {
+ drng->crypto_cb->lrng_drng_dealloc(drng->drng);
+ kfree(drng);
+ }
+ }
+ kfree(drngs);
+
+unlock:
+ mutex_unlock(&lrng_crypto_cb_update);
+}
+
+static DECLARE_WORK(lrng_drngs_numa_alloc_work, _lrng_drngs_numa_alloc);
+
+void lrng_drngs_numa_alloc(void)
+{
+ schedule_work(&lrng_drngs_numa_alloc_work);
+}