diff --git a/crypto/Makefile b/crypto/Makefile
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_CRYPTO_FIPS) += fips.o
crypto_algapi-$(CONFIG_PROC_FS) += proc.o
crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
+crypto_algapi-$(CONFIG_DEBUG_CRYPTO) += debugfs.o
obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
diff --git a/crypto/algapi.c b/crypto/algapi.c
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -1098,11 +1098,13 @@ static int __init crypto_algapi_init(void)
{
crypto_init_proc();
crypto_start_tests();
+ crypto_init_debugfs();
return 0;
}
static void __exit crypto_algapi_exit(void)
{
+ crypto_exit_debugfs();
crypto_exit_proc();
}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include "internal.h"
static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
@@ -176,6 +177,8 @@ static void cryptd_queue_worker(struct work_struct *work)
if (!req)
return;
+ crypto_cryptd_delay();
+
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
crypto_request_complete(req, 0);
diff --git a/crypto/debugfs.c b/crypto/debugfs.c
new file mode 100644
--- /dev/null
+++ b/crypto/debugfs.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+
+#include "internal.h"
+
+static struct dentry *crypto_debugfs_dir;
+static bool simd_force_async;
+static u32 cryptd_delay;
+
+bool crypto_simd_force_async(void)
+{
+ return READ_ONCE(simd_force_async);
+}
+EXPORT_SYMBOL_GPL(crypto_simd_force_async);
+
+void crypto_cryptd_delay(void)
+{
+ unsigned int d = READ_ONCE(cryptd_delay);
+
+ if (d)
+ msleep(d);
+}
+EXPORT_SYMBOL_GPL(crypto_cryptd_delay);
+
+void __init crypto_init_debugfs(void)
+{
+ crypto_debugfs_dir = debugfs_create_dir("crypto", NULL);
+
+#if IS_ENABLED(CONFIG_CRYPTO_SIMD)
+ debugfs_create_bool("simd_force_async", 0644, crypto_debugfs_dir, &simd_force_async);
+#endif
+
+#if IS_ENABLED(CONFIG_CRYPTO_CRYPTD)
+ debugfs_create_u32("cryptd_delay_ms", 0644, crypto_debugfs_dir, &cryptd_delay);
+#endif
+}
+
+void __exit crypto_exit_debugfs(void)
+{
+ debugfs_remove_recursive(crypto_debugfs_dir);
+}
diff --git a/crypto/internal.h b/crypto/internal.h
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -96,6 +96,25 @@ static inline void crypto_exit_proc(void)
{ }
#endif
+#if IS_ENABLED(CONFIG_DEBUG_CRYPTO)
+void __init crypto_init_debugfs(void);
+void __exit crypto_exit_debugfs(void);
+
+bool crypto_simd_force_async(void);
+void crypto_cryptd_delay(void);
+#else
+static inline void crypto_init_debugfs(void)
+{ }
+static inline void crypto_exit_debugfs(void)
+{ }
+static inline bool crypto_simd_force_async(void)
+{
+ return false;
+}
+static inline void crypto_cryptd_delay(void)
+{ }
+#endif
+
static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
diff --git a/crypto/simd.c b/crypto/simd.c
--- a/crypto/simd.c
+++ b/crypto/simd.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>
+#include "internal.h"
/* skcipher support */
@@ -69,7 +70,7 @@ static int simd_skcipher_encrypt(struct skcipher_request *req)
subreq = skcipher_request_ctx(req);
*subreq = *req;
- if (!crypto_simd_usable() ||
+ if (crypto_simd_force_async() || !crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
@@ -90,7 +91,7 @@ static int simd_skcipher_decrypt(struct skcipher_request *req)
subreq = skcipher_request_ctx(req);
*subreq = *req;
- if (!crypto_simd_usable() ||
+ if (crypto_simd_force_async() || !crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
@@ -317,7 +318,7 @@ static int simd_aead_encrypt(struct aead_request *req)
subreq = aead_request_ctx(req);
*subreq = *req;
- if (!crypto_simd_usable() ||
+ if (crypto_simd_force_async() || !crypto_simd_usable() ||
(in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
@@ -338,7 +339,7 @@ static int simd_aead_decrypt(struct aead_request *req)
subreq = aead_request_ctx(req);
*subreq = *req;
- if (!crypto_simd_usable() ||
+ if (crypto_simd_force_async() || !crypto_simd_usable() ||
(in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -679,6 +679,12 @@ source "net/Kconfig.debug"
endmenu # "Networking Debugging"
+config DEBUG_CRYPTO
+ bool "Cryptographic API Debugging"
+ depends on DEBUG_KERNEL && CRYPTO && DEBUG_FS
+ help
+ Enable debugfs knobs for exercising async codepaths in the core Cryptographic API.
+
menu "Memory Debugging"
source "mm/Kconfig.debug"
The networking code has multiple users of async crypto (tls, ipsec,
macsec), but networking developers often don't have access to crypto
accelerators, which limits the testing we can do on the async
completion codepaths. I've been using this patch to uncover some bugs
[1] in the tls implementation, forcing AESNI (which I do have on my
laptop and test machines) to go through cryptd. The intent with the
cryptd_delay_ms knob was to try to trigger some issues with async
completions and closing tls sockets.

Link: https://lore.kernel.org/netdev/cover.1694018970.git.sd@queasysnail.net/ [1]
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
---
 crypto/Makefile   |  1 +
 crypto/algapi.c   |  2 ++
 crypto/cryptd.c   |  3 +++
 crypto/debugfs.c  | 43 +++++++++++++++++++++++++++++++++++++++++++
 crypto/internal.h | 19 +++++++++++++++++++
 crypto/simd.c     |  9 +++++----
 lib/Kconfig.debug |  6 ++++++
 7 files changed, 79 insertions(+), 4 deletions(-)
 create mode 100644 crypto/debugfs.c
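
For reference, a rough sketch of how the knobs can be driven from
userspace before a test run. This helper is hypothetical and not part
of the patch; it assumes CONFIG_DEBUG_CRYPTO=y, root, debugfs mounted
at the usual /sys/kernel/debug, and the 50ms value is only an example:

/* Hypothetical test helper, not part of this patch: force simd
 * algorithms through cryptd and delay cryptd completions by 50ms.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int knob_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0) {
		perror(path);
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	if (knob_write("/sys/kernel/debug/crypto/simd_force_async", "1") ||
	    knob_write("/sys/kernel/debug/crypto/cryptd_delay_ms", "50"))
		return 1;

	/* ... run the tls/ipsec test of interest here ... */
	return 0;
}

Writing the files from a shell works just as well; both knobs are plain
debugfs bool/u32 attributes, so no special tooling is needed.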