[v3,02/36] crypto_pool: Add crypto_pool_reserve_scratch()

Message ID 20221027204347.529913-3-dima@arista.com (mailing list archive)
State Changes Requested
Series: net/tcp: Add TCP-AO support

Checks

Context Check Description
netdev/tree_selection success Guessed tree name to be net-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count fail Series longer than 15 patches (and no cover letter)
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1 this patch: 1
netdev/cc_maintainers success CCed 3 of 3 maintainers
netdev/build_clang success Errors and warnings before: 2 this patch: 1
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 1 this patch: 1
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 142 lines checked
netdev/kdoc fail Errors and warnings before: 0 this patch: 1
netdev/source_inline success Was 0 now: 0

Commit Message

Dmitry Safonov Oct. 27, 2022, 8:43 p.m. UTC
Instead of a build-time hardcoded constant, reallocate the scratch
area if a user requests more. Different algorithms and different users
may need different sizes for the temporary per-CPU buffer. Only
up-sizing is supported, for simplicity.

Signed-off-by: Dmitry Safonov <dima@arista.com>
---
 crypto/Kconfig        |  6 ++++
 crypto/crypto_pool.c  | 77 ++++++++++++++++++++++++++++++++++---------
 include/crypto/pool.h |  3 +-
 3 files changed, 69 insertions(+), 17 deletions(-)
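
For context, a user of this API reserves a large-enough per-CPU scratch
area when setting up its hash pool; requests only ever grow the area,
never shrink it. A minimal sketch of such a caller follows (the function
name, the size and the "hmac(sha1)" algorithm are illustrative, and it
assumes crypto_pool_alloc_ahash() returns a pool id or a negative error,
as in this series):

#include <crypto/pool.h>

/* Hypothetical caller: make sure the per-CPU scratch area is big enough
 * for this user's temporary data, then allocate an ahash pool.
 */
static int example_proto_init(void)
{
	int pool_id, err;

	/* Only up-sizing: a smaller request never shrinks the area. */
	err = crypto_pool_reserve_scratch(512);
	if (err)
		return err;

	pool_id = crypto_pool_alloc_ahash("hmac(sha1)");
	if (pool_id < 0)
		return pool_id;

	/* pool_id is now usable with crypto_pool_get()/crypto_pool_put(). */
	return 0;
}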

Comments

kernel test robot Oct. 28, 2022, 7:06 a.m. UTC | #1
Hi Dmitry,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on 4dc12f37a8e98e1dca5521c14625c869537b50b6]

url:    https://github.com/intel-lab-lkp/linux/commits/Dmitry-Safonov/net-tcp-Add-TCP-AO-support/20221028-045452
base:   4dc12f37a8e98e1dca5521c14625c869537b50b6
patch link:    https://lore.kernel.org/r/20221027204347.529913-3-dima%40arista.com
patch subject: [PATCH v3 02/36] crypto_pool: Add crypto_pool_reserve_scratch()
config: sparc-allyesconfig
compiler: sparc64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/77f338ef8d76766e86b5b0b722a2a54fd2973c93
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Dmitry-Safonov/net-tcp-Add-TCP-AO-support/20221028-045452
        git checkout 77f338ef8d76766e86b5b0b722a2a54fd2973c93
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=sparc SHELL=/bin/bash

If you fix the issue, kindly add the following tag where applicable:
| Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> crypto/crypto_pool.c:31: warning: expecting prototype for crypto_pool_reserve_scratch(). Prototype was for FREE_BATCH_SIZE() instead


vim +31 crypto/crypto_pool.c

    25	
    26	/* Slow-path */
    27	/**
    28	 * crypto_pool_reserve_scratch - re-allocates scratch buffer, slow-path
    29	 * @size: request size for the scratch/temp buffer
    30	 */
  > 31	#define FREE_BATCH_SIZE		64
    32	int crypto_pool_reserve_scratch(unsigned long size)
    33	{
    34		void *free_batch[FREE_BATCH_SIZE];
    35		int cpu, err = 0;
    36		unsigned int i = 0;
    37	
    38		mutex_lock(&cpool_mutex);
    39		if (size == scratch_size) {
    40			for_each_possible_cpu(cpu) {
    41				if (per_cpu(crypto_pool_scratch, cpu))
    42					continue;
    43				goto allocate_scratch;
    44			}
    45			mutex_unlock(&cpool_mutex);
    46			return 0;
    47		}
    48	allocate_scratch:
    49		size = max(size, scratch_size);
    50		cpus_read_lock();
    51		for_each_possible_cpu(cpu) {
    52			void *scratch, *old_scratch;
    53	
    54			scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
    55			if (!scratch) {
    56				err = -ENOMEM;
    57				break;
    58			}
    59	
    60			old_scratch = per_cpu(crypto_pool_scratch, cpu);
    61			/* Pairs with crypto_pool_get() */
    62			WRITE_ONCE(*per_cpu_ptr(&crypto_pool_scratch, cpu), scratch);
    63			if (!cpu_online(cpu)) {
    64				kfree(old_scratch);
    65				continue;
    66			}
    67			free_batch[i++] = old_scratch;
    68			if (i == FREE_BATCH_SIZE) {
    69				cpus_read_unlock();
    70				synchronize_rcu();
    71				while (i > 0)
    72					kfree(free_batch[--i]);
    73				cpus_read_lock();
    74			}
    75		}
    76		cpus_read_unlock();
    77		if (!err)
    78			scratch_size = size;
    79		mutex_unlock(&cpool_mutex);
    80	
    81		if (i > 0) {
    82			synchronize_rcu();
    83			while (i > 0)
    84				kfree(free_batch[--i]);
    85		}
    86		return err;
    87	}
    88	EXPORT_SYMBOL_GPL(crypto_pool_reserve_scratch);
    89
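
The kernel-doc warning is a placement problem: the comment block sits
directly above the FREE_BATCH_SIZE macro, so kernel-doc attaches it to
the macro instead of to crypto_pool_reserve_scratch(). One possible fix
(a sketch only, not something posted in this thread) is to move the
#define above the comment, keeping the function body unchanged:

/* Slow-path */
#define FREE_BATCH_SIZE		64

/**
 * crypto_pool_reserve_scratch - re-allocates scratch buffer, slow-path
 * @size: request size for the scratch/temp buffer
 */
int crypto_pool_reserve_scratch(unsigned long size)
{
	/* body exactly as in the patch below */
}
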
Patch

diff --git a/crypto/Kconfig b/crypto/Kconfig
index 768d331e626b..e002cd321e79 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1397,6 +1397,12 @@  config CRYPTO_POOL
 	help
 	  Per-CPU pool of crypto requests ready for usage in atomic contexts.
 
+config CRYPTO_POOL_DEFAULT_SCRATCH_SIZE
+	hex "Per-CPU default scratch area size"
+	depends on CRYPTO_POOL
+	default 0x100
+	range 0x100 0x10000
+
 if !KMSAN # avoid false positives from assembly
 if ARM
 source "arch/arm/crypto/Kconfig"
diff --git a/crypto/crypto_pool.c b/crypto/crypto_pool.c
index 37131952c5a7..2f1deb3f5218 100644
--- a/crypto/crypto_pool.c
+++ b/crypto/crypto_pool.c
@@ -1,13 +1,14 @@ 
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include <crypto/pool.h>
+#include <linux/cpu.h>
 #include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/percpu.h>
 #include <linux/workqueue.h>
 
-static unsigned long scratch_size = DEFAULT_CRYPTO_POOL_SCRATCH_SZ;
+static unsigned long scratch_size = CONFIG_CRYPTO_POOL_DEFAULT_SCRATCH_SIZE;
 static DEFINE_PER_CPU(void *, crypto_pool_scratch);
 
 struct crypto_pool_entry {
@@ -22,26 +23,69 @@  static struct crypto_pool_entry cpool[CPOOL_SIZE];
 static unsigned int cpool_populated;
 static DEFINE_MUTEX(cpool_mutex);
 
-static int crypto_pool_scratch_alloc(void)
+/* Slow-path */
+/**
+ * crypto_pool_reserve_scratch - re-allocates scratch buffer, slow-path
+ * @size: request size for the scratch/temp buffer
+ */
+#define FREE_BATCH_SIZE		64
+int crypto_pool_reserve_scratch(unsigned long size)
 {
-	int cpu;
-
-	lockdep_assert_held(&cpool_mutex);
+	void *free_batch[FREE_BATCH_SIZE];
+	int cpu, err = 0;
+	unsigned int i = 0;
 
+	mutex_lock(&cpool_mutex);
+	if (size == scratch_size) {
+		for_each_possible_cpu(cpu) {
+			if (per_cpu(crypto_pool_scratch, cpu))
+				continue;
+			goto allocate_scratch;
+		}
+		mutex_unlock(&cpool_mutex);
+		return 0;
+	}
+allocate_scratch:
+	size = max(size, scratch_size);
+	cpus_read_lock();
 	for_each_possible_cpu(cpu) {
-		void *scratch = per_cpu(crypto_pool_scratch, cpu);
+		void *scratch, *old_scratch;
 
-		if (scratch)
+		scratch = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
+		if (!scratch) {
+			err = -ENOMEM;
+			break;
+		}
+
+		old_scratch = per_cpu(crypto_pool_scratch, cpu);
+		/* Pairs with crypto_pool_get() */
+		WRITE_ONCE(*per_cpu_ptr(&crypto_pool_scratch, cpu), scratch);
+		if (!cpu_online(cpu)) {
+			kfree(old_scratch);
 			continue;
+		}
+		free_batch[i++] = old_scratch;
+		if (i == FREE_BATCH_SIZE) {
+			cpus_read_unlock();
+			synchronize_rcu();
+			while (i > 0)
+				kfree(free_batch[--i]);
+			cpus_read_lock();
+		}
+	}
+	cpus_read_unlock();
+	if (!err)
+		scratch_size = size;
+	mutex_unlock(&cpool_mutex);
 
-		scratch = kmalloc_node(scratch_size, GFP_KERNEL,
-				       cpu_to_node(cpu));
-		if (!scratch)
-			return -ENOMEM;
-		per_cpu(crypto_pool_scratch, cpu) = scratch;
+	if (i > 0) {
+		synchronize_rcu();
+		while (i > 0)
+			kfree(free_batch[--i]);
 	}
-	return 0;
+	return err;
 }
+EXPORT_SYMBOL_GPL(crypto_pool_reserve_scratch);
 
 static void crypto_pool_scratch_free(void)
 {
@@ -138,7 +182,6 @@  int crypto_pool_alloc_ahash(const char *alg)
 
 	/* slow-path */
 	mutex_lock(&cpool_mutex);
-
 	for (i = 0; i < cpool_populated; i++) {
 		if (cpool[i].alg && !strcmp(cpool[i].alg, alg)) {
 			if (kref_read(&cpool[i].kref) > 0) {
@@ -263,7 +306,11 @@  int crypto_pool_get(unsigned int id, struct crypto_pool *c)
 		return -EINVAL;
 	}
 	ret->req = *this_cpu_ptr(cpool[id].req);
-	ret->base.scratch = this_cpu_read(crypto_pool_scratch);
+	/*
+	 * Pairs with crypto_pool_reserve_scratch(), scratch area is
+	 * valid (allocated) until crypto_pool_put().
+	 */
+	ret->base.scratch = READ_ONCE(*this_cpu_ptr(&crypto_pool_scratch));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(crypto_pool_get);
diff --git a/include/crypto/pool.h b/include/crypto/pool.h
index 2c61aa45faff..c7d817860cc3 100644
--- a/include/crypto/pool.h
+++ b/include/crypto/pool.h
@@ -4,8 +4,6 @@ 
 
 #include <crypto/hash.h>
 
-#define DEFAULT_CRYPTO_POOL_SCRATCH_SZ	128
-
 struct crypto_pool {
 	void *scratch;
 };
@@ -20,6 +18,7 @@  struct crypto_pool_ahash {
 	struct ahash_request *req;
 };
 
+int crypto_pool_reserve_scratch(unsigned long size);
 int crypto_pool_alloc_ahash(const char *alg);
 void crypto_pool_add(unsigned int id);
 void crypto_pool_release(unsigned int id);
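
A note on the WRITE_ONCE()/READ_ONCE() pairing above: the writer
publishes the new per-CPU buffer first and frees the old one only after
synchronize_rcu(), so a reader that fetched the pointer in
crypto_pool_get() keeps a valid buffer until crypto_pool_put(),
assuming (as the comment implies) that the get/put window is covered by
an RCU read-side or BH-disabled section. A stripped-down sketch of that
publish-then-defer-free pattern, with illustrative names that are not
part of the patch:

#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(void *, example_scratch);

/* Writer side: publish the new buffer, then wait for pre-existing
 * readers before freeing the old one.
 */
static int example_resize_one(int cpu, size_t size)
{
	void *new_buf, *old_buf;

	new_buf = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (!new_buf)
		return -ENOMEM;

	old_buf = per_cpu(example_scratch, cpu);
	/* Pairs with READ_ONCE() on the reader side. */
	WRITE_ONCE(*per_cpu_ptr(&example_scratch, cpu), new_buf);

	/* After the grace period no reader can still hold old_buf. */
	synchronize_rcu();
	kfree(old_buf);
	return 0;
}

/* Reader side: fetch the current buffer inside a section that
 * synchronize_rcu() waits for.
 */
static void example_use_scratch(void)
{
	void *scratch;

	rcu_read_lock_bh();
	scratch = READ_ONCE(*this_cpu_ptr(&example_scratch));
	/* ... use scratch as a temporary buffer ... */
	rcu_read_unlock_bh();
}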