@@ -67,7 +67,7 @@ static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
 {
 	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
 	unsigned int size = crypto_tfm_alg_blocksize(tfm);
-	u8 buffer[size + alignmask];
+	u8 buffer[MAX_BLOCKSIZE + MAX_ALIGNMASK];
 	u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 
 	memcpy(tmp, src, size);
@@ -105,9 +105,14 @@ static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
 
 int crypto_init_cipher_ops(struct crypto_tfm *tfm)
 {
+	const unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
+	const unsigned int size = crypto_tfm_alg_blocksize(tfm);
 	struct cipher_tfm *ops = &tfm->crt_cipher;
 	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
 
+	if (size > MAX_BLOCKSIZE || alignmask > MAX_ALIGNMASK)
+		return -EINVAL;
+
 	ops->cit_setkey = setkey;
 	ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
 		cipher_encrypt_unaligned : cipher->cia_encrypt;
We avoid a VLA[1] by always allocating MAX_BLOCKSIZE + MAX_ALIGNMASK
bytes. We also check the selected cipher at initialization time; if it
doesn't comply with these limits, the initialization will fail.

[1] http://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com

Signed-off-by: Salvatore Mesoraca <s.mesoraca16@gmail.com>
---
 crypto/cipher.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
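For readers following along outside the kernel tree, the sketch below is a
minimal userspace illustration of the pattern the patch uses: a fixed-size
stack buffer sized for the worst case, an ALIGN-style round-up into that
buffer, and an up-front bounds check instead of a VLA. The values chosen for
MAX_BLOCKSIZE and MAX_ALIGNMASK, the ALIGN_UP macro, and toy_xor_block() are
illustrative assumptions only; the real constants and cipher callbacks are
defined elsewhere in the series and in the crypto API.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define MAX_BLOCKSIZE 16	/* assumed worst-case cipher block size */
#define MAX_ALIGNMASK 15	/* assumed worst-case alignment mask */

/* Same round-up idea as the kernel's ALIGN() macro. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Hypothetical stand-in for cipher->cia_encrypt: XOR with a fixed byte. */
static void toy_xor_block(uint8_t *dst, const uint8_t *src, unsigned int size)
{
	for (unsigned int i = 0; i < size; i++)
		dst[i] = src[i] ^ 0xAA;
}

/*
 * Analogous to cipher_crypt_unaligned() after the patch: the bounce buffer
 * no longer depends on runtime values, so there is no VLA, and oversized
 * parameters are rejected explicitly (the kernel does this once at
 * initialization time with -EINVAL).
 */
static int crypt_unaligned(uint8_t *dst, const uint8_t *src,
			   unsigned int size, unsigned long alignmask)
{
	uint8_t buffer[MAX_BLOCKSIZE + MAX_ALIGNMASK];
	uint8_t *tmp;

	if (size > MAX_BLOCKSIZE || alignmask > MAX_ALIGNMASK)
		return -EINVAL;

	/* Round the buffer address up to the requested alignment. */
	tmp = (uint8_t *)ALIGN_UP((unsigned long)buffer, alignmask + 1);
	memcpy(tmp, src, size);		/* bounce copy into aligned storage */
	toy_xor_block(tmp, tmp, size);	/* "encrypt" in place */
	memcpy(dst, tmp, size);
	return 0;
}

int main(void)
{
	uint8_t in[8] = "abcdefg", out[8];

	if (crypt_unaligned(out, in, sizeof(in), 7) == 0)
		printf("first output byte: 0x%02x\n", out[0]);
	return 0;
}
```

The trade-off mirrored here is spending a few extra bytes of stack for the
worst case in exchange for a compile-time-sized buffer, rather than a VLA or
a heap allocation on every call.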