@@ -66,10 +66,11 @@ struct ahash_request {
#define AHASH_MAX_DIGESTSIZE 512
#define AHASH_MAX_STATESIZE 512
+#define AHASH_MAX_REQSIZE 808
#define AHASH_REQUEST_ON_STACK(name, ahash) \
char __##name##_desc[sizeof(struct ahash_request) + \
- crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
+ AHASH_MAX_REQSIZE] CRYPTO_MINALIGN_ATTR; \
struct ahash_request *name = (void *)__##name##_desc
/**
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -142,6 +142,7 @@ static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
unsigned int reqsize)
{
+ BUG_ON(reqsize > AHASH_MAX_REQSIZE);
tfm->reqsize = reqsize;
}
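
Not part of the patch: a minimal sketch of how a caller typically uses
AHASH_REQUEST_ON_STACK, assuming a synchronous tfm (an on-stack request
cannot outlive its caller, so asynchronous completion is not an option
here). The example_hash_data() helper is made up for illustration; with
the change above its stack buffer is a fixed AHASH_MAX_REQSIZE bytes
instead of a VLA sized at runtime by crypto_ahash_reqsize(tfm).

#include <crypto/hash.h>
#include <linux/scatterlist.h>

/* Hypothetical caller, illustrative only; assumes a synchronous ahash tfm. */
static int example_hash_data(struct crypto_ahash *tfm,
			     struct scatterlist *sg, unsigned int len,
			     u8 *out)
{
	AHASH_REQUEST_ON_STACK(req, tfm);	/* fixed-size buffer after this patch */
	int err;

	ahash_request_set_tfm(req, tfm);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_ahash_digest(req);
	ahash_request_zero(req);		/* wipe the on-stack request state */
	return err;
}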
In the quest to remove all stack VLA usage from the kernel[1], this caps
the ahash request size similarly to the other limits and adds a sanity
check at registration. Unfortunately, these reqsizes can be very large.
Looking at instrumented tcrypt output, the largest is for sha512:

    crypt: testing sha512
    crypto_ahash_set_reqsize: 528
    crypto_ahash_set_reqsize: 728
    crypto_ahash_set_reqsize: 808

[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 include/crypto/hash.h          | 3 ++-
 include/crypto/internal/hash.h | 1 +
 2 files changed, 3 insertions(+), 1 deletion(-)
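
Also not part of the patch: the log above cites instrumented tcrypt
output, but the instrumentation itself is not shown. One plausible way to
gather those numbers, assuming a temporary pr_info() dropped into
crypto_ahash_set_reqsize() while loading the tcrypt module, is sketched
below; the _debug suffix is only to mark it as a hypothetical variant of
the helper changed in the hunk above.

#include <crypto/hash.h>
#include <linux/bug.h>
#include <linux/printk.h>

/* Debug-only variant of crypto_ahash_set_reqsize(), for illustration. */
static inline void crypto_ahash_set_reqsize_debug(struct crypto_ahash *tfm,
						  unsigned int reqsize)
{
	/* Log every registration-time reqsize so the maximum can be spotted. */
	pr_info("crypto_ahash_set_reqsize: %u\n", reqsize);
	BUG_ON(reqsize > AHASH_MAX_REQSIZE);
	tfm->reqsize = reqsize;
}

With something like that in place, loading tcrypt prints one line per
reqsize assignment, and the largest observed value is what the new
AHASH_MAX_REQSIZE cap reflects.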