| Message ID | 20180620190408.45104-10-keescook@chromium.org (mailing list archive) |
|---|---|
| State | Changes Requested, archived |
| Delegated to | Mike Snitzer |
On Wed, Jun 20, 2018 at 12:04:06PM -0700, Kees Cook wrote:
> In the quest to remove all stack VLA usage from the kernel[1], this uses
> the newly defined max alignment to perform unaligned hashing to avoid
> VLAs, and drops the helper function while adding sanity checks on the
> resulting buffer sizes.
>
> [1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com
>
> Signed-off-by: Kees Cook <keescook@chromium.org>
> ---
>  crypto/shash.c | 21 ++++++++++-----------
>  1 file changed, 10 insertions(+), 11 deletions(-)
>
> diff --git a/crypto/shash.c b/crypto/shash.c
> index ab6902c6dae7..1bb58209330a 100644
> --- a/crypto/shash.c
> +++ b/crypto/shash.c
> @@ -73,13 +73,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
>  }
>  EXPORT_SYMBOL_GPL(crypto_shash_setkey);
>
> -static inline unsigned int shash_align_buffer_size(unsigned len,
> -						   unsigned long mask)
> -{
> -	typedef u8 __aligned_largest u8_aligned;
> -	return len + (mask & ~(__alignof__(u8_aligned) - 1));
> -}
> -
>  static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
>  				  unsigned int len)
>  {
> @@ -88,11 +81,14 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
>  	unsigned long alignmask = crypto_shash_alignmask(tfm);
>  	unsigned int unaligned_len = alignmask + 1 -
>  				     ((unsigned long)data & alignmask);
> -	u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
> -		__aligned_largest;
> +	u8 ubuf[CRYPTO_ALG_MAX_ALIGNMASK]
> +		__aligned(CRYPTO_ALG_MAX_ALIGNMASK + 1);
>  	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
>  	int err;

Are you sure that __attribute__((aligned(64))) works correctly on stack
variables on all architectures?

And if it is expected to work, then why is the buffer still aligned by hand on
the very next line?

>
> +	if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
> +		return -EINVAL;
> +
>  	if (unaligned_len > len)
>  		unaligned_len = len;
>
> @@ -124,11 +120,14 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
>  	unsigned long alignmask = crypto_shash_alignmask(tfm);
>  	struct shash_alg *shash = crypto_shash_alg(tfm);
>  	unsigned int ds = crypto_shash_digestsize(tfm);
> -	u8 ubuf[shash_align_buffer_size(ds, alignmask)]
> -		__aligned_largest;
> +	u8 ubuf[SHASH_MAX_DIGESTSIZE]
> +		__aligned(CRYPTO_ALG_MAX_ALIGNMASK + 1);
>  	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
>  	int err;

Same questions here.

>
> +	if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
> +		return -EINVAL;
> +
>  	err = shash->final(desc, buf);
>  	if (err)
>  		goto out;
> --

- Eric
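The manual alignment Eric is asking about is the kernel's usual over-allocate-and-round idiom: PTR_ALIGN() rounds the buffer pointer up to the next boundary, which only yields an in-bounds pointer if the buffer has slack beyond the needed size. A minimal user-space sketch of that idiom, with PTR_ALIGN() re-implemented locally and the sizes chosen purely for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for the kernel's PTR_ALIGN(): round p up to the
 * next a-byte boundary (a must be a power of two). */
#define PTR_ALIGN(p, a) \
	((unsigned char *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

int main(void)
{
	/* Over-allocating by (ALIGN - 1) bytes guarantees that an aligned
	 * region of NEED bytes exists somewhere inside raw[], regardless
	 * of how the compiler happens to place the stack slot. */
	enum { NEED = 32, ALIGN = 64 };
	unsigned char raw[NEED + ALIGN - 1];
	unsigned char *buf = PTR_ALIGN(raw, ALIGN);

	printf("raw=%p buf=%p skipped %td byte(s)\n",
	       (void *)raw, (void *)buf, buf - raw);
	return 0;
}
```

Note that the patch under review drops the over-allocation (previously supplied by shash_align_buffer_size()) and relies on the aligned attribute instead, keeping PTR_ALIGN() as what should then be a no-op; that combination is exactly what prompts Eric's question.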
On Wed, Jun 20, 2018 at 4:57 PM, Eric Biggers <ebiggers3@gmail.com> wrote:
> On Wed, Jun 20, 2018 at 12:04:06PM -0700, Kees Cook wrote:
>> [...]
>> -	u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
>> -		__aligned_largest;
>> +	u8 ubuf[CRYPTO_ALG_MAX_ALIGNMASK]
>> +		__aligned(CRYPTO_ALG_MAX_ALIGNMASK + 1);
>>  	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
>>  	int err;
>
> Are you sure that __attribute__((aligned(64))) works correctly on stack
> variables on all architectures?
>
> And if it is expected to work, then why is the buffer still aligned by hand on
> the very next line?

I really don't know -- the existing code was doing both the __align bit
and the manual alignment, so I was trying to simplify it while removing
the VLA. I'm totally open to suggestions here.

BTW, these are also the only users of __aligned_largest() in the kernel,
and the only use of an unsized __attribute__((aligned)).

-Kees
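For context on the unsized form Kees mentions: with no argument, GCC and Clang apply the largest alignment ever required for any scalar type on the target (__BIGGEST_ALIGNMENT__, e.g. 16 bytes on x86-64), and the kernel wraps that as __aligned_largest. A quick user-space check of what the unsized attribute yields on a given toolchain:

```c
#include <stdio.h>

/* The argument-less attribute form: the compiler picks the largest
 * alignment required by any scalar type on the target. This is what
 * the kernel's __aligned_largest expands to. */
typedef unsigned char u8_aligned __attribute__((aligned));

int main(void)
{
	printf("alignof(u8_aligned) = %zu\n", _Alignof(u8_aligned));
	return 0;
}
```

Because that value can be far smaller than a 64-byte alignmask, the old code could not rely on the attribute alone: it still over-allocated and rounded by hand, and the attribute merely shaved a few bytes off the required slack.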
diff --git a/crypto/shash.c b/crypto/shash.c
index ab6902c6dae7..1bb58209330a 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -73,13 +73,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
 }
 EXPORT_SYMBOL_GPL(crypto_shash_setkey);
 
-static inline unsigned int shash_align_buffer_size(unsigned len,
-						   unsigned long mask)
-{
-	typedef u8 __aligned_largest u8_aligned;
-	return len + (mask & ~(__alignof__(u8_aligned) - 1));
-}
-
 static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
 				  unsigned int len)
 {
@@ -88,11 +81,14 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
 	unsigned long alignmask = crypto_shash_alignmask(tfm);
 	unsigned int unaligned_len = alignmask + 1 -
 				     ((unsigned long)data & alignmask);
-	u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
-		__aligned_largest;
+	u8 ubuf[CRYPTO_ALG_MAX_ALIGNMASK]
+		__aligned(CRYPTO_ALG_MAX_ALIGNMASK + 1);
 	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
 	int err;
 
+	if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
+		return -EINVAL;
+
 	if (unaligned_len > len)
 		unaligned_len = len;
 
@@ -124,11 +120,14 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
 	unsigned long alignmask = crypto_shash_alignmask(tfm);
 	struct shash_alg *shash = crypto_shash_alg(tfm);
 	unsigned int ds = crypto_shash_digestsize(tfm);
-	u8 ubuf[shash_align_buffer_size(ds, alignmask)]
-		__aligned_largest;
+	u8 ubuf[SHASH_MAX_DIGESTSIZE]
+		__aligned(CRYPTO_ALG_MAX_ALIGNMASK + 1);
 	u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
 	int err;
 
+	if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
+		return -EINVAL;
+
 	err = shash->final(desc, buf);
 	if (err)
 		goto out;
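The two WARN_ON() checks guard the same assumption Eric questions: if the compiler ever fails to honor the alignment attribute on the stack slot, PTR_ALIGN() advances buf past the start of ubuf, and since the array no longer carries the old over-allocation slack, the copy would run off the end. A user-space sketch of that check, with PTR_ALIGN() re-implemented locally and the mask value assumed (the series defines CRYPTO_ALG_MAX_ALIGNMASK elsewhere; 63 here is illustrative):

```c
#include <stdint.h>
#include <stdio.h>

#define CRYPTO_ALG_MAX_ALIGNMASK 63	/* illustrative stand-in */

#define PTR_ALIGN(p, a) \
	((const unsigned char *)(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1)))

/* Mirrors the patch's sanity check: if the aligned attribute was honored,
 * buf == ubuf and up to sizeof(ubuf) bytes fit; if it was not, PTR_ALIGN()
 * may have advanced buf, and we must refuse rather than overflow. */
static int fits(const unsigned char *ubuf, size_t size,
		unsigned long alignmask, unsigned int need)
{
	const unsigned char *buf = PTR_ALIGN(ubuf, alignmask + 1);

	return buf + need <= ubuf + size;
}

int main(void)
{
	unsigned char ubuf[CRYPTO_ALG_MAX_ALIGNMASK]
		__attribute__((aligned(CRYPTO_ALG_MAX_ALIGNMASK + 1)));

	printf("32 bytes at alignmask 63: %s\n",
	       fits(ubuf, sizeof(ubuf), 63, 32) ? "fits" : "would overflow");
	return 0;
}
```

With a toolchain that honors the attribute this reports "fits"; if the attribute were ignored and the slot landed unaligned, PTR_ALIGN() could consume up to alignmask bytes of the array, and the check would start rejecting requests instead of overflowing the stack.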
In the quest to remove all stack VLA usage from the kernel[1], this uses
the newly defined max alignment to perform unaligned hashing to avoid
VLAs, and drops the helper function while adding sanity checks on the
resulting buffer sizes.

[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com

Signed-off-by: Kees Cook <keescook@chromium.org>
---
 crypto/shash.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)