@@ -61,8 +61,8 @@ struct generic_gcmaes_ctx {
 };
 
 struct aesni_xts_ctx {
-	u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
-	u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
+	struct crypto_aes_ctx tweak_ctx AESNI_ALIGN_ATTR;
+	struct crypto_aes_ctx crypt_ctx AESNI_ALIGN_ATTR;
 };
 
 #define GCM_BLOCK_LEN 16
@@ -885,13 +885,12 @@ static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	keylen /= 2;
 
 	/* first half of xts-key is for crypt */
-	err = aes_set_key_common(aes_ctx(ctx->raw_crypt_ctx), key, keylen);
+	err = aes_set_key_common(aes_ctx(&ctx->crypt_ctx), key, keylen);
 	if (err)
 		return err;
 
 	/* second half of xts-key is for tweak */
-	return aes_set_key_common(aes_ctx(ctx->raw_tweak_ctx), key + keylen,
-				  keylen);
+	return aes_set_key_common(aes_ctx(&ctx->tweak_ctx), key + keylen, keylen);
 }
 
 static int xts_crypt(struct skcipher_request *req, bool encrypt)
@@ -933,7 +932,7 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 	kernel_fpu_begin();
 
 	/* calculate first value of T */
-	aesni_enc(aes_ctx(ctx->raw_tweak_ctx), walk.iv, walk.iv);
+	aesni_enc(aes_ctx(&ctx->tweak_ctx), walk.iv, walk.iv);
 
 	while (walk.nbytes > 0) {
 		int nbytes = walk.nbytes;
@@ -942,11 +941,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 			nbytes &= ~(AES_BLOCK_SIZE - 1);
 
 		if (encrypt)
-			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+			aesni_xts_encrypt(aes_ctx(&ctx->crypt_ctx),
 					  walk.dst.virt.addr, walk.src.virt.addr,
 					  nbytes, walk.iv);
 		else
-			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+			aesni_xts_decrypt(aes_ctx(&ctx->crypt_ctx),
 					  walk.dst.virt.addr, walk.src.virt.addr,
 					  nbytes, walk.iv);
 		kernel_fpu_end();
@@ -974,11 +973,11 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
 
 		kernel_fpu_begin();
 		if (encrypt)
-			aesni_xts_encrypt(aes_ctx(ctx->raw_crypt_ctx),
+			aesni_xts_encrypt(aes_ctx(&ctx->crypt_ctx),
 					  walk.dst.virt.addr, walk.src.virt.addr,
 					  walk.nbytes, walk.iv);
 		else
-			aesni_xts_decrypt(aes_ctx(ctx->raw_crypt_ctx),
+			aesni_xts_decrypt(aes_ctx(&ctx->crypt_ctx),
 					  walk.dst.virt.addr, walk.src.virt.addr,
 					  walk.nbytes, walk.iv);
 		kernel_fpu_end();
Currently, every field in struct aesni_xts_ctx is defined as a byte array
of the same size as struct crypto_aes_ctx. This data type is obscure and
the choice lacks justification.

To rectify this, update the field type in struct aesni_xts_ctx to match
its actual structure.

Suggested-by: Eric Biggers <ebiggers@kernel.org>
Link: https://lore.kernel.org/all/ZFWQ4sZEVu%2FLHq+Q@gmail.com/
Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
Cc: linux-crypto@vger.kernel.org
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
---
 arch/x86/crypto/aesni-intel_glue.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)
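
Note for reviewers (below the fold, not part of the commit message): the
aes_ctx() realignment calls are intentionally kept. AESNI_ALIGN_ATTR only
aligns the field offsets within struct aesni_xts_ctx; the skcipher tfm
context that holds the struct is only guaranteed crypto_tfm_ctx_alignment(),
which can be smaller than the 16-byte alignment the AES-NI assembly wants.
For reference, the existing helper in aesni-intel_glue.c looks roughly like
this (a sketch from memory, not a verbatim quote of the file):

/*
 * Round a possibly under-aligned context pointer up to AESNI_ALIGN
 * (16 bytes) before handing it to the AES-NI assembly. If the crypto
 * API already guarantees sufficient alignment, the rounding is a no-op.
 */
static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
{
	unsigned long addr = (unsigned long)raw_ctx;
	unsigned long align = AESNI_ALIGN;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;

	return (struct crypto_aes_ctx *)ALIGN(addr, align);
}

Because the struct's base address can itself be under-aligned, removing
these aes_ctx() calls (e.g. by aligning the context once at init time)
would be a separate cleanup and is out of scope for this type fix.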