[2/8] crypto: arm64/gcm-aes-ce - fix no-NEON fallback code

Message ID 20190313051252.2917-3-ebiggers@kernel.org (mailing list archive)
State Accepted
Delegated to: Herbert Xu
Series crypto: test the !may_use_simd() fallback code

Commit Message

Eric Biggers March 13, 2019, 5:12 a.m. UTC
From: Eric Biggers <ebiggers@google.com>

The arm64 gcm-aes-ce algorithm is failing the extra crypto self-tests
following my patches to test the !may_use_simd() code paths, which
previously were untested.  The problem is that in the !may_use_simd()
case, an odd number of AES blocks can be processed within each step of
the skcipher_walk.  However, the skcipher_walk is being done with a
"stride" of 2 blocks and is advanced by an even number of blocks after
each step.  This causes the encryption to produce the wrong ciphertext
and authentication tag, and causes the decryption to incorrectly fail.

Fix it by only processing an even number of blocks per step.

Fixes: c2b24c36e0a3 ("crypto: arm64/aes-gcm-ce - fix scatterwalk API violation")
Fixes: 71e52c278c54 ("crypto: arm64/aes-ce-gcm - operate on two input blocks at a time")
Cc: <stable@vger.kernel.org> # v4.19+
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 arch/arm64/crypto/ghash-ce-glue.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
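
As a worked example of the stride mismatch described above (a stand-alone
sketch, not taken from the patch; AES_BLOCK_SIZE and the arithmetic simply
mirror the walk bookkeeping from the commit message):

#include <stdio.h>

#define AES_BLOCK_SIZE 16

int main(void)
{
	/* A walk step that maps 3 full AES blocks (48 bytes). */
	unsigned int nbytes = 3 * AES_BLOCK_SIZE;

	/* Before the fix: every full block was processed (here 3, odd). */
	unsigned int processed_before = nbytes / AES_BLOCK_SIZE;

	/*
	 * The walk itself only advances by whole 2-block strides, so only
	 * 2 of those blocks are actually consumed in this step.
	 */
	unsigned int consumed = nbytes / (2 * AES_BLOCK_SIZE) * 2;

	/* After the fix: process exactly what the walk will consume. */
	unsigned int processed_after = nbytes / (2 * AES_BLOCK_SIZE) * 2;

	printf("before: processed %u blocks, consumed %u (out of sync)\n",
	       processed_before, consumed);
	printf("after:  processed %u blocks, consumed %u (in sync)\n",
	       processed_after, consumed);
	return 0;
}

With an odd per-step count, the CTR counter and GHASH state were advanced one
block further than the walk actually consumed; keeping the count even keeps
them in lockstep with the data, which is what the patch below does.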

Comments

Ard Biesheuvel March 13, 2019, 10:29 a.m. UTC | #1
On Wed, 13 Mar 2019 at 06:15, Eric Biggers <ebiggers@kernel.org> wrote:
>
> From: Eric Biggers <ebiggers@google.com>
>
> The arm64 gcm-aes-ce algorithm is failing the extra crypto self-tests
> following my patches to test the !may_use_simd() code paths, which
> previously were untested.  The problem is that in the !may_use_simd()
> case, an odd number of AES blocks can be processed within each step of
> the skcipher_walk.  However, the skcipher_walk is being done with a
> "stride" of 2 blocks and is advanced by an even number of blocks after
> each step.  This causes the encryption to produce the wrong ciphertext
> and authentication tag, and causes the decryption to incorrectly fail.
>
> Fix it by only processing an even number of blocks per step.
>
> Fixes: c2b24c36e0a3 ("crypto: arm64/aes-gcm-ce - fix scatterwalk API violation")
> Fixes: 71e52c278c54 ("crypto: arm64/aes-ce-gcm - operate on two input blocks at a time")
> Cc: <stable@vger.kernel.org> # v4.19+
> Signed-off-by: Eric Biggers <ebiggers@google.com>

Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>


> ---
>  arch/arm64/crypto/ghash-ce-glue.c | 10 ++++++----
>  1 file changed, 6 insertions(+), 4 deletions(-)
>
> diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
> index 791ad422c427..089b09286da7 100644
> --- a/arch/arm64/crypto/ghash-ce-glue.c
> +++ b/arch/arm64/crypto/ghash-ce-glue.c
> @@ -473,9 +473,11 @@ static int gcm_encrypt(struct aead_request *req)
>                 put_unaligned_be32(2, iv + GCM_IV_SIZE);
>
>                 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
> -                       int blocks = walk.nbytes / AES_BLOCK_SIZE;
> +                       const int blocks =
> +                               walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
>                         u8 *dst = walk.dst.virt.addr;
>                         u8 *src = walk.src.virt.addr;
> +                       int remaining = blocks;
>
>                         do {
>                                 __aes_arm64_encrypt(ctx->aes_key.key_enc,
> @@ -485,9 +487,9 @@ static int gcm_encrypt(struct aead_request *req)
>
>                                 dst += AES_BLOCK_SIZE;
>                                 src += AES_BLOCK_SIZE;
> -                       } while (--blocks > 0);
> +                       } while (--remaining > 0);
>
> -                       ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
> +                       ghash_do_update(blocks, dg,
>                                         walk.dst.virt.addr, &ctx->ghash_key,
>                                         NULL, pmull_ghash_update_p64);
>
> @@ -609,7 +611,7 @@ static int gcm_decrypt(struct aead_request *req)
>                 put_unaligned_be32(2, iv + GCM_IV_SIZE);
>
>                 while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
> -                       int blocks = walk.nbytes / AES_BLOCK_SIZE;
> +                       int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
>                         u8 *dst = walk.dst.virt.addr;
>                         u8 *src = walk.src.virt.addr;
>
> --
> 2.21.0
>

Patch

diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 791ad422c427..089b09286da7 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -473,9 +473,11 @@ static int gcm_encrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			const int blocks =
+				walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;
+			int remaining = blocks;
 
 			do {
 				__aes_arm64_encrypt(ctx->aes_key.key_enc,
@@ -485,9 +487,9 @@ static int gcm_encrypt(struct aead_request *req)
 
 				dst += AES_BLOCK_SIZE;
 				src += AES_BLOCK_SIZE;
-			} while (--blocks > 0);
+			} while (--remaining > 0);
 
-			ghash_do_update(walk.nbytes / AES_BLOCK_SIZE, dg,
+			ghash_do_update(blocks, dg,
 					walk.dst.virt.addr, &ctx->ghash_key,
 					NULL, pmull_ghash_update_p64);
 
@@ -609,7 +611,7 @@ static int gcm_decrypt(struct aead_request *req)
 		put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 		while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
-			int blocks = walk.nbytes / AES_BLOCK_SIZE;
+			int blocks = walk.nbytes / (2 * AES_BLOCK_SIZE) * 2;
 			u8 *dst = walk.dst.virt.addr;
 			u8 *src = walk.src.virt.addr;