From patchwork Tue Sep 1 11:48:40 2020
X-Patchwork-Id: 11748519
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Tue, 1 Sep 2020 21:48:40 +1000
To: Ard Biesheuvel
Cc: Linux Crypto Mailing List <linux-crypto@vger.kernel.org>
Subject: [v2 PATCH 1/2] crypto: arm/aes-neonbs - Use generic cbc encryption
 path
Message-ID: <20200901114840.GA2519@gondor.apana.org.au>
References: <20200901062804.GA1533@gondor.apana.org.au>
 <20200901114542.GA2441@gondor.apana.org.au>
In-Reply-To: <20200901114542.GA2441@gondor.apana.org.au>

Since commit b56f5cbc7e08ec7d31c42fc41e5247677f20b143 ("crypto:
arm/aes-neonbs - resolve fallback cipher at runtime") the CBC
encryption path in aes-neonbs is identical to the one obtained
through the generic cbc template, so there is no reason for
aes-neonbs to carry its own copy.

This patch removes the custom encryption path and invokes the
generic cbc template instead.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
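For reference, the delegation the patch relies on is the usual skcipher
sub-request idiom: the outer algorithm keeps a handle to an inner
"cbc(aes)" tfm in its context, reserves room for the inner request in
the tail of its own request, and forwards src/dst/iv untouched.  A
minimal sketch follows; the "outer_*" names and struct outer_ctx are
illustrative only, not part of the patch:

#include <crypto/internal/skcipher.h>
#include <linux/err.h>

struct outer_ctx {
        struct crypto_skcipher *inner;
};

static int outer_init(struct crypto_skcipher *tfm)
{
        struct outer_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->inner = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->inner))
                return PTR_ERR(ctx->inner);

        /* Make room for the sub-request in the outer request's tail. */
        crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
                                         crypto_skcipher_reqsize(ctx->inner));
        return 0;
}

static int outer_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct outer_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_request *subreq = skcipher_request_ctx(req);

        skcipher_request_set_tfm(subreq, ctx->inner);
        skcipher_request_set_callback(subreq, skcipher_request_flags(req),
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst,
                                   req->cryptlen, req->iv);
        return crypto_skcipher_encrypt(subreq);
}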
diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index e6fd32919c81..b324c5500846 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -8,7 +8,6 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/aes.h>
-#include <crypto/cbc.h>
 #include <crypto/ctr.h>
 #include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
@@ -49,7 +48,7 @@ struct aesbs_ctx {
 
 struct aesbs_cbc_ctx {
         struct aesbs_ctx        key;
-        struct crypto_cipher    *enc_tfm;
+        struct crypto_skcipher  *enc_tfm;
 };
 
 struct aesbs_xts_ctx {
@@ -140,19 +139,23 @@ static int aesbs_cbc_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
         kernel_neon_end();
         memzero_explicit(&rk, sizeof(rk));
 
-        return crypto_cipher_setkey(ctx->enc_tfm, in_key, key_len);
+        return crypto_skcipher_setkey(ctx->enc_tfm, in_key, key_len);
 }
 
-static void cbc_encrypt_one(struct crypto_skcipher *tfm, const u8 *src, u8 *dst)
+static int cbc_encrypt(struct skcipher_request *req)
 {
+        struct skcipher_request *subreq = skcipher_request_ctx(req);
+        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
         struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-        crypto_cipher_encrypt_one(ctx->enc_tfm, dst, src);
-}
+        skcipher_request_set_tfm(subreq, ctx->enc_tfm);
+        skcipher_request_set_callback(subreq,
+                                      skcipher_request_flags(req),
+                                      NULL, NULL);
+        skcipher_request_set_crypt(subreq, req->src, req->dst,
+                                   req->cryptlen, req->iv);
 
-static int cbc_encrypt(struct skcipher_request *req)
-{
-        return crypto_cbc_encrypt_walk(req, cbc_encrypt_one);
+        return crypto_skcipher_encrypt(subreq);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
@@ -183,20 +186,27 @@ static int cbc_decrypt(struct skcipher_request *req)
         return err;
 }
 
-static int cbc_init(struct crypto_tfm *tfm)
+static int cbc_init(struct crypto_skcipher *tfm)
 {
-        struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
+        unsigned int reqsize;
+
+        ctx->enc_tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+        if (IS_ERR(ctx->enc_tfm))
+                return PTR_ERR(ctx->enc_tfm);
 
-        ctx->enc_tfm = crypto_alloc_cipher("aes", 0, 0);
+        reqsize = sizeof(struct skcipher_request);
+        reqsize += crypto_skcipher_reqsize(ctx->enc_tfm);
+        crypto_skcipher_set_reqsize(tfm, reqsize);
 
-        return PTR_ERR_OR_ZERO(ctx->enc_tfm);
+        return 0;
 }
 
-static void cbc_exit(struct crypto_tfm *tfm)
+static void cbc_exit(struct crypto_skcipher *tfm)
 {
-        struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+        struct aesbs_cbc_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-        crypto_free_cipher(ctx->enc_tfm);
+        crypto_free_skcipher(ctx->enc_tfm);
 }
 
 static int aesbs_ctr_setkey_sync(struct crypto_skcipher *tfm, const u8 *in_key,
@@ -432,8 +442,6 @@ static struct skcipher_alg aes_algs[] = { {
         .base.cra_ctxsize       = sizeof(struct aesbs_cbc_ctx),
         .base.cra_module        = THIS_MODULE,
         .base.cra_flags         = CRYPTO_ALG_INTERNAL,
-        .base.cra_init          = cbc_init,
-        .base.cra_exit          = cbc_exit,
 
         .min_keysize            = AES_MIN_KEY_SIZE,
         .max_keysize            = AES_MAX_KEY_SIZE,
@@ -442,6 +450,8 @@ static struct skcipher_alg aes_algs[] = { {
         .setkey                 = aesbs_cbc_setkey,
         .encrypt                = cbc_encrypt,
         .decrypt                = cbc_decrypt,
+        .init                   = cbc_init,
+        .exit                   = cbc_exit,
 }, {
         .base.cra_name          = "__ctr(aes)",
         .base.cra_driver_name   = "__ctr-aes-neonbs",
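A note on why the encrypt paths converge while decrypt keeps a NEON
fast path: CBC encryption is a strict recurrence, C[0] = E_K(P[0] ^ IV)
and C[i] = E_K(P[i] ^ C[i-1]), so each output block depends on the
previous one and cannot be bit-sliced or parallelised; every correct
implementation ends up doing the same serial walk.  Decryption has all
ciphertext blocks available up front and parallelises freely, which is
exactly what the bit-sliced NEON decrypt exploits.  A portable
reference for the encrypt recurrence (illustrative names, not kernel
API; assumes len is a multiple of the block size bs):

#include <stddef.h>
#include <string.h>

typedef void (*block_fn)(const void *key, unsigned char *dst,
                         const unsigned char *src);

/* In-place CBC encryption over whole blocks; iv is updated to the
 * last ciphertext block so a follow-on call continues the chain. */
static void cbc_encrypt_ref(block_fn encrypt, const void *key, size_t bs,
                            unsigned char *iv, unsigned char *buf, size_t len)
{
        unsigned char *prev = iv;
        size_t off, i;

        for (off = 0; off + bs <= len; off += bs) {
                for (i = 0; i < bs; i++)        /* P[i] ^= C[i-1] (or IV) */
                        buf[off + i] ^= prev[i];
                encrypt(key, buf + off, buf + off);     /* C[i] = E_K(...) */
                prev = buf + off;
        }
        memcpy(iv, prev, bs);
}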
From patchwork Tue Sep 1 11:49:11 2020
X-Patchwork-Id: 11748513
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Tue, 1 Sep 2020 21:49:11 +1000
To: Ard Biesheuvel
Cc: Linux Crypto Mailing List <linux-crypto@vger.kernel.org>
Subject: [v2 PATCH 2/2] crypto: cbc - Remove cbc.h
Message-ID: <20200901114911.GB2519@gondor.apana.org.au>
References: <20200901062804.GA1533@gondor.apana.org.au>
 <20200901114542.GA2441@gondor.apana.org.au>
 <20200901114840.GA2519@gondor.apana.org.au>
In-Reply-To: <20200901114840.GA2519@gondor.apana.org.au>

Now that crypto/cbc.h is only used by the generic cbc template,
we can merge it back into the CBC code.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
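The subtlest piece being moved is the in-place decrypt walk.  Since
P[i] = D_K(C[i]) ^ C[i-1], an in-place pass must not overwrite a
ciphertext block before it has served as the chaining value for its
successor; the code therefore starts at the last block and works
backwards, saving only the final ciphertext block up front (it becomes
the IV for the next request).  A portable sketch of the same strategy
(illustrative names, not kernel API; assumes len is a positive
multiple of the block size bs):

#include <stddef.h>
#include <string.h>

#define MAX_BS 16       /* illustrative bound, cf. MAX_CIPHER_BLOCKSIZE */

typedef void (*block_fn)(const void *key, unsigned char *dst,
                         const unsigned char *src);

static void cbc_decrypt_inplace_ref(block_fn decrypt, const void *key,
                                    size_t bs, unsigned char *iv,
                                    unsigned char *buf, size_t len)
{
        unsigned char *blk = buf + len - bs;    /* start of the last block */
        unsigned char last[MAX_BS];
        size_t i;

        memcpy(last, blk, bs);                  /* next chaining value */
        for (; blk > buf; blk -= bs) {
                unsigned char *prev = blk - bs; /* still ciphertext here */

                decrypt(key, blk, blk);
                for (i = 0; i < bs; i++)        /* ^= previous C block */
                        blk[i] ^= prev[i];
        }
        decrypt(key, buf, buf);                 /* first block chains off IV */
        for (i = 0; i < bs; i++)
                buf[i] ^= iv[i];
        memcpy(iv, last, bs);                   /* saved last C block -> IV */
}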
diff --git a/crypto/cbc.c b/crypto/cbc.c
index e6f6273a7d39..0d9509dff891 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -6,7 +6,6 @@
  */
 
 #include <crypto/algapi.h>
-#include <crypto/cbc.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -14,34 +13,157 @@
 #include <linux/log2.h>
 #include <linux/module.h>
 
-static inline void crypto_cbc_encrypt_one(struct crypto_skcipher *tfm,
-                                          const u8 *src, u8 *dst)
+static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
+                                      struct crypto_skcipher *skcipher)
 {
-        crypto_cipher_encrypt_one(skcipher_cipher_simple(tfm), dst, src);
+        unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+        void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+        unsigned int nbytes = walk->nbytes;
+        u8 *src = walk->src.virt.addr;
+        u8 *dst = walk->dst.virt.addr;
+        struct crypto_cipher *cipher;
+        struct crypto_tfm *tfm;
+        u8 *iv = walk->iv;
+
+        cipher = skcipher_cipher_simple(skcipher);
+        tfm = crypto_cipher_tfm(cipher);
+        fn = crypto_cipher_alg(cipher)->cia_encrypt;
+
+        do {
+                crypto_xor(iv, src, bsize);
+                fn(tfm, dst, iv);
+                memcpy(iv, dst, bsize);
+
+                src += bsize;
+                dst += bsize;
+        } while ((nbytes -= bsize) >= bsize);
+
+        return nbytes;
+}
+
+static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
+                                      struct crypto_skcipher *skcipher)
+{
+        unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+        void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+        unsigned int nbytes = walk->nbytes;
+        u8 *src = walk->src.virt.addr;
+        struct crypto_cipher *cipher;
+        struct crypto_tfm *tfm;
+        u8 *iv = walk->iv;
+
+        cipher = skcipher_cipher_simple(skcipher);
+        tfm = crypto_cipher_tfm(cipher);
+        fn = crypto_cipher_alg(cipher)->cia_encrypt;
+
+        do {
+                crypto_xor(src, iv, bsize);
+                fn(tfm, src, src);
+                iv = src;
+
+                src += bsize;
+        } while ((nbytes -= bsize) >= bsize);
+
+        memcpy(walk->iv, iv, bsize);
+
+        return nbytes;
 }
 
 static int crypto_cbc_encrypt(struct skcipher_request *req)
 {
-        return crypto_cbc_encrypt_walk(req, crypto_cbc_encrypt_one);
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        struct skcipher_walk walk;
+        int err;
+
+        err = skcipher_walk_virt(&walk, req, false);
+
+        while (walk.nbytes) {
+                if (walk.src.virt.addr == walk.dst.virt.addr)
+                        err = crypto_cbc_encrypt_inplace(&walk, skcipher);
+                else
+                        err = crypto_cbc_encrypt_segment(&walk, skcipher);
+                err = skcipher_walk_done(&walk, err);
+        }
+
+        return err;
+}
+
+static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
+                                      struct crypto_skcipher *skcipher)
+{
+        unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+        void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+        unsigned int nbytes = walk->nbytes;
+        u8 *src = walk->src.virt.addr;
+        u8 *dst = walk->dst.virt.addr;
+        struct crypto_cipher *cipher;
+        struct crypto_tfm *tfm;
+        u8 *iv = walk->iv;
+
+        cipher = skcipher_cipher_simple(skcipher);
+        tfm = crypto_cipher_tfm(cipher);
+        fn = crypto_cipher_alg(cipher)->cia_decrypt;
+
+        do {
+                fn(tfm, dst, src);
+                crypto_xor(dst, iv, bsize);
+                iv = src;
+
+                src += bsize;
+                dst += bsize;
+        } while ((nbytes -= bsize) >= bsize);
+
+        memcpy(walk->iv, iv, bsize);
+
+        return nbytes;
 }
 
-static inline void crypto_cbc_decrypt_one(struct crypto_skcipher *tfm,
-                                          const u8 *src, u8 *dst)
+static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
+                                      struct crypto_skcipher *skcipher)
 {
-        crypto_cipher_decrypt_one(skcipher_cipher_simple(tfm), dst, src);
+        unsigned int bsize = crypto_skcipher_blocksize(skcipher);
+        void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
+        unsigned int nbytes = walk->nbytes;
+        u8 *src = walk->src.virt.addr;
+        u8 last_iv[MAX_CIPHER_BLOCKSIZE];
+        struct crypto_cipher *cipher;
+        struct crypto_tfm *tfm;
+
+        cipher = skcipher_cipher_simple(skcipher);
+        tfm = crypto_cipher_tfm(cipher);
+        fn = crypto_cipher_alg(cipher)->cia_decrypt;
+
+        /* Start of the last block. */
+        src += nbytes - (nbytes & (bsize - 1)) - bsize;
+        memcpy(last_iv, src, bsize);
+
+        for (;;) {
+                fn(tfm, src, src);
+                if ((nbytes -= bsize) < bsize)
+                        break;
+                crypto_xor(src, src - bsize, bsize);
+                src -= bsize;
+        }
+
+        crypto_xor(src, walk->iv, bsize);
+        memcpy(walk->iv, last_iv, bsize);
+
+        return nbytes;
 }
 
 static int crypto_cbc_decrypt(struct skcipher_request *req)
 {
-        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
         struct skcipher_walk walk;
         int err;
 
         err = skcipher_walk_virt(&walk, req, false);
 
         while (walk.nbytes) {
-                err = crypto_cbc_decrypt_blocks(&walk, tfm,
-                                                crypto_cbc_decrypt_one);
+                if (walk.src.virt.addr == walk.dst.virt.addr)
+                        err = crypto_cbc_decrypt_inplace(&walk, skcipher);
+                else
+                        err = crypto_cbc_decrypt_segment(&walk, skcipher);
                 err = skcipher_walk_done(&walk, err);
         }
 
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
deleted file mode 100644
index 2b6422db42e2..000000000000
--- a/include/crypto/cbc.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * CBC: Cipher Block Chaining mode
- *
- * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
- */
-
-#ifndef _CRYPTO_CBC_H
-#define _CRYPTO_CBC_H
-
-#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
-#include <linux/kernel.h>
-
-static inline int crypto_cbc_encrypt_segment(
-        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-        unsigned int bsize = crypto_skcipher_blocksize(tfm);
-        unsigned int nbytes = walk->nbytes;
-        u8 *src = walk->src.virt.addr;
-        u8 *dst = walk->dst.virt.addr;
-        u8 *iv = walk->iv;
-
-        do {
-                crypto_xor(iv, src, bsize);
-                fn(tfm, iv, dst);
-                memcpy(iv, dst, bsize);
-
-                src += bsize;
-                dst += bsize;
-        } while ((nbytes -= bsize) >= bsize);
-
-        return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_inplace(
-        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-        unsigned int bsize = crypto_skcipher_blocksize(tfm);
-        unsigned int nbytes = walk->nbytes;
-        u8 *src = walk->src.virt.addr;
-        u8 *iv = walk->iv;
-
-        do {
-                crypto_xor(src, iv, bsize);
-                fn(tfm, src, src);
-                iv = src;
-
-                src += bsize;
-        } while ((nbytes -= bsize) >= bsize);
-
-        memcpy(walk->iv, iv, bsize);
-
-        return nbytes;
-}
-
-static inline int crypto_cbc_encrypt_walk(struct skcipher_request *req,
-                                          void (*fn)(struct crypto_skcipher *,
-                                                     const u8 *, u8 *))
-{
-        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-        struct skcipher_walk walk;
-        int err;
-
-        err = skcipher_walk_virt(&walk, req, false);
-
-        while (walk.nbytes) {
-                if (walk.src.virt.addr == walk.dst.virt.addr)
-                        err = crypto_cbc_encrypt_inplace(&walk, tfm, fn);
-                else
-                        err = crypto_cbc_encrypt_segment(&walk, tfm, fn);
-                err = skcipher_walk_done(&walk, err);
-        }
-
-        return err;
-}
-
-static inline int crypto_cbc_decrypt_segment(
-        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-        unsigned int bsize = crypto_skcipher_blocksize(tfm);
-        unsigned int nbytes = walk->nbytes;
-        u8 *src = walk->src.virt.addr;
-        u8 *dst = walk->dst.virt.addr;
-        u8 *iv = walk->iv;
-
-        do {
-                fn(tfm, src, dst);
-                crypto_xor(dst, iv, bsize);
-                iv = src;
-
-                src += bsize;
-                dst += bsize;
-        } while ((nbytes -= bsize) >= bsize);
-
-        memcpy(walk->iv, iv, bsize);
-
-        return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_inplace(
-        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-        unsigned int bsize = crypto_skcipher_blocksize(tfm);
-        unsigned int nbytes = walk->nbytes;
-        u8 *src = walk->src.virt.addr;
-        u8 last_iv[MAX_CIPHER_BLOCKSIZE];
-
-        /* Start of the last block. */
-        src += nbytes - (nbytes & (bsize - 1)) - bsize;
-        memcpy(last_iv, src, bsize);
-
-        for (;;) {
-                fn(tfm, src, src);
-                if ((nbytes -= bsize) < bsize)
-                        break;
-                crypto_xor(src, src - bsize, bsize);
-                src -= bsize;
-        }
-
-        crypto_xor(src, walk->iv, bsize);
-        memcpy(walk->iv, last_iv, bsize);
-
-        return nbytes;
-}
-
-static inline int crypto_cbc_decrypt_blocks(
-        struct skcipher_walk *walk, struct crypto_skcipher *tfm,
-        void (*fn)(struct crypto_skcipher *, const u8 *, u8 *))
-{
-        if (walk->src.virt.addr == walk->dst.virt.addr)
-                return crypto_cbc_decrypt_inplace(walk, tfm, fn);
-        else
-                return crypto_cbc_decrypt_segment(walk, tfm, fn);
-}
-
-#endif  /* _CRYPTO_CBC_H */