From patchwork Wed Jun 3 06:49:27 2015
Subject: [PATCH 4/8] crypto: seqiv - Move IV seeding into init function
From: Herbert Xu
To: Linux Crypto Mailing List, Stephan Mueller
Date: Wed, 03 Jun 2015 14:49:27 +0800
References: <20150603064641.GA4655@gondor.apana.org.au>

We currently do the IV seeding on the first givencrypt call in order to conserve entropy. However, this does not work with the DRBG, which cannot be called from interrupt context. In fact, with the DRBG we don't need to conserve entropy anyway. So this patch moves the seeding into the init function.
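For readers unfamiliar with the seqiv code, here is a minimal userspace C sketch (not kernel code) of the pattern the patch switches to: the per-instance salt is filled once at init time instead of lazily, under a lock, on the first encrypt call. getrandom() stands in for crypto_rng_get_bytes(), and the struct and function names are hypothetical.

```c
/*
 * Userspace sketch of "seed once at init" instead of "seed lazily on
 * first use".  getrandom() plays the role of crypto_rng_get_bytes();
 * struct ctx and its members are hypothetical stand-ins for seqiv_ctx.
 */
#include <stdio.h>
#include <string.h>
#include <sys/random.h>

struct ctx {
	unsigned char salt[8];		/* analogous to seqiv's ctx->salt */
};

/* After the patch: seeding happens once, when the instance is set up. */
static int ctx_init(struct ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	/* Fail initialisation outright if the RNG cannot deliver the salt. */
	if (getrandom(ctx->salt, sizeof(ctx->salt), 0) != (ssize_t)sizeof(ctx->salt))
		return -1;
	return 0;
}

/* The fast path no longer needs a "first call" check or a spinlock. */
static int do_encrypt(struct ctx *ctx, unsigned long long seqno)
{
	printf("encrypt: seq=%llu salt[0]=%02x\n", seqno, ctx->salt[0]);
	return 0;
}

int main(void)
{
	struct ctx ctx;

	if (ctx_init(&ctx))
		return 1;
	return do_encrypt(&ctx, 1);
}
```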
Signed-off-by: Herbert Xu
---
 crypto/seqiv.c | 113 +++++++--------------------------------------------------
 1 file changed, 15 insertions(+), 98 deletions(-)

diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 2333974..42e4ee5 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -474,98 +474,6 @@ static int seqiv_aead_decrypt(struct aead_request *req)
 	return crypto_aead_decrypt(subreq);
 }
 
-static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->lock);
-	if (crypto_ablkcipher_crt(geniv)->givencrypt != seqiv_givencrypt_first)
-		goto unlock;
-
-	crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_ablkcipher_ivsize(geniv));
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
-
-	if (err)
-		return err;
-
-	return seqiv_givencrypt(req);
-}
-
-static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
-	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->lock);
-	if (crypto_aead_crt(geniv)->givencrypt != seqiv_aead_givencrypt_first)
-		goto unlock;
-
-	crypto_aead_crt(geniv)->givencrypt = seqiv_aead_givencrypt;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_aead_ivsize(geniv));
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
-
-	if (err)
-		return err;
-
-	return seqiv_aead_givencrypt(req);
-}
-
-static int seqniv_aead_encrypt_first(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->geniv.lock);
-	if (geniv->encrypt != seqniv_aead_encrypt_first)
-		goto unlock;
-
-	geniv->encrypt = seqniv_aead_encrypt;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_aead_ivsize(geniv));
-
-unlock:
-	spin_unlock_bh(&ctx->geniv.lock);
-
-	if (err)
-		return err;
-
-	return seqniv_aead_encrypt(req);
-}
-
-static int seqiv_aead_encrypt_first(struct aead_request *req)
-{
-	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
-	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->geniv.lock);
-	if (geniv->encrypt != seqiv_aead_encrypt_first)
-		goto unlock;
-
-	geniv->encrypt = seqiv_aead_encrypt;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
-				   crypto_aead_ivsize(geniv));
-
-unlock:
-	spin_unlock_bh(&ctx->geniv.lock);
-
-	if (err)
-		return err;
-
-	return seqiv_aead_encrypt(req);
-}
-
 static int seqiv_init(struct crypto_tfm *tfm)
 {
 	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
@@ -575,7 +483,9 @@ static int seqiv_init(struct crypto_tfm *tfm)
 
 	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
 
-	return skcipher_geniv_init(tfm);
+	return crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+				    crypto_ablkcipher_ivsize(geniv)) ?:
+	       skcipher_geniv_init(tfm);
 }
 
 static int seqiv_old_aead_init(struct crypto_tfm *tfm)
@@ -588,7 +498,9 @@ static int seqiv_old_aead_init(struct crypto_tfm *tfm)
 	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
 				sizeof(struct aead_request));
 
-	return aead_geniv_init(tfm);
+	return crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+				    crypto_aead_ivsize(geniv)) ?:
+	       aead_geniv_init(tfm);
 }
 
 static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
@@ -601,6 +513,11 @@ static int seqiv_aead_init_common(struct crypto_tfm *tfm, unsigned int reqsize)
 
 	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));
 
+	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
+				   crypto_aead_ivsize(geniv));
+	if (err)
+		goto out;
+
 	ctx->null = crypto_get_default_null_skcipher();
 	err = PTR_ERR(ctx->null);
 	if (IS_ERR(ctx->null))
@@ -654,7 +571,7 @@ static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
 	if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
 		goto free_inst;
 
-	inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt_first;
+	inst->alg.cra_ablkcipher.givencrypt = seqiv_givencrypt;
 
 	inst->alg.cra_init = seqiv_init;
 	inst->alg.cra_exit = skcipher_geniv_exit;
@@ -685,7 +602,7 @@ static int seqiv_old_aead_create(struct crypto_template *tmpl,
 	if (inst->alg.cra_aead.ivsize < sizeof(u64))
 		goto free_inst;
 
-	inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt_first;
+	inst->alg.cra_aead.givencrypt = seqiv_aead_givencrypt;
 
 	inst->alg.cra_init = seqiv_old_aead_init;
 	inst->alg.cra_exit = aead_geniv_exit;
@@ -732,7 +649,7 @@ static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
 	if (inst->alg.ivsize != sizeof(u64))
 		goto free_inst;
 
-	inst->alg.encrypt = seqiv_aead_encrypt_first;
+	inst->alg.encrypt = seqiv_aead_encrypt;
 	inst->alg.decrypt = seqiv_aead_decrypt;
 
 	inst->alg.base.cra_init = seqiv_aead_init;
@@ -804,7 +721,7 @@ static int seqniv_create(struct crypto_template *tmpl, struct rtattr **tb)
 	if (inst->alg.ivsize != sizeof(u64))
 		goto free_inst;
 
-	inst->alg.encrypt = seqniv_aead_encrypt_first;
+	inst->alg.encrypt = seqniv_aead_encrypt;
 	inst->alg.decrypt = seqniv_aead_decrypt;
 
 	inst->alg.base.cra_init = seqniv_aead_init;
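The new seqiv_init() and seqiv_old_aead_init() bodies above use the GNU C `a ?: b` extension, which yields `a` when it is non-zero (evaluating it only once) and `b` otherwise, so an RNG error is returned immediately and the underlying geniv init runs only when seeding succeeded. A standalone sketch of the idiom, with hypothetical stand-in helpers, compiled as GNU C:

```c
/*
 * Demonstrates the GNU C "?:"" operator used in the new init paths:
 * the first non-zero value wins, so a failure in the first step
 * short-circuits the follow-up call.  seed() and finish_init() are
 * hypothetical stand-ins for crypto_rng_get_bytes() and
 * skcipher_geniv_init()/aead_geniv_init().
 */
#include <stdio.h>

static int seed(void)        { return 0; }	/* 0 = success */
static int finish_init(void) { return 0; }	/* 0 = success */

int main(void)
{
	int err = seed() ?: finish_init();	/* first non-zero result, else 0 */

	printf("err = %d\n", err);
	return err;
}
```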