From patchwork Wed Sep 16 12:36:40 2020
From: Ard Biesheuvel <ardb@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: herbert@gondor.apana.org.au, Ard Biesheuvel <ardb@kernel.org>
Subject: [PATCH 1/3] crypto: arm/aes-neonbs - avoid hacks to prevent Thumb2 mode switches
Date: Wed, 16 Sep 2020 15:36:40 +0300
Message-Id: <20200916123642.20805-2-ardb@kernel.org>
In-Reply-To: <20200916123642.20805-1-ardb@kernel.org>
References: <20200916123642.20805-1-ardb@kernel.org>
X-Patchwork-Id: 11780543

Instead of using a homegrown macrofied version of the adr instruction
that sets the Thumb bit in the output value, only to ensure that any
bx instructions consuming that value will not switch out of Thumb mode
when branching, use non-interworking mov (to PC) instructions, which
achieve the same thing.
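
To make the change easier to follow, here is a condensed, annotated
excerpt of the dispatch pattern being converted. The instructions are
taken from the diff below; the comments are explanatory additions, and
the excerpt is not meant to assemble on its own (label 0f, the block
count in r5 and the THUMB() wrapper come from the surrounding file).

	// Old pattern: a helper macro emits adr and sets the Thumb bit by
	// hand, only so that the interworking bx branch below does not
	// switch to ARM mode (bx selects the instruction set from bit 0 of
	// the target address).
	.macro		__adr, reg, lbl
	adr		\reg, \lbl
THUMB(	orr		\reg, \reg, #1	)
	.endm

99:	__adr		ip, 0f			// address of label 0f, Thumb bit set
	and		lr, r5, #7		// lr = blocks mod 8
	cmp		r5, #8
	sub		ip, ip, lr, lsl #2	// step back 4 bytes per block so only
						// that many of the loads before 0f run
	bxlt		ip			// interworking computed goto

	// New pattern: a mov to pc is a non-interworking branch, so it
	// stays in Thumb mode regardless of bit 0, and both the Thumb-bit
	// fixup and the __adr macro can be dropped.
99:	adr		ip, 0f
	and		lr, r5, #7
	cmp		r5, #8
	sub		ip, ip, lr, lsl #2
	movlt		pc, ip			// computed goto if blocks < 8
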
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm/crypto/aes-neonbs-core.S | 49 +++++++++-----------
 1 file changed, 22 insertions(+), 27 deletions(-)

diff --git a/arch/arm/crypto/aes-neonbs-core.S b/arch/arm/crypto/aes-neonbs-core.S
index cfaed4e67535..07cde1374bb0 100644
--- a/arch/arm/crypto/aes-neonbs-core.S
+++ b/arch/arm/crypto/aes-neonbs-core.S
@@ -77,11 +77,6 @@
 	vldr		\out\()h, \sym + 8
 	.endm

-	.macro		__adr, reg, lbl
-	adr		\reg, \lbl
-THUMB(	orr		\reg, \reg, #1	)
-	.endm
-
 	.macro		in_bs_ch, b0, b1, b2, b3, b4, b5, b6, b7
 	veor		\b2, \b2, \b1
 	veor		\b5, \b5, \b6
@@ -629,11 +624,11 @@ ENDPROC(aesbs_decrypt8)
 	push		{r4-r6, lr}
 	ldr		r5, [sp, #16]		// number of blocks

-99:	__adr		ip, 0f
+99:	adr		ip, 0f
 	and		lr, r5, #7
 	cmp		r5, #8
 	sub		ip, ip, lr, lsl #2
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	vld1.8		{q0}, [r1]!
 	vld1.8		{q1}, [r1]!
@@ -648,11 +643,11 @@ ENDPROC(aesbs_decrypt8)
 	mov		rounds, r3
 	bl		\do8

-	__adr		ip, 1f
+	adr		ip, 1f
 	and		lr, r5, #7
 	cmp		r5, #8
 	sub		ip, ip, lr, lsl #2
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	vst1.8		{\o0}, [r0]!
 	vst1.8		{\o1}, [r0]!
@@ -689,12 +684,12 @@ ENTRY(aesbs_cbc_decrypt)
 	push		{r4-r6, lr}
 	ldm		ip, {r5-r6}		// load args 4-5

-99:	__adr		ip, 0f
+99:	adr		ip, 0f
 	and		lr, r5, #7
 	cmp		r5, #8
 	sub		ip, ip, lr, lsl #2
 	mov		lr, r1
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	vld1.8		{q0}, [lr]!
 	vld1.8		{q1}, [lr]!
@@ -718,11 +713,11 @@ ENTRY(aesbs_cbc_decrypt)
 	vmov		q14, q8
 	vmov		q15, q8

-	__adr		ip, 1f
+	adr		ip, 1f
 	and		lr, r5, #7
 	cmp		r5, #8
 	sub		ip, ip, lr, lsl #2
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	vld1.8		{q9}, [r1]!
 	vld1.8		{q10}, [r1]!
@@ -733,9 +728,9 @@ ENTRY(aesbs_cbc_decrypt)
 	vld1.8		{q15}, [r1]!
 	W(nop)

-1:	__adr		ip, 2f
+1:	adr		ip, 2f
 	sub		ip, ip, lr, lsl #3
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	veor		q0, q0, q8
 	vst1.8		{q0}, [r0]!
@@ -804,13 +799,13 @@ ENTRY(aesbs_ctr_encrypt)
 	vmov		q6, q0
 	vmov		q7, q0

-	__adr		ip, 0f
+	adr		ip, 0f
 	sub		lr, r5, #1
 	and		lr, lr, #7
 	cmp		r5, #8
 	sub		ip, ip, lr, lsl #5
 	sub		ip, ip, lr, lsl #2
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	next_ctr	q1
 	next_ctr	q2
@@ -824,13 +819,13 @@ ENTRY(aesbs_ctr_encrypt)
 	mov		rounds, r3
 	bl		aesbs_encrypt8

-	__adr		ip, 1f
+	adr		ip, 1f
 	and		lr, r5, #7
 	cmp		r5, #8
 	movgt		r4, #0
 	ldrle		r4, [sp, #40]		// load final in the last round
 	sub		ip, ip, lr, lsl #2
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	vld1.8		{q8}, [r1]!
 	vld1.8		{q9}, [r1]!
@@ -843,10 +838,10 @@ ENTRY(aesbs_ctr_encrypt)
 1:	bne		2f
 	vld1.8		{q15}, [r1]!

-2:	__adr		ip, 3f
+2:	adr		ip, 3f
 	cmp		r5, #8
 	sub		ip, ip, lr, lsl #3
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	veor		q0, q0, q8
 	vst1.8		{q0}, [r0]!
@@ -900,12 +895,12 @@ __xts_prepare8:
 	vshr.u64	d30, d31, #7
 	vmov		q12, q14

-	__adr		ip, 0f
+	adr		ip, 0f
 	and		r4, r6, #7
 	cmp		r6, #8
 	sub		ip, ip, r4, lsl #5
 	mov		r4, sp
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	vld1.8		{q0}, [r1]!
 	next_tweak	q12, q14, q15, q13
@@ -973,12 +968,12 @@ ENDPROC(__xts_prepare8)
 	mov		rounds, r3
 	bl		\do8

-	__adr		ip, 0f
+	adr		ip, 0f
 	and		lr, r6, #7
 	cmp		r6, #8
 	sub		ip, ip, lr, lsl #2
 	mov		r4, sp
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	vld1.8		{q8}, [r4, :128]!
 	vld1.8		{q9}, [r4, :128]!
@@ -989,9 +984,9 @@ ENDPROC(__xts_prepare8)
 	vld1.8		{q14}, [r4, :128]!
 	vld1.8		{q15}, [r4, :128]

-0:	__adr		ip, 1f
+0:	adr		ip, 1f
 	sub		ip, ip, lr, lsl #3
-	bxlt		ip			// computed goto if blocks < 8
+	movlt		pc, ip			// computed goto if blocks < 8

 	veor		\o0, \o0, q8
 	vst1.8		{\o0}, [r0]!

From patchwork Wed Sep 16 12:36:41 2020
From: Ard Biesheuvel <ardb@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: herbert@gondor.apana.org.au, Ard Biesheuvel <ardb@kernel.org>
Subject: [PATCH 2/3] crypto: arm/aes-neonbs - avoid loading reorder argument on encryption
Date: Wed, 16 Sep 2020 15:36:41 +0300
Message-Id: <20200916123642.20805-3-ardb@kernel.org>
In-Reply-To: <20200916123642.20805-1-ardb@kernel.org>
References: <20200916123642.20805-1-ardb@kernel.org>
X-Patchwork-Id: 11780599

Reordering the tweak is never necessary for encryption, so avoid the
argument load on the encryption path.
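
For context, the resulting calling convention, excerpted from the diff
below with added comments (not an additional change): the conditional
argument load moves out of the shared __xts_crypt code and into the one
entry point that needs it. The stack offset changes from [sp, #32] to
[sp, #8] because the load now happens before the macro pushes
{r4-r8, lr} (24 bytes), so the seventh argument is still at its
function-entry offset.

	// Encryption never reorders the final tweak, so the flag is simply
	// forced to zero and no stack argument is read on this path.
ENTRY(aesbs_xts_encrypt)
	mov		ip, #0			// never reorder final tweak
	__xts_crypt	aesbs_encrypt8, q0, q1, q4, q6, q3, q7, q2, q5
ENDPROC(aesbs_xts_encrypt)

	// Decryption still reads the argument, now at its function-entry
	// offset because nothing has been pushed yet.
ENTRY(aesbs_xts_decrypt)
	ldr		ip, [sp, #8]		// reorder final tweak?
	__xts_crypt	aesbs_decrypt8, q0, q1, q6, q4, q2, q7, q3, q5
ENDPROC(aesbs_xts_decrypt)
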
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm/crypto/aes-neonbs-core.S | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/arm/crypto/aes-neonbs-core.S b/arch/arm/crypto/aes-neonbs-core.S
index 07cde1374bb0..7d0cc7f226a5 100644
--- a/arch/arm/crypto/aes-neonbs-core.S
+++ b/arch/arm/crypto/aes-neonbs-core.S
@@ -956,8 +956,7 @@ ENDPROC(__xts_prepare8)
 	push		{r4-r8, lr}
 	mov		r5, sp			// preserve sp
 	ldrd		r6, r7, [sp, #24]	// get blocks and iv args
-	ldr		r8, [sp, #32]		// reorder final tweak?
-	rsb		r8, r8, #1
+	rsb		r8, ip, #1
 	sub		ip, sp, #128		// make room for 8x tweak
 	bic		ip, ip, #0xf		// align sp to 16 bytes
 	mov		sp, ip
@@ -1013,9 +1012,11 @@ ENDPROC(__xts_prepare8)
 	.endm

 ENTRY(aesbs_xts_encrypt)
+	mov		ip, #0			// never reorder final tweak
 	__xts_crypt	aesbs_encrypt8, q0, q1, q4, q6, q3, q7, q2, q5
 ENDPROC(aesbs_xts_encrypt)

 ENTRY(aesbs_xts_decrypt)
+	ldr		ip, [sp, #8]		// reorder final tweak?
 	__xts_crypt	aesbs_decrypt8, q0, q1, q6, q4, q2, q7, q3, q5
 ENDPROC(aesbs_xts_decrypt)

From patchwork Wed Sep 16 12:36:42 2020
From: Ard Biesheuvel <ardb@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: herbert@gondor.apana.org.au, Ard Biesheuvel <ardb@kernel.org>
Subject: [PATCH 3/3] crypto: arm/aes-neonbs - use typed init/exit routines for XTS
Date: Wed, 16 Sep 2020 15:36:42 +0300
Message-Id: <20200916123642.20805-4-ardb@kernel.org>
In-Reply-To: <20200916123642.20805-1-ardb@kernel.org>
References: <20200916123642.20805-1-ardb@kernel.org>
X-Patchwork-Id: 11780145

Use the typed skcipher init/exit routines instead of the generic
cra_init/_exit routines when instantiating/releasing the XTS skciphers.
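
As background, a minimal sketch of the typed registration style this
patch switches to. The skcipher_alg .init/.exit members and
crypto_skcipher_ctx() are the kernel API used in the diff below; the
example_xts_* names and the simplified context struct are illustrative
stand-ins, not the actual aesbs code.

#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/internal/skcipher.h>

/* Simplified stand-in for the real aesbs_xts_ctx. */
struct example_xts_ctx {
	struct crypto_cipher	*tweak_tfm;
};

/*
 * Typed init: receives the skcipher handle directly, so the matching
 * crypto_skcipher_ctx() helper is used instead of casting from a
 * generic struct crypto_tfm.
 */
static int example_xts_init(struct crypto_skcipher *tfm)
{
	struct example_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->tweak_tfm = crypto_alloc_cipher("aes", 0, 0);
	return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
}

static void example_xts_exit(struct crypto_skcipher *tfm)
{
	struct example_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->tweak_tfm);
}

static struct skcipher_alg example_xts_alg = {
	.base.cra_ctxsize	= sizeof(struct example_xts_ctx),
	/* ... other cra_* fields, key sizes, setkey/encrypt/decrypt ... */
	.init			= example_xts_init,	/* typed: struct crypto_skcipher * */
	.exit			= example_xts_exit,
	/*
	 * The old style would instead set .base.cra_init/.base.cra_exit,
	 * whose callbacks take a struct crypto_tfm *.
	 */
};

The typed hooks run at instantiation/release time just like
cra_init/cra_exit did, but keep the prototypes in line with the rest of
the skcipher_alg callbacks and avoid the crypto_tfm_ctx() indirection.
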
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 arch/arm/crypto/aes-neonbs-glue.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/arm/crypto/aes-neonbs-glue.c b/arch/arm/crypto/aes-neonbs-glue.c
index e1603ec7e815..bda8bf17631e 100644
--- a/arch/arm/crypto/aes-neonbs-glue.c
+++ b/arch/arm/crypto/aes-neonbs-glue.c
@@ -314,9 +314,9 @@ static int aesbs_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 	return aesbs_setkey(tfm, in_key, key_len);
 }

-static int xts_init(struct crypto_tfm *tfm)
+static int xts_init(struct crypto_skcipher *tfm)
 {
-	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

 	ctx->cts_tfm = crypto_alloc_cipher("aes", 0, 0);
 	if (IS_ERR(ctx->cts_tfm))
@@ -329,9 +329,9 @@ static int xts_init(struct crypto_tfm *tfm)
 	return PTR_ERR_OR_ZERO(ctx->tweak_tfm);
 }

-static void xts_exit(struct crypto_tfm *tfm)
+static void xts_exit(struct crypto_skcipher *tfm)
 {
-	struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct aesbs_xts_ctx *ctx = crypto_skcipher_ctx(tfm);

 	crypto_free_cipher(ctx->tweak_tfm);
 	crypto_free_cipher(ctx->cts_tfm);
@@ -493,8 +493,6 @@ static struct skcipher_alg aes_algs[] = { {
 	.base.cra_ctxsize	= sizeof(struct aesbs_xts_ctx),
 	.base.cra_module	= THIS_MODULE,
 	.base.cra_flags		= CRYPTO_ALG_INTERNAL,
-	.base.cra_init		= xts_init,
-	.base.cra_exit		= xts_exit,

 	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
 	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
@@ -503,6 +501,8 @@ static struct skcipher_alg aes_algs[] = { {
 	.setkey			= aesbs_xts_setkey,
 	.encrypt		= xts_encrypt,
 	.decrypt		= xts_decrypt,
+	.init			= xts_init,
+	.exit			= xts_exit,
 } };

 static struct simd_skcipher_alg *aes_simd_algs[ARRAY_SIZE(aes_algs)];