From patchwork Wed Jan 12 13:12:02 2022
X-Patchwork-Submitter: "Jason A. Donenfeld"
X-Patchwork-Id: 12711371
From: "Jason A. Donenfeld"
To: netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: "Jason A. Donenfeld", Geert Uytterhoeven, Herbert Xu, Ard Biesheuvel,
    Jean-Philippe Aumasson, linux-crypto@vger.kernel.org
Subject: [PATCH RFC v1 1/3] bpf: move from sha1 to blake2s in tag calculation
Date: Wed, 12 Jan 2022 14:12:02 +0100
Message-Id: <20220112131204.800307-2-Jason@zx2c4.com>
In-Reply-To: <20220112131204.800307-1-Jason@zx2c4.com>
References: <20220112131204.800307-1-Jason@zx2c4.com>

BLAKE2s is faster and more secure. SHA-1 has been broken for a long
time now. This also removes quite a bit of code, and lets us
potentially remove sha1 from lib, which would further reduce vmlinux
size.

Cc: Geert Uytterhoeven
Cc: Herbert Xu
Cc: Ard Biesheuvel
Cc: Jean-Philippe Aumasson
Cc: linux-crypto@vger.kernel.org
Signed-off-by: Jason A. Donenfeld
---
 kernel/bpf/core.c | 39 ++++-----------------------------------
 1 file changed, 4 insertions(+), 35 deletions(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 2405e39d800f..d01976749467 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -33,6 +33,7 @@
 #include
 #include
 #include
+#include
 #include
 #include

@@ -265,24 +266,16 @@ void __bpf_prog_free(struct bpf_prog *fp)

 int bpf_prog_calc_tag(struct bpf_prog *fp)
 {
-	const u32 bits_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
-	u32 digest[SHA1_DIGEST_WORDS];
-	u32 ws[SHA1_WORKSPACE_WORDS];
-	u32 i, bsize, psize, blocks;
 	struct bpf_insn *dst;
 	bool was_ld_map;
-	u8 *raw, *todo;
-	__be32 *result;
-	__be64 *bits;
+	u8 *raw;
+	int i;

 	raw = vmalloc(raw_size);
 	if (!raw)
 		return -ENOMEM;

-	sha1_init(digest);
-	memset(ws, 0, sizeof(ws));
-
 	/* We need to take out the map fd for the digest calculation
 	 * since they are unstable from user space side.
 	 */
@@ -307,31 +300,7 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
 		}
 	}

-	psize = bpf_prog_insn_size(fp);
-	memset(&raw[psize], 0, raw_size - psize);
-	raw[psize++] = 0x80;
-
-	bsize = round_up(psize, SHA1_BLOCK_SIZE);
-	blocks = bsize / SHA1_BLOCK_SIZE;
-	todo = raw;
-	if (bsize - psize >= sizeof(__be64)) {
-		bits = (__be64 *)(todo + bsize - sizeof(__be64));
-	} else {
-		bits = (__be64 *)(todo + bsize + bits_offset);
-		blocks++;
-	}
-	*bits = cpu_to_be64((psize - 1) << 3);
-
-	while (blocks--) {
-		sha1_transform(digest, todo, ws);
-		todo += SHA1_BLOCK_SIZE;
-	}
-
-	result = (__force __be32 *)digest;
-	for (i = 0; i < SHA1_DIGEST_WORDS; i++)
-		result[i] = cpu_to_be32(digest[i]);
-	memcpy(fp->tag, result, sizeof(fp->tag));
-
+	blake2s(fp->tag, raw, NULL, sizeof(fp->tag), bpf_prog_insn_size(fp), 0);
 	vfree(raw);
 	return 0;
 }
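For reference, the one-shot blake2s() helper used above comes from
<crypto/blake2s.h> and hashes a whole buffer in a single call, handling
padding and finalization internally, which is why the manual 0x80 padding,
length encoding, and per-block compression loop all disappear. A minimal
sketch of the resulting tag computation, with illustrative names (this is
not the kernel function itself, and the map-fd scrubbing that
bpf_prog_calc_tag() performs is omitted):

    #include <crypto/blake2s.h>

    /* Sketch: derive an 8-byte program tag from an instruction buffer
     * in one call, mirroring the blake2s() invocation in the patch.
     */
    static void prog_tag(u8 tag[8], const u8 *insns, size_t insn_len)
    {
            /* args: out, in, key (none), outlen, inlen, keylen */
            blake2s(tag, insns, NULL, 8, insn_len, 0);
    }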
Donenfeld" X-Patchwork-Id: 12711373 X-Patchwork-Delegate: herbert@gondor.apana.org.au Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id F1A4DC433EF for ; Wed, 12 Jan 2022 13:12:31 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1353403AbiALNM1 (ORCPT ); Wed, 12 Jan 2022 08:12:27 -0500 Received: from dfw.source.kernel.org ([139.178.84.217]:44464 "EHLO dfw.source.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1353395AbiALNMY (ORCPT ); Wed, 12 Jan 2022 08:12:24 -0500 Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by dfw.source.kernel.org (Postfix) with ESMTPS id 51B6B61919; Wed, 12 Jan 2022 13:12:24 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 08D7EC36AE5; Wed, 12 Jan 2022 13:12:22 +0000 (UTC) Authentication-Results: smtp.kernel.org; dkim=pass (1024-bit key) header.d=zx2c4.com header.i=@zx2c4.com header.b="bpi5Ljpz" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=zx2c4.com; s=20210105; t=1641993142; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=8ACiCoxGwtTWuBvroNZW3ALXnH1pqDhtmmMKZfIh188=; b=bpi5Ljpz+BM0ViMX5o3eoyyF9lGqSnSvmGgbp/6rjmMaGygRj6lw2vR0bftIOeanmLrnyX vcUX9SQm1/aVPeqHmGA9Tc+m88VRd2wz23N256axUfSfyITmsjPLcIH0b0KVEAqVX/D36h vgUC06TWRTc4DpNCfWxRN3Vn+0TbgRs= Received: by mail.zx2c4.com (ZX2C4 Mail Server) with ESMTPSA id aaccd1a8 (TLSv1.3:AEAD-AES256-GCM-SHA384:256:NO); Wed, 12 Jan 2022 13:12:21 +0000 (UTC) From: "Jason A. Donenfeld" To: netdev@vger.kernel.org, linux-kernel@vger.kernel.org Cc: "Jason A. Donenfeld" , Geert Uytterhoeven , Herbert Xu , Ard Biesheuvel , Jean-Philippe Aumasson , linux-crypto@vger.kernel.org Subject: [PATCH RFC v1 2/3] ipv6: move from sha1 to blake2s in address calculation Date: Wed, 12 Jan 2022 14:12:03 +0100 Message-Id: <20220112131204.800307-3-Jason@zx2c4.com> In-Reply-To: <20220112131204.800307-1-Jason@zx2c4.com> References: <20220112131204.800307-1-Jason@zx2c4.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-crypto@vger.kernel.org BLAKE2s is faster and more secure. SHA-1 has been broken for a long time now. This also removes some code complexity, and lets us potentially remove sha1 from lib, which would further reduce vmlinux size. Cc: Geert Uytterhoeven Cc: Herbert Xu Cc: Ard Biesheuvel Cc: Jean-Philippe Aumasson Cc: linux-crypto@vger.kernel.org Signed-off-by: Jason A. 
---
 net/ipv6/addrconf.c | 31 +++++++++----------------------
 1 file changed, 9 insertions(+), 22 deletions(-)

diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3445f8017430..f5cb534aa261 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -61,7 +61,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include

@@ -3225,25 +3225,16 @@ static int ipv6_generate_stable_address(struct in6_addr *address,
 				       const struct inet6_dev *idev)
 {
 	static DEFINE_SPINLOCK(lock);
-	static __u32 digest[SHA1_DIGEST_WORDS];
-	static __u32 workspace[SHA1_WORKSPACE_WORDS];
-
-	static union {
-		char __data[SHA1_BLOCK_SIZE];
-		struct {
-			struct in6_addr secret;
-			__be32 prefix[2];
-			unsigned char hwaddr[MAX_ADDR_LEN];
-			u8 dad_count;
-		} __packed;
-	} data;
-
+	struct {
+		struct in6_addr secret;
+		__be32 prefix[2];
+		unsigned char hwaddr[MAX_ADDR_LEN];
+		u8 dad_count;
+	} __packed data;
 	struct in6_addr secret;
 	struct in6_addr temp;
 	struct net *net = dev_net(idev->dev);

-	BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
-
 	if (idev->cnf.stable_secret.initialized)
 		secret = idev->cnf.stable_secret.secret;
 	else if (net->ipv6.devconf_dflt->stable_secret.initialized)
@@ -3254,20 +3245,16 @@ static int ipv6_generate_stable_address(struct in6_addr *address,
 retry:
 	spin_lock_bh(&lock);

-	sha1_init(digest);
 	memset(&data, 0, sizeof(data));
-	memset(workspace, 0, sizeof(workspace));
 	memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
 	data.prefix[0] = address->s6_addr32[0];
 	data.prefix[1] = address->s6_addr32[1];
 	data.secret = secret;
 	data.dad_count = dad_count;

-	sha1_transform(digest, data.__data, workspace);
-
 	temp = *address;
-	temp.s6_addr32[2] = (__force __be32)digest[0];
-	temp.s6_addr32[3] = (__force __be32)digest[1];
+	blake2s((u8 *)&temp.s6_addr32[2], (u8 *)&data, NULL,
+		sizeof(temp.s6_addr32[2]) * 2, sizeof(data), 0);

 	spin_unlock_bh(&lock);
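The new code hashes the packed (secret, prefix, hwaddr, dad_count) block
directly and writes the first 8 bytes of BLAKE2s output into the
interface-identifier half of the address, since sizeof(temp.s6_addr32[2]) * 2
is 8; the 64-bit prefix half of temp is left as copied from *address. A
standalone sketch of that derivation, under the same assumptions and with
hypothetical struct/helper names:

    #include <crypto/blake2s.h>
    #include <linux/in6.h>
    #include <linux/netdevice.h>	/* MAX_ADDR_LEN */

    struct stable_input {
            struct in6_addr secret;
            __be32 prefix[2];
            unsigned char hwaddr[MAX_ADDR_LEN];
            u8 dad_count;
    } __packed;

    /* Sketch: fill the low 64 bits of *addr with the first 8 bytes of
     * BLAKE2s(input), leaving the 64-bit prefix half untouched.
     */
    static void stable_iid(struct in6_addr *addr, const struct stable_input *in)
    {
            blake2s((u8 *)&addr->s6_addr32[2], (const u8 *)in, NULL,
                    8, sizeof(*in), 0);
    }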
Donenfeld" X-Patchwork-Id: 12711374 X-Patchwork-Delegate: herbert@gondor.apana.org.au Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by smtp.lore.kernel.org (Postfix) with ESMTP id B9B3BC433EF for ; Wed, 12 Jan 2022 13:12:40 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1353394AbiALNMg (ORCPT ); Wed, 12 Jan 2022 08:12:36 -0500 Received: from lindbergh.monkeyblade.net ([23.128.96.19]:57540 "EHLO lindbergh.monkeyblade.net" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1353411AbiALNM3 (ORCPT ); Wed, 12 Jan 2022 08:12:29 -0500 Received: from ams.source.kernel.org (ams.source.kernel.org [IPv6:2604:1380:4601:e00::1]) by lindbergh.monkeyblade.net (Postfix) with ESMTPS id 6D0D1C061751; Wed, 12 Jan 2022 05:12:28 -0800 (PST) Received: from smtp.kernel.org (relay.kernel.org [52.25.139.140]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ams.source.kernel.org (Postfix) with ESMTPS id 2E63BB81ECB; Wed, 12 Jan 2022 13:12:27 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 45C06C36AEA; Wed, 12 Jan 2022 13:12:25 +0000 (UTC) Authentication-Results: smtp.kernel.org; dkim=pass (1024-bit key) header.d=zx2c4.com header.i=@zx2c4.com header.b="CtiJEVe5" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=zx2c4.com; s=20210105; t=1641993144; h=from:from:reply-to:subject:subject:date:date:message-id:message-id: to:to:cc:cc:mime-version:mime-version: content-transfer-encoding:content-transfer-encoding: in-reply-to:in-reply-to:references:references; bh=By/tQK/8VZLYbqA4VX4X7pTOoqXILmSD8IlvO0zFDDg=; b=CtiJEVe58E6TStIi0KWjif9TlOYjgW1NCCnlDbrfF3Yo2XQDR47bwXTfVM5zcVEQKsjY+9 D9qsmYizK37P70/1gNT/9rlZDGClAQ3qJVvotOksBdQcGxUBGdHc7SJrWl1aNbo/JcD/Wm mKNoNgEPYEv2R6poOGDc0luN8HyO7Ko= Received: by mail.zx2c4.com (ZX2C4 Mail Server) with ESMTPSA id bfb5e05b (TLSv1.3:AEAD-AES256-GCM-SHA384:256:NO); Wed, 12 Jan 2022 13:12:24 +0000 (UTC) From: "Jason A. Donenfeld" To: netdev@vger.kernel.org, linux-kernel@vger.kernel.org Cc: "Jason A. Donenfeld" , Geert Uytterhoeven , Herbert Xu , Ard Biesheuvel , linux-crypto@vger.kernel.org Subject: [PATCH RFC v1 3/3] crypto: sha1_generic - import lib/sha1.c locally Date: Wed, 12 Jan 2022 14:12:04 +0100 Message-Id: <20220112131204.800307-4-Jason@zx2c4.com> In-Reply-To: <20220112131204.800307-1-Jason@zx2c4.com> References: <20220112131204.800307-1-Jason@zx2c4.com> MIME-Version: 1.0 Precedence: bulk List-ID: X-Mailing-List: linux-crypto@vger.kernel.org With no non-crypto API users of this function, we can move it into the generic crypto/ code where it belongs. Cc: Geert Uytterhoeven Cc: Herbert Xu Cc: Ard Biesheuvel Cc: linux-crypto@vger.kernel.org Signed-off-by: Jason A. 
---
 crypto/sha1_generic.c | 114 +++++++++++++++++++++++++++++++++++
 include/crypto/sha1.h |  10 ---
 lib/Makefile          |   2 +-
 lib/sha1.c            | 137 ------------------------------------------
 4 files changed, 115 insertions(+), 148 deletions(-)
 delete mode 100644 lib/sha1.c

diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 325b57fe28dc..a2b019803561 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -16,9 +16,123 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
+#include
+
+#define SHA1_DIGEST_WORDS	(SHA1_DIGEST_SIZE / 4)
+#define SHA1_WORKSPACE_WORDS	16
+
+/*
+ * If you have 32 registers or more, the compiler can (and should)
+ * try to change the array[] accesses into registers. However, on
+ * machines with less than ~25 registers, that won't really work,
+ * and at least gcc will make an unholy mess of it.
+ *
+ * So to avoid that mess which just slows things down, we force
+ * the stores to memory to actually happen (we might be better off
+ * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as
+ * suggested by Artur Skawina - that will also make gcc unable to
+ * try to do the silly "optimize away loads" part because it won't
+ * see what the value will be).
+ *
+ * Ben Herrenschmidt reports that on PPC, the C version comes close
+ * to the optimized asm with this (ie on PPC you don't want that
+ * 'volatile', since there are lots of registers).
+ *
+ * On ARM we get the best code generation by forcing a full memory barrier
+ * between each SHA_ROUND, otherwise gcc happily get wild with spilling and
+ * the stack frame size simply explode and performance goes down the drain.
+ */
+
+#ifdef CONFIG_X86
+  #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
+#elif defined(CONFIG_ARM)
+  #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
+#else
+  #define setW(x, val) (W(x) = (val))
+#endif
+
+/* This "rolls" over the 512-bit array */
+#define W(x) (array[(x)&15])
+
+/*
+ * Where do we get the source from? The first 16 iterations get it from
+ * the input data, the next mix it from the 512-bit array.
+ */
+#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
+#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
+
+#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
+	__u32 TEMP = input(t); setW(t, TEMP); \
+	E += TEMP + rol32(A,5) + (fn) + (constant); \
+	B = ror32(B, 2); \
+	TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
+
+#define T_0_15(t, A, B, C, D, E)  SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
+#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
+#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
+#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
+
+/**
+ * sha1_transform - single block SHA1 transform (deprecated)
+ *
+ * @digest: 160 bit digest to update
+ * @data:   512 bits of data to hash
+ * @array:  16 words of workspace (see note)
+ *
+ * This function executes SHA-1's internal compression function. It updates the
+ * 160-bit internal state (@digest) with a single 512-bit data block (@data).
+ *
+ * Don't use this function. SHA-1 is no longer considered secure. And even if
+ * you do have to use SHA-1, this isn't the correct way to hash something with
+ * SHA-1 as this doesn't handle padding and finalization.
+ *
+ * Note: If the hash is security sensitive, the caller should be sure
+ * to clear the workspace. This is left to the caller to avoid
+ * unnecessary clears between chained hashing operations.
+ */
+static void sha1_transform(__u32 *digest, const char *data, __u32 *array)
+{
+	__u32 A, B, C, D, E;
+	unsigned int i = 0;
+
+	A = digest[0];
+	B = digest[1];
+	C = digest[2];
+	D = digest[3];
+	E = digest[4];
+
+	/* Round 1 - iterations 0-16 take their input from 'data' */
+	for (; i < 16; ++i)
+		T_0_15(i, A, B, C, D, E);
+
+	/* Round 1 - tail. Input from 512-bit mixing array */
+	for (; i < 20; ++i)
+		T_16_19(i, A, B, C, D, E);
+
+	/* Round 2 */
+	for (; i < 40; ++i)
+		T_20_39(i, A, B, C, D, E);
+
+	/* Round 3 */
+	for (; i < 60; ++i)
+		T_40_59(i, A, B, C, D, E);
+
+	/* Round 4 */
+	for (; i < 80; ++i)
+		T_60_79(i, A, B, C, D, E);
+
+	digest[0] += A;
+	digest[1] += B;
+	digest[2] += C;
+	digest[3] += D;
+	digest[4] += E;
+}

 const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
 	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,

diff --git a/include/crypto/sha1.h b/include/crypto/sha1.h
index 044ecea60ac8..118a3cad5eb3 100644
--- a/include/crypto/sha1.h
+++ b/include/crypto/sha1.h
@@ -33,14 +33,4 @@ extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
 extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
			      unsigned int len, u8 *hash);

-/*
- * An implementation of SHA-1's compression function. Don't use in new code!
- * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't
- * the correct way to hash something with SHA-1 (use crypto_shash instead).
- */
-#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4)
-#define SHA1_WORKSPACE_WORDS 16
-void sha1_init(__u32 *buf);
-void sha1_transform(__u32 *digest, const char *data, __u32 *W);
-
 #endif /* _CRYPTO_SHA1_H */

diff --git a/lib/Makefile b/lib/Makefile
index 364c23f15578..83ac3f0c1fbe 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,7 @@ endif

 lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 rbtree.o radix-tree.o timerqueue.o xarray.o \
-	 idr.o extable.o sha1.o irq_regs.o argv_split.o \
+	 idr.o extable.o irq_regs.o argv_split.o \
	 flex_proportions.o ratelimit.o show_mem.o \
	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \

diff --git a/lib/sha1.c b/lib/sha1.c
deleted file mode 100644
index 0494766fc574..000000000000
--- a/lib/sha1.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * SHA1 routine optimized to do word accesses rather than byte accesses,
- * and to avoid unnecessary copies into the context array.
- *
- * This was based on the git SHA1 implementation.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-/*
- * If you have 32 registers or more, the compiler can (and should)
- * try to change the array[] accesses into registers. However, on
- * machines with less than ~25 registers, that won't really work,
- * and at least gcc will make an unholy mess of it.
- *
- * So to avoid that mess which just slows things down, we force
- * the stores to memory to actually happen (we might be better off
- * with a 'W(t)=(val);asm("":"+m" (W(t))' there instead, as
- * suggested by Artur Skawina - that will also make gcc unable to
- * try to do the silly "optimize away loads" part because it won't
- * see what the value will be).
- *
- * Ben Herrenschmidt reports that on PPC, the C version comes close
- * to the optimized asm with this (ie on PPC you don't want that
- * 'volatile', since there are lots of registers).
- *
- * On ARM we get the best code generation by forcing a full memory barrier
- * between each SHA_ROUND, otherwise gcc happily get wild with spilling and
- * the stack frame size simply explode and performance goes down the drain.
- */
-
-#ifdef CONFIG_X86
-  #define setW(x, val) (*(volatile __u32 *)&W(x) = (val))
-#elif defined(CONFIG_ARM)
-  #define setW(x, val) do { W(x) = (val); __asm__("":::"memory"); } while (0)
-#else
-  #define setW(x, val) (W(x) = (val))
-#endif
-
-/* This "rolls" over the 512-bit array */
-#define W(x) (array[(x)&15])
-
-/*
- * Where do we get the source from? The first 16 iterations get it from
- * the input data, the next mix it from the 512-bit array.
- */
-#define SHA_SRC(t) get_unaligned_be32((__u32 *)data + t)
-#define SHA_MIX(t) rol32(W(t+13) ^ W(t+8) ^ W(t+2) ^ W(t), 1)
-
-#define SHA_ROUND(t, input, fn, constant, A, B, C, D, E) do { \
-	__u32 TEMP = input(t); setW(t, TEMP); \
-	E += TEMP + rol32(A,5) + (fn) + (constant); \
-	B = ror32(B, 2); \
-	TEMP = E; E = D; D = C; C = B; B = A; A = TEMP; } while (0)
-
-#define T_0_15(t, A, B, C, D, E)  SHA_ROUND(t, SHA_SRC, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
-#define T_16_19(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (((C^D)&B)^D) , 0x5a827999, A, B, C, D, E )
-#define T_20_39(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0x6ed9eba1, A, B, C, D, E )
-#define T_40_59(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, ((B&C)+(D&(B^C))) , 0x8f1bbcdc, A, B, C, D, E )
-#define T_60_79(t, A, B, C, D, E) SHA_ROUND(t, SHA_MIX, (B^C^D) , 0xca62c1d6, A, B, C, D, E )
-
-/**
- * sha1_transform - single block SHA1 transform (deprecated)
- *
- * @digest: 160 bit digest to update
- * @data:   512 bits of data to hash
- * @array:  16 words of workspace (see note)
- *
- * This function executes SHA-1's internal compression function. It updates the
- * 160-bit internal state (@digest) with a single 512-bit data block (@data).
- *
- * Don't use this function. SHA-1 is no longer considered secure. And even if
- * you do have to use SHA-1, this isn't the correct way to hash something with
- * SHA-1 as this doesn't handle padding and finalization.
- *
- * Note: If the hash is security sensitive, the caller should be sure
- * to clear the workspace. This is left to the caller to avoid
- * unnecessary clears between chained hashing operations.
- */
-void sha1_transform(__u32 *digest, const char *data, __u32 *array)
-{
-	__u32 A, B, C, D, E;
-	unsigned int i = 0;
-
-	A = digest[0];
-	B = digest[1];
-	C = digest[2];
-	D = digest[3];
-	E = digest[4];
-
-	/* Round 1 - iterations 0-16 take their input from 'data' */
-	for (; i < 16; ++i)
-		T_0_15(i, A, B, C, D, E);
-
-	/* Round 1 - tail. Input from 512-bit mixing array */
-	for (; i < 20; ++i)
-		T_16_19(i, A, B, C, D, E);
-
-	/* Round 2 */
-	for (; i < 40; ++i)
-		T_20_39(i, A, B, C, D, E);
-
-	/* Round 3 */
-	for (; i < 60; ++i)
-		T_40_59(i, A, B, C, D, E);
-
-	/* Round 4 */
-	for (; i < 80; ++i)
-		T_60_79(i, A, B, C, D, E);
-
-	digest[0] += A;
-	digest[1] += B;
-	digest[2] += C;
-	digest[3] += D;
-	digest[4] += E;
-}
-EXPORT_SYMBOL(sha1_transform);
-
-/**
- * sha1_init - initialize the vectors for a SHA1 digest
- * @buf: vector to initialize
- */
-void sha1_init(__u32 *buf)
-{
-	buf[0] = 0x67452301;
-	buf[1] = 0xefcdab89;
-	buf[2] = 0x98badcfe;
-	buf[3] = 0x10325476;
-	buf[4] = 0xc3d2e1f0;
-}
-EXPORT_SYMBOL(sha1_init);
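The comment removed from include/crypto/sha1.h points remaining SHA-1 users
at crypto_shash as the correct way to compute a full digest, since the raw
compression function handles neither padding nor finalization. As a rough
illustration of that route (a sketch only, assuming the "sha1" shash
algorithm is registered; error handling is abbreviated):

    #include <crypto/hash.h>
    #include <crypto/sha1.h>	/* SHA1_DIGEST_SIZE */

    /* Sketch: one-shot SHA-1 via the crypto_shash API, which performs
     * proper padding and finalization internally.
     */
    static int sha1_digest(const u8 *data, unsigned int len,
                           u8 out[SHA1_DIGEST_SIZE])
    {
            struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
            int err;

            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    err = crypto_shash_digest(desc, data, len, out);
                    shash_desc_zero(desc);
            }
            crypto_free_shash(tfm);
            return err;
    }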