From patchwork Thu Jul 2 10:19:41 2020
X-Patchwork-Submitter: Ard Biesheuvel
X-Patchwork-Id: 11638463
From: Ard Biesheuvel
To: linux-kernel@vger.kernel.org
Cc: Ard Biesheuvel, Herbert Xu, "David S. Miller", Greg Kroah-Hartman,
    Trond Myklebust, Anna Schumaker, "J. Bruce Fields", Chuck Lever,
    Eric Biggers, linux-crypto@vger.kernel.org, netdev@vger.kernel.org,
    devel@driverdev.osuosl.org, linux-nfs@vger.kernel.org
Subject: [RFC PATCH 1/7] staging/rtl8192e: switch to RC4 library interface
Date: Thu, 2 Jul 2020 12:19:41 +0200
Message-Id: <20200702101947.682-2-ardb@kernel.org>
In-Reply-To: <20200702101947.682-1-ardb@kernel.org>
References: <20200702101947.682-1-ardb@kernel.org>

Switch to the ARC4 library interface, to remove the pointless
dependency on the skcipher API, from which we will hopefully be able
to drop ecb(arc4) skcipher support.
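(Note, not part of the patch: a minimal illustrative sketch of the
library interface this series converts to. The helper name is made up;
it only shows the shape of the calls - a caller-owned context from
<crypto/arc4.h>, a per-packet key, and an in-place pass over the
payload plus ICV, with no request object, scatterlist or error path.)

#include <linux/types.h>
#include <crypto/arc4.h>

/*
 * Illustrative only: encrypt (or decrypt - RC4 is symmetric) 'len'
 * bytes in place with the ARC4 library interface, keying the stream
 * cipher with a fresh 16-byte per-packet key as the TKIP/WEP paths do.
 */
static void example_arc4_crypt_inplace(struct arc4_ctx *ctx,
					const u8 *rc4key, u8 *data, int len)
{
	arc4_setkey(ctx, rc4key, 16);		/* per-packet key */
	arc4_crypt(ctx, data, data, len);	/* dst == src is fine */
}

Compare this with the removed code below, which allocated a
crypto_sync_skcipher handle, built an on-stack request and a
scatterlist, and had to handle errors for what is now a pair of
library calls.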
Signed-off-by: Ard Biesheuvel Acked-by: Greg Kroah-Hartman --- drivers/staging/rtl8192e/Kconfig | 4 +- drivers/staging/rtl8192e/rtllib_crypt_tkip.c | 70 ++++--------------- drivers/staging/rtl8192e/rtllib_crypt_wep.c | 72 ++++---------------- 3 files changed, 28 insertions(+), 118 deletions(-) diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig index 1007eea6c8fc..4c440bdaaf6e 100644 --- a/drivers/staging/rtl8192e/Kconfig +++ b/drivers/staging/rtl8192e/Kconfig @@ -25,7 +25,7 @@ config RTLLIB_CRYPTO_CCMP config RTLLIB_CRYPTO_TKIP tristate "Support for rtllib TKIP crypto" depends on RTLLIB - select CRYPTO_ARC4 + select CRYPTO_LIB_ARC4 select CRYPTO_MICHAEL_MIC default y help @@ -35,7 +35,7 @@ config RTLLIB_CRYPTO_TKIP config RTLLIB_CRYPTO_WEP tristate "Support for rtllib WEP crypto" - select CRYPTO_ARC4 + select CRYPTO_LIB_ARC4 depends on RTLLIB default y help diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c index 8d2a58e706d5..8c2ff37b2d3a 100644 --- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c @@ -5,8 +5,9 @@ * Copyright (c) 2003-2004, Jouni Malinen */ +#include #include -#include +#include #include #include #include @@ -16,7 +17,6 @@ #include #include #include -#include #include #include @@ -45,9 +45,9 @@ struct rtllib_tkip_data { u32 dot11RSNAStatsTKIPLocalMICFailures; int key_idx; - struct crypto_sync_skcipher *rx_tfm_arc4; + struct arc4_ctx rx_ctx_arc4; + struct arc4_ctx tx_ctx_arc4; struct crypto_shash *rx_tfm_michael; - struct crypto_sync_skcipher *tx_tfm_arc4; struct crypto_shash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16]; @@ -58,16 +58,13 @@ static void *rtllib_tkip_init(int key_idx) { struct rtllib_tkip_data *priv; + if (fips_enabled) + return NULL; + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; - priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); - if (IS_ERR(priv->tx_tfm_arc4)) { - pr_debug("Could not allocate crypto API arc4\n"); - priv->tx_tfm_arc4 = NULL; - goto fail; - } priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->tx_tfm_michael)) { @@ -76,13 +73,6 @@ static void *rtllib_tkip_init(int key_idx) goto fail; } - priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); - if (IS_ERR(priv->rx_tfm_arc4)) { - pr_debug("Could not allocate crypto API arc4\n"); - priv->rx_tfm_arc4 = NULL; - goto fail; - } - priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->rx_tfm_michael)) { pr_debug("Could not allocate crypto API michael_mic\n"); @@ -94,9 +84,7 @@ static void *rtllib_tkip_init(int key_idx) fail: if (priv) { crypto_free_shash(priv->tx_tfm_michael); - crypto_free_sync_skcipher(priv->tx_tfm_arc4); crypto_free_shash(priv->rx_tfm_michael); - crypto_free_sync_skcipher(priv->rx_tfm_arc4); kfree(priv); } @@ -110,11 +98,9 @@ static void rtllib_tkip_deinit(void *priv) if (_priv) { crypto_free_shash(_priv->tx_tfm_michael); - crypto_free_sync_skcipher(_priv->tx_tfm_arc4); crypto_free_shash(_priv->rx_tfm_michael); - crypto_free_sync_skcipher(_priv->rx_tfm_arc4); } - kfree(priv); + kzfree(priv); } @@ -289,7 +275,6 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) int ret = 0; u8 rc4key[16], *icv; u32 crc; - struct scatterlist sg; if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 || skb->len < hdr_len) @@ -331,8 +316,6 @@ static int 
rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) *pos++ = (tkey->tx_iv32 >> 24) & 0xff; if (!tcb_desc->bHwSec) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4); - icv = skb_put(skb, 4); crc = ~crc32_le(~0, pos, len); icv[0] = crc; @@ -340,15 +323,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; - sg_init_one(&sg, pos, len+4); - - - crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); - skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); - ret = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16); + arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4); } tkey->tx_iv16++; @@ -376,9 +352,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) u8 rc4key[16]; u8 icv[4]; u32 crc; - struct scatterlist sg; int plen; - int err; if (skb->len < hdr_len + 8 + 4) return -1; @@ -414,8 +388,6 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos += 8; if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4); - if ((iv32 < tkey->rx_iv32 || (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) && tkey->initialized) { @@ -439,22 +411,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) plen = skb->len - hdr_len - 12; - sg_init_one(&sg, pos, plen+4); - - crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); - skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); - err = crypto_skcipher_decrypt(req); - skcipher_request_zero(req); - if (err) { - if (net_ratelimit()) { - netdev_dbg(skb->dev, - "Failed to decrypt received packet from %pM\n", - hdr->addr2); - } - return -7; - } + arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16); + arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4); crc = ~crc32_le(~0, pos, plen); icv[0] = crc; @@ -657,17 +615,13 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv) struct rtllib_tkip_data *tkey = priv; int keyidx; struct crypto_shash *tfm = tkey->tx_tfm_michael; - struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4; struct crypto_shash *tfm3 = tkey->rx_tfm_michael; - struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4; keyidx = tkey->key_idx; memset(tkey, 0, sizeof(*tkey)); tkey->key_idx = keyidx; tkey->tx_tfm_michael = tfm; - tkey->tx_tfm_arc4 = tfm2; tkey->rx_tfm_michael = tfm3; - tkey->rx_tfm_arc4 = tfm4; if (len == TKIP_KEY_LEN) { memcpy(tkey->key, key, TKIP_KEY_LEN); diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c index b1ea650036d2..7cdd17f907fa 100644 --- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c +++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c @@ -5,7 +5,8 @@ * Copyright (c) 2002-2004, Jouni Malinen */ -#include +#include +#include #include #include #include @@ -14,7 +15,6 @@ #include #include "rtllib.h" -#include #include struct prism2_wep_data { @@ -23,8 +23,8 @@ struct prism2_wep_data { u8 key[WEP_KEY_LEN + 1]; u8 key_len; u8 key_idx; - struct crypto_sync_skcipher *tx_tfm; - struct crypto_sync_skcipher *rx_tfm; + struct arc4_ctx rx_ctx_arc4; + struct arc4_ctx tx_ctx_arc4; }; @@ -32,48 +32,24 @@ static void *prism2_wep_init(int keyidx) { struct prism2_wep_data *priv; + if 
(fips_enabled) + return NULL; + priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) - goto fail; + return NULL; priv->key_idx = keyidx; - priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); - if (IS_ERR(priv->tx_tfm)) { - pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n"); - priv->tx_tfm = NULL; - goto fail; - } - priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); - if (IS_ERR(priv->rx_tfm)) { - pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n"); - priv->rx_tfm = NULL; - goto fail; - } - /* start WEP IV from a random value */ get_random_bytes(&priv->iv, 4); return priv; - -fail: - if (priv) { - crypto_free_sync_skcipher(priv->tx_tfm); - crypto_free_sync_skcipher(priv->rx_tfm); - kfree(priv); - } - return NULL; } static void prism2_wep_deinit(void *priv) { - struct prism2_wep_data *_priv = priv; - - if (_priv) { - crypto_free_sync_skcipher(_priv->tx_tfm); - crypto_free_sync_skcipher(_priv->rx_tfm); - } - kfree(priv); + kzfree(priv); } /* Perform WEP encryption on given skb that has at least 4 bytes of headroom @@ -92,8 +68,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) MAX_DEV_ADDR_SIZE); u32 crc; u8 *icv; - struct scatterlist sg; - int err; if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 || skb->len < hdr_len){ @@ -131,8 +105,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) memcpy(key + 3, wep->key, wep->key_len); if (!tcb_desc->bHwSec) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm); - /* Append little-endian CRC32 and encrypt it to produce ICV */ crc = ~crc32_le(~0, pos, len); icv = skb_put(skb, 4); @@ -141,14 +113,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; - sg_init_one(&sg, pos, len+4); - crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen); - skcipher_request_set_sync_tfm(req, wep->tx_tfm); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); - err = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); - return err; + arc4_setkey(&wep->tx_ctx_arc4, key, klen); + arc4_crypt(&wep->tx_ctx_arc4, pos, pos, len + 4); } return 0; @@ -172,8 +138,6 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) MAX_DEV_ADDR_SIZE); u32 crc; u8 icv[4]; - struct scatterlist sg; - int err; if (skb->len < hdr_len + 8) return -1; @@ -195,17 +159,9 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) plen = skb->len - hdr_len - 8; if (!tcb_desc->bHwSec) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm); - - sg_init_one(&sg, pos, plen+4); - crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen); - skcipher_request_set_sync_tfm(req, wep->rx_tfm); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); - err = crypto_skcipher_decrypt(req); - skcipher_request_zero(req); - if (err) - return -7; + arc4_setkey(&wep->rx_ctx_arc4, key, klen); + arc4_crypt(&wep->rx_ctx_arc4, pos, pos, plen + 4); + crc = ~crc32_le(~0, pos, plen); icv[0] = crc; icv[1] = crc >> 8; From patchwork Thu Jul 2 10:19:42 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ard Biesheuvel X-Patchwork-Id: 11638471 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id CC205618 for ; Thu, 2 Jul 
From: Ard Biesheuvel
To: linux-kernel@vger.kernel.org
Cc: Ard Biesheuvel, Herbert Xu, "David S. Miller", Greg Kroah-Hartman,
    Trond Myklebust, Anna Schumaker, "J. Bruce Fields", Chuck Lever,
    Eric Biggers, linux-crypto@vger.kernel.org, netdev@vger.kernel.org,
    devel@driverdev.osuosl.org, linux-nfs@vger.kernel.org
Subject: [RFC PATCH 2/7] staging/rtl8192u: switch to RC4 library interface
Date: Thu, 2 Jul 2020 12:19:42 +0200
Message-Id: <20200702101947.682-3-ardb@kernel.org>
In-Reply-To: <20200702101947.682-1-ardb@kernel.org>
References: <20200702101947.682-1-ardb@kernel.org>

Switch to the ARC4 library interface, to remove the pointless
dependency on the skcipher API, from which we will hopefully be able
to drop ecb(arc4) skcipher support.
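(Note, not part of the patch: background only. The reason a plain
library interface suffices is that RC4 itself is tiny - a 256-byte
key-scheduling pass followed by a swap-and-output stream generator.
The stand-alone sketch below is a textbook reference implementation,
not the kernel's lib/crypto code; the names are made up.)

#include <stddef.h>
#include <stdint.h>

struct rc4_sketch {
	uint8_t S[256];		/* permutation state */
	uint8_t x, y;		/* stream indices */
};

/* Key-scheduling algorithm: key length must be between 1 and 256 bytes. */
static void rc4_sketch_setkey(struct rc4_sketch *st, const uint8_t *key,
			      size_t klen)
{
	unsigned int i, j = 0;

	for (i = 0; i < 256; i++)
		st->S[i] = i;
	for (i = 0; i < 256; i++) {
		uint8_t a = st->S[i];

		j = (j + a + key[i % klen]) & 0xff;
		st->S[i] = st->S[j];
		st->S[j] = a;
	}
	st->x = 0;
	st->y = 0;
}

/* Pseudo-random generation: XOR the keystream over 'in', writing 'out'. */
static void rc4_sketch_crypt(struct rc4_sketch *st, uint8_t *out,
			     const uint8_t *in, size_t len)
{
	uint8_t x = st->x, y = st->y;

	while (len--) {
		uint8_t a, b;

		x = (x + 1) & 0xff;
		a = st->S[x];
		y = (y + a) & 0xff;
		b = st->S[y];
		st->S[x] = b;
		st->S[y] = a;
		*out++ = *in++ ^ st->S[(a + b) & 0xff];
	}
	st->x = x;
	st->y = y;
}

Note that the key schedule and the x/y indices form a single piece of
state, which is why "ecb(arc4)" never fit the skcipher model, where key
and per-request state are meant to have different lifetimes.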
Signed-off-by: Ard Biesheuvel Acked-by: Greg Kroah-Hartman --- drivers/staging/rtl8192u/Kconfig | 1 + drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c | 82 ++++---------------- drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c | 64 +++------------ 3 files changed, 27 insertions(+), 120 deletions(-) diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig index 1edca5c304fb..ef883d462d3d 100644 --- a/drivers/staging/rtl8192u/Kconfig +++ b/drivers/staging/rtl8192u/Kconfig @@ -8,3 +8,4 @@ config RTL8192U select CRYPTO select CRYPTO_AES select CRYPTO_CCM + select CRYPTO_LIB_ARC4 diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c index ffe624ed0c0c..a315133c20db 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c @@ -5,6 +5,7 @@ * Copyright (c) 2003-2004, Jouni Malinen */ +#include #include #include #include @@ -17,9 +18,7 @@ #include "ieee80211.h" -#include -#include - #include +#include #include MODULE_AUTHOR("Jouni Malinen"); @@ -49,9 +48,9 @@ struct ieee80211_tkip_data { int key_idx; - struct crypto_sync_skcipher *rx_tfm_arc4; + struct arc4_ctx rx_ctx_arc4; + struct arc4_ctx tx_ctx_arc4; struct crypto_shash *rx_tfm_michael; - struct crypto_sync_skcipher *tx_tfm_arc4; struct crypto_shash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ @@ -62,19 +61,14 @@ static void *ieee80211_tkip_init(int key_idx) { struct ieee80211_tkip_data *priv; + if (fips_enabled) + return NULL; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) goto fail; priv->key_idx = key_idx; - priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); - if (IS_ERR(priv->tx_tfm_arc4)) { - printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " - "crypto API arc4\n"); - priv->tx_tfm_arc4 = NULL; - goto fail; - } - priv->tx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->tx_tfm_michael)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " @@ -83,14 +77,6 @@ static void *ieee80211_tkip_init(int key_idx) goto fail; } - priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); - if (IS_ERR(priv->rx_tfm_arc4)) { - printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " - "crypto API arc4\n"); - priv->rx_tfm_arc4 = NULL; - goto fail; - } - priv->rx_tfm_michael = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->rx_tfm_michael)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " @@ -104,9 +90,7 @@ static void *ieee80211_tkip_init(int key_idx) fail: if (priv) { crypto_free_shash(priv->tx_tfm_michael); - crypto_free_sync_skcipher(priv->tx_tfm_arc4); crypto_free_shash(priv->rx_tfm_michael); - crypto_free_sync_skcipher(priv->rx_tfm_arc4); kfree(priv); } @@ -120,11 +104,9 @@ static void ieee80211_tkip_deinit(void *priv) if (_priv) { crypto_free_shash(_priv->tx_tfm_michael); - crypto_free_sync_skcipher(_priv->tx_tfm_arc4); crypto_free_shash(_priv->rx_tfm_michael); - crypto_free_sync_skcipher(_priv->rx_tfm_arc4); } - kfree(priv); + kzfree(priv); } @@ -290,10 +272,8 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) u8 *pos; struct rtl_80211_hdr_4addr *hdr; struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); - int ret = 0; u8 rc4key[16], *icv; u32 crc; - struct scatterlist sg; if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 || skb->len < hdr_len) @@ -334,21 +314,15 @@ static 
int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) *pos++ = (tkey->tx_iv32 >> 24) & 0xff; if (!tcb_desc->bHwSec) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4); - icv = skb_put(skb, 4); crc = ~crc32_le(~0, pos, len); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; - crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); - sg_init_one(&sg, pos, len + 4); - skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); - ret = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); + + arc4_setkey(&tkey->tx_ctx_arc4, rc4key, 16); + arc4_crypt(&tkey->tx_ctx_arc4, pos, pos, len + 4); } tkey->tx_iv16++; @@ -357,12 +331,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) tkey->tx_iv32++; } - if (!tcb_desc->bHwSec) - return ret; - else - return 0; - - + return 0; } static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) @@ -376,9 +345,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) u8 rc4key[16]; u8 icv[4]; u32 crc; - struct scatterlist sg; int plen; - int err; if (skb->len < hdr_len + 8 + 4) return -1; @@ -412,8 +379,6 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos += 8; if (!tcb_desc->bHwSec) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4); - if (iv32 < tkey->rx_iv32 || (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) { if (net_ratelimit()) { @@ -434,23 +399,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) plen = skb->len - hdr_len - 12; - crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); - sg_init_one(&sg, pos, plen + 4); - - skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); - - err = crypto_skcipher_decrypt(req); - skcipher_request_zero(req); - if (err) { - if (net_ratelimit()) { - netdev_dbg(skb->dev, "TKIP: failed to decrypt " - "received packet from %pM\n", - hdr->addr2); - } - return -7; - } + arc4_setkey(&tkey->rx_ctx_arc4, rc4key, 16); + arc4_crypt(&tkey->rx_ctx_arc4, pos, pos, plen + 4); crc = ~crc32_le(~0, pos, plen); icv[0] = crc; @@ -655,17 +605,13 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv) struct ieee80211_tkip_data *tkey = priv; int keyidx; struct crypto_shash *tfm = tkey->tx_tfm_michael; - struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4; struct crypto_shash *tfm3 = tkey->rx_tfm_michael; - struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4; keyidx = tkey->key_idx; memset(tkey, 0, sizeof(*tkey)); tkey->key_idx = keyidx; tkey->tx_tfm_michael = tfm; - tkey->tx_tfm_arc4 = tfm2; tkey->rx_tfm_michael = tfm3; - tkey->rx_tfm_arc4 = tfm4; if (len == TKIP_KEY_LEN) { memcpy(tkey->key, key, TKIP_KEY_LEN); diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c index 26482c3dcd1c..1c56e2d03aae 100644 --- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c +++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c @@ -5,6 +5,7 @@ * Copyright (c) 2002-2004, Jouni Malinen */ +#include #include #include #include @@ -14,8 +15,7 @@ #include "ieee80211.h" -#include -#include +#include #include MODULE_AUTHOR("Jouni Malinen"); @@ -28,8 +28,8 @@ struct prism2_wep_data { u8 key[WEP_KEY_LEN + 1]; u8 
key_len; u8 key_idx; - struct crypto_sync_skcipher *tx_tfm; - struct crypto_sync_skcipher *rx_tfm; + struct arc4_ctx rx_ctx_arc4; + struct arc4_ctx tx_ctx_arc4; }; @@ -37,39 +37,24 @@ static void *prism2_wep_init(int keyidx) { struct prism2_wep_data *priv; + if (fips_enabled) + return NULL; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return NULL; priv->key_idx = keyidx; - priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); - if (IS_ERR(priv->tx_tfm)) - goto free_priv; - priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); - if (IS_ERR(priv->rx_tfm)) - goto free_tx; - /* start WEP IV from a random value */ get_random_bytes(&priv->iv, 4); return priv; -free_tx: - crypto_free_sync_skcipher(priv->tx_tfm); -free_priv: - kfree(priv); - return NULL; } static void prism2_wep_deinit(void *priv) { - struct prism2_wep_data *_priv = priv; - - if (_priv) { - crypto_free_sync_skcipher(_priv->tx_tfm); - crypto_free_sync_skcipher(_priv->rx_tfm); - } - kfree(priv); + kzfree(priv); } /* Perform WEP encryption on given skb that has at least 4 bytes of headroom @@ -87,8 +72,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); u32 crc; u8 *icv; - struct scatterlist sg; - int err; if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 || skb->len < hdr_len) @@ -124,8 +107,6 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) memcpy(key + 3, wep->key, wep->key_len); if (!tcb_desc->bHwSec) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm); - /* Append little-endian CRC32 and encrypt it to produce ICV */ crc = ~crc32_le(~0, pos, len); icv = skb_put(skb, 4); @@ -134,16 +115,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) icv[2] = crc >> 16; icv[3] = crc >> 24; - crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen); - sg_init_one(&sg, pos, len + 4); - - skcipher_request_set_sync_tfm(req, wep->tx_tfm); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL); - - err = crypto_skcipher_encrypt(req); - skcipher_request_zero(req); - return err; + arc4_setkey(&wep->tx_ctx_arc4, key, klen); + arc4_crypt(&wep->tx_ctx_arc4, pos, pos, len + 4); } return 0; @@ -166,8 +139,6 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); u32 crc; u8 icv[4]; - struct scatterlist sg; - int err; if (skb->len < hdr_len + 8) return -1; @@ -189,19 +160,8 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv) plen = skb->len - hdr_len - 8; if (!tcb_desc->bHwSec) { - SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm); - - crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen); - sg_init_one(&sg, pos, plen + 4); - - skcipher_request_set_sync_tfm(req, wep->rx_tfm); - skcipher_request_set_callback(req, 0, NULL, NULL); - skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL); - - err = crypto_skcipher_decrypt(req); - skcipher_request_zero(req); - if (err) - return -7; + arc4_setkey(&wep->rx_ctx_arc4, key, klen); + arc4_crypt(&wep->rx_ctx_arc4, pos, pos, plen + 4); crc = ~crc32_le(~0, pos, plen); icv[0] = crc; From patchwork Thu Jul 2 10:19:43 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ard Biesheuvel X-Patchwork-Id: 11638441 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org 
From: Ard Biesheuvel
To: linux-kernel@vger.kernel.org
Cc: Ard Biesheuvel, Herbert Xu, "David S. Miller", Greg Kroah-Hartman,
    Trond Myklebust, Anna Schumaker, "J. Bruce Fields", Chuck Lever,
    Eric Biggers, linux-crypto@vger.kernel.org, netdev@vger.kernel.org,
    devel@driverdev.osuosl.org, linux-nfs@vger.kernel.org
Subject: [RFC PATCH 3/7] SUNRPC: remove RC4-HMAC-MD5 support from KerberosV
Date: Thu, 2 Jul 2020 12:19:43 +0200
Message-Id: <20200702101947.682-4-ardb@kernel.org>
In-Reply-To: <20200702101947.682-1-ardb@kernel.org>
References: <20200702101947.682-1-ardb@kernel.org>

The RC4-HMAC-MD5 KerberosV algorithm is based on RFC 4757 [0], which
was specifically issued for interoperability with Windows 2000, but was
never intended to receive the same level of support. The RFC says:

The IETF Kerberos community supports publishing this specification as
an informational document in order to describe this widely implemented
technology. However, while these encryption types provide the
operations necessary to implement the base Kerberos specification
[RFC4120], they do not provide all the required operations in the
Kerberos cryptography framework [RFC3961]. As a result, it is not
generally possible to implement potential extensions to Kerberos using
these encryption types. The Kerberos encryption type negotiation
mechanism [RFC4537] provides one approach for using such extensions
even when a Kerberos infrastructure uses long-term RC4 keys. Because
this specification does not implement operations required by RFC 3961
and because of security concerns with the use of RC4 and MD4 discussed
in Section 8, this specification is not appropriate for publication on
the standards track.
The RC4-HMAC encryption types are used to ease upgrade of existing Windows NT environments, provide strong cryptography (128-bit key lengths), and provide exportable (meet United States government export restriction requirements) encryption. This document describes the implementation of those encryption types. Furthermore, this RFC was re-classified as 'historic' by RFC 8429 [1] in 2018, stating that 'none of the encryption types it specifies should be used' Note that other outdated algorithms are left in place (some of which are guarded by CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES), so this should only adversely affect interoperability with Windows NT/2000 systems that have not received any updates since 2008 (but are connected to a network nonetheless) [0] https://tools.ietf.org/html/rfc4757 [1] https://tools.ietf.org/html/rfc8429 Signed-off-by: Ard Biesheuvel Acked-by: J. Bruce Fields --- include/linux/sunrpc/gss_krb5.h | 11 - include/linux/sunrpc/gss_krb5_enctypes.h | 9 +- net/sunrpc/Kconfig | 1 - net/sunrpc/auth_gss/gss_krb5_crypto.c | 276 -------------------- net/sunrpc/auth_gss/gss_krb5_mech.c | 95 ------- net/sunrpc/auth_gss/gss_krb5_seal.c | 1 - net/sunrpc/auth_gss/gss_krb5_seqnum.c | 87 ------ net/sunrpc/auth_gss/gss_krb5_unseal.c | 1 - net/sunrpc/auth_gss/gss_krb5_wrap.c | 65 +---- 9 files changed, 16 insertions(+), 530 deletions(-) diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index e8f8ffe7448b..91f43d86879d 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ b/include/linux/sunrpc/gss_krb5.h @@ -141,14 +141,12 @@ enum sgn_alg { SGN_ALG_MD2_5 = 0x0001, SGN_ALG_DES_MAC = 0x0002, SGN_ALG_3 = 0x0003, /* not published */ - SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; no support */ SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004 }; enum seal_alg { SEAL_ALG_NONE = 0xffff, SEAL_ALG_DES = 0x0000, SEAL_ALG_1 = 0x0001, /* not published */ - SEAL_ALG_MICROSOFT_RC4 = 0x0010,/* microsoft w2k; no support */ SEAL_ALG_DES3KD = 0x0002 }; @@ -316,14 +314,5 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *plainoffset, u32 *plainlen); -int -krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, - struct crypto_sync_skcipher *cipher, - unsigned char *cksum); - -int -krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, - struct crypto_sync_skcipher *cipher, - s32 seqnum); void gss_krb5_make_confounder(char *p, u32 conflen); diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h index 981c89cef19d..87eea679d750 100644 --- a/include/linux/sunrpc/gss_krb5_enctypes.h +++ b/include/linux/sunrpc/gss_krb5_enctypes.h @@ -13,15 +13,13 @@ #ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES /* - * NB: This list includes encryption types that were deprecated - * by RFC 8429 (DES3_CBC_SHA1 and ARCFOUR_HMAC). + * NB: This list includes DES3_CBC_SHA1, which was deprecated by RFC 8429. 
* * ENCTYPE_AES256_CTS_HMAC_SHA1_96 * ENCTYPE_AES128_CTS_HMAC_SHA1_96 * ENCTYPE_DES3_CBC_SHA1 - * ENCTYPE_ARCFOUR_HMAC */ -#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23" +#define KRB5_SUPPORTED_ENCTYPES "18,17,16" #else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ @@ -32,12 +30,11 @@ * ENCTYPE_AES256_CTS_HMAC_SHA1_96 * ENCTYPE_AES128_CTS_HMAC_SHA1_96 * ENCTYPE_DES3_CBC_SHA1 - * ENCTYPE_ARCFOUR_HMAC * ENCTYPE_DES_CBC_MD5 * ENCTYPE_DES_CBC_CRC * ENCTYPE_DES_CBC_MD4 */ -#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" +#define KRB5_SUPPORTED_ENCTYPES "18,17,16,3,1,2" #endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index 3bcf985507be..bbbb5af0af13 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig @@ -21,7 +21,6 @@ config RPCSEC_GSS_KRB5 depends on SUNRPC && CRYPTO depends on CRYPTO_MD5 && CRYPTO_DES && CRYPTO_CBC && CRYPTO_CTS depends on CRYPTO_ECB && CRYPTO_HMAC && CRYPTO_SHA1 && CRYPTO_AES - depends on CRYPTO_ARC4 default y select SUNRPC_GSS help diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index e7180da1fc6a..634b6c6e0dcb 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -138,135 +138,6 @@ checksummer(struct scatterlist *sg, void *data) return crypto_ahash_update(req); } -static int -arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4]) -{ - unsigned int ms_usage; - - switch (usage) { - case KG_USAGE_SIGN: - ms_usage = 15; - break; - case KG_USAGE_SEAL: - ms_usage = 13; - break; - default: - return -EINVAL; - } - salt[0] = (ms_usage >> 0) & 0xff; - salt[1] = (ms_usage >> 8) & 0xff; - salt[2] = (ms_usage >> 16) & 0xff; - salt[3] = (ms_usage >> 24) & 0xff; - - return 0; -} - -static u32 -make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen, - struct xdr_buf *body, int body_offset, u8 *cksumkey, - unsigned int usage, struct xdr_netobj *cksumout) -{ - struct scatterlist sg[1]; - int err = -1; - u8 *checksumdata; - u8 *rc4salt; - struct crypto_ahash *md5; - struct crypto_ahash *hmac_md5; - struct ahash_request *req; - - if (cksumkey == NULL) - return GSS_S_FAILURE; - - if (cksumout->len < kctx->gk5e->cksumlength) { - dprintk("%s: checksum buffer length, %u, too small for %s\n", - __func__, cksumout->len, kctx->gk5e->name); - return GSS_S_FAILURE; - } - - rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS); - if (!rc4salt) - return GSS_S_FAILURE; - - if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) { - dprintk("%s: invalid usage value %u\n", __func__, usage); - goto out_free_rc4salt; - } - - checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS); - if (!checksumdata) - goto out_free_rc4salt; - - md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(md5)) - goto out_free_cksum; - - hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, - CRYPTO_ALG_ASYNC); - if (IS_ERR(hmac_md5)) - goto out_free_md5; - - req = ahash_request_alloc(md5, GFP_NOFS); - if (!req) - goto out_free_hmac_md5; - - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); - - err = crypto_ahash_init(req); - if (err) - goto out; - sg_init_one(sg, rc4salt, 4); - ahash_request_set_crypt(req, sg, NULL, 4); - err = crypto_ahash_update(req); - if (err) - goto out; - - sg_init_one(sg, header, hdrlen); - ahash_request_set_crypt(req, sg, NULL, hdrlen); - err = crypto_ahash_update(req); - if (err) - goto out; - err = xdr_process_buf(body, body_offset, body->len - body_offset, - checksummer, req); - if (err) - goto 
out; - ahash_request_set_crypt(req, NULL, checksumdata, 0); - err = crypto_ahash_final(req); - if (err) - goto out; - - ahash_request_free(req); - req = ahash_request_alloc(hmac_md5, GFP_NOFS); - if (!req) - goto out_free_hmac_md5; - - ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL); - - err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength); - if (err) - goto out; - - sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5)); - ahash_request_set_crypt(req, sg, checksumdata, - crypto_ahash_digestsize(md5)); - err = crypto_ahash_digest(req); - if (err) - goto out; - - memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength); - cksumout->len = kctx->gk5e->cksumlength; -out: - ahash_request_free(req); -out_free_hmac_md5: - crypto_free_ahash(hmac_md5); -out_free_md5: - crypto_free_ahash(md5); -out_free_cksum: - kfree(checksumdata); -out_free_rc4salt: - kfree(rc4salt); - return err ? GSS_S_FAILURE : 0; -} - /* * checksum the plaintext data and hdrlen bytes of the token header * The checksum is performed over the first 8 bytes of the @@ -284,11 +155,6 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, u8 *checksumdata; unsigned int checksumlen; - if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR) - return make_checksum_hmac_md5(kctx, header, hdrlen, - body, body_offset, - cksumkey, usage, cksumout); - if (cksumout->len < kctx->gk5e->cksumlength) { dprintk("%s: checksum buffer length, %u, too small for %s\n", __func__, cksumout->len, kctx->gk5e->name); @@ -942,145 +808,3 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, ret = GSS_S_FAILURE; return ret; } - -/* - * Compute Kseq given the initial session key and the checksum. - * Set the key of the given cipher. - */ -int -krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, - struct crypto_sync_skcipher *cipher, - unsigned char *cksum) -{ - struct crypto_shash *hmac; - struct shash_desc *desc; - u8 Kseq[GSS_KRB5_MAX_KEYLEN]; - u32 zeroconstant = 0; - int err; - - dprintk("%s: entered\n", __func__); - - hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0); - if (IS_ERR(hmac)) { - dprintk("%s: error %ld, allocating hash '%s'\n", - __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); - return PTR_ERR(hmac); - } - - desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac), - GFP_NOFS); - if (!desc) { - dprintk("%s: failed to allocate shash descriptor for '%s'\n", - __func__, kctx->gk5e->cksum_name); - crypto_free_shash(hmac); - return -ENOMEM; - } - - desc->tfm = hmac; - - /* Compute intermediate Kseq from session key */ - err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength); - if (err) - goto out_err; - - err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq); - if (err) - goto out_err; - - /* Compute final Kseq from the checksum and intermediate Kseq */ - err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength); - if (err) - goto out_err; - - err = crypto_shash_digest(desc, cksum, 8, Kseq); - if (err) - goto out_err; - - err = crypto_sync_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength); - if (err) - goto out_err; - - err = 0; - -out_err: - kzfree(desc); - crypto_free_shash(hmac); - dprintk("%s: returning %d\n", __func__, err); - return err; -} - -/* - * Compute Kcrypt given the initial session key and the plaintext seqnum. - * Set the key of cipher kctx->enc. 
- */ -int -krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, - struct crypto_sync_skcipher *cipher, - s32 seqnum) -{ - struct crypto_shash *hmac; - struct shash_desc *desc; - u8 Kcrypt[GSS_KRB5_MAX_KEYLEN]; - u8 zeroconstant[4] = {0}; - u8 seqnumarray[4]; - int err, i; - - dprintk("%s: entered, seqnum %u\n", __func__, seqnum); - - hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0); - if (IS_ERR(hmac)) { - dprintk("%s: error %ld, allocating hash '%s'\n", - __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name); - return PTR_ERR(hmac); - } - - desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac), - GFP_NOFS); - if (!desc) { - dprintk("%s: failed to allocate shash descriptor for '%s'\n", - __func__, kctx->gk5e->cksum_name); - crypto_free_shash(hmac); - return -ENOMEM; - } - - desc->tfm = hmac; - - /* Compute intermediate Kcrypt from session key */ - for (i = 0; i < kctx->gk5e->keylength; i++) - Kcrypt[i] = kctx->Ksess[i] ^ 0xf0; - - err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); - if (err) - goto out_err; - - err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt); - if (err) - goto out_err; - - /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */ - err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength); - if (err) - goto out_err; - - seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff); - seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff); - seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff); - seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff); - - err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt); - if (err) - goto out_err; - - err = crypto_sync_skcipher_setkey(cipher, Kcrypt, - kctx->gk5e->keylength); - if (err) - goto out_err; - - err = 0; - -out_err: - kzfree(desc); - crypto_free_shash(hmac); - dprintk("%s: returning %d\n", __func__, err); - return err; -} diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 75b3c2e9e8f8..ae9acf3a7389 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -51,27 +51,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .keyed_cksum = 0, }, #endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ - /* - * RC4-HMAC - */ - { - .etype = ENCTYPE_ARCFOUR_HMAC, - .ctype = CKSUMTYPE_HMAC_MD5_ARCFOUR, - .name = "rc4-hmac", - .encrypt_name = "ecb(arc4)", - .cksum_name = "hmac(md5)", - .encrypt = krb5_encrypt, - .decrypt = krb5_decrypt, - .mk_key = NULL, - .signalg = SGN_ALG_HMAC_MD5, - .sealalg = SEAL_ALG_MICROSOFT_RC4, - .keybytes = 16, - .keylength = 16, - .blocksize = 1, - .conflen = 8, - .cksumlength = 8, - .keyed_cksum = 1, - }, /* * 3DES */ @@ -401,78 +380,6 @@ context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) return -EINVAL; } -/* - * Note that RC4 depends on deriving keys using the sequence - * number or the checksum of a token. Therefore, the final keys - * cannot be calculated until the token is being constructed! 
- */ -static int -context_derive_keys_rc4(struct krb5_ctx *ctx) -{ - struct crypto_shash *hmac; - char sigkeyconstant[] = "signaturekey"; - int slen = strlen(sigkeyconstant) + 1; /* include null terminator */ - struct shash_desc *desc; - int err; - - dprintk("RPC: %s: entered\n", __func__); - /* - * derive cksum (aka Ksign) key - */ - hmac = crypto_alloc_shash(ctx->gk5e->cksum_name, 0, 0); - if (IS_ERR(hmac)) { - dprintk("%s: error %ld allocating hash '%s'\n", - __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name); - err = PTR_ERR(hmac); - goto out_err; - } - - err = crypto_shash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength); - if (err) - goto out_err_free_hmac; - - - desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac), GFP_NOFS); - if (!desc) { - dprintk("%s: failed to allocate hash descriptor for '%s'\n", - __func__, ctx->gk5e->cksum_name); - err = -ENOMEM; - goto out_err_free_hmac; - } - - desc->tfm = hmac; - - err = crypto_shash_digest(desc, sigkeyconstant, slen, ctx->cksum); - kzfree(desc); - if (err) - goto out_err_free_hmac; - /* - * allocate hash, and skciphers for data and seqnum encryption - */ - ctx->enc = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0); - if (IS_ERR(ctx->enc)) { - err = PTR_ERR(ctx->enc); - goto out_err_free_hmac; - } - - ctx->seq = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0); - if (IS_ERR(ctx->seq)) { - crypto_free_sync_skcipher(ctx->enc); - err = PTR_ERR(ctx->seq); - goto out_err_free_hmac; - } - - dprintk("RPC: %s: returning success\n", __func__); - - err = 0; - -out_err_free_hmac: - crypto_free_shash(hmac); -out_err: - dprintk("RPC: %s: returning %d\n", __func__, err); - return err; -} - static int context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) { @@ -649,8 +556,6 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, switch (ctx->enctype) { case ENCTYPE_DES3_CBC_RAW: return context_derive_keys_des3(ctx, gfp_mask); - case ENCTYPE_ARCFOUR_HMAC: - return context_derive_keys_rc4(ctx); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: return context_derive_keys_new(ctx, gfp_mask); diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index f1d280accf43..33061417ec97 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -214,7 +214,6 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: - case ENCTYPE_ARCFOUR_HMAC: return gss_get_mic_v1(ctx, text, token); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index 507105127095..fb117817ff5d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c @@ -39,42 +39,6 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif -static s32 -krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, - unsigned char *cksum, unsigned char *buf) -{ - struct crypto_sync_skcipher *cipher; - unsigned char *plain; - s32 code; - - dprintk("RPC: %s:\n", __func__); - cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - plain = kmalloc(8, GFP_NOFS); - if (!plain) - return -ENOMEM; - - plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); - plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); - plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); - plain[3] = (unsigned char) ((seqnum 
>> 0) & 0xff); - plain[4] = direction; - plain[5] = direction; - plain[6] = direction; - plain[7] = direction; - - code = krb5_rc4_setup_seq_key(kctx, cipher, cksum); - if (code) - goto out; - - code = krb5_encrypt(cipher, cksum, plain, buf, 8); -out: - kfree(plain); - crypto_free_sync_skcipher(cipher); - return code; -} s32 krb5_make_seq_num(struct krb5_ctx *kctx, struct crypto_sync_skcipher *key, @@ -85,10 +49,6 @@ krb5_make_seq_num(struct krb5_ctx *kctx, unsigned char *plain; s32 code; - if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) - return krb5_make_rc4_seq_num(kctx, direction, seqnum, - cksum, buf); - plain = kmalloc(8, GFP_NOFS); if (!plain) return -ENOMEM; @@ -108,50 +68,6 @@ krb5_make_seq_num(struct krb5_ctx *kctx, return code; } -static s32 -krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, - unsigned char *buf, int *direction, s32 *seqnum) -{ - struct crypto_sync_skcipher *cipher; - unsigned char *plain; - s32 code; - - dprintk("RPC: %s:\n", __func__); - cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0); - if (IS_ERR(cipher)) - return PTR_ERR(cipher); - - code = krb5_rc4_setup_seq_key(kctx, cipher, cksum); - if (code) - goto out; - - plain = kmalloc(8, GFP_NOFS); - if (!plain) { - code = -ENOMEM; - goto out; - } - - code = krb5_decrypt(cipher, cksum, buf, plain, 8); - if (code) - goto out_plain; - - if ((plain[4] != plain[5]) || (plain[4] != plain[6]) - || (plain[4] != plain[7])) { - code = (s32)KG_BAD_SEQ; - goto out_plain; - } - - *direction = plain[4]; - - *seqnum = ((plain[0] << 24) | (plain[1] << 16) | - (plain[2] << 8) | (plain[3])); -out_plain: - kfree(plain); -out: - crypto_free_sync_skcipher(cipher); - return code; -} - s32 krb5_get_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, @@ -164,9 +80,6 @@ krb5_get_seq_num(struct krb5_ctx *kctx, dprintk("RPC: krb5_get_seq_num:\n"); - if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) - return krb5_get_rc4_seq_num(kctx, cksum, buf, - direction, seqnum); plain = kmalloc(8, GFP_NOFS); if (!plain) return -ENOMEM; diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index aaab91cf24c8..ba04e3ec970a 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -218,7 +218,6 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx, BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: - case ENCTYPE_ARCFOUR_HMAC: return gss_verify_mic_v1(ctx, message_buffer, read_token); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index cf0fd170ac18..a412a734ee17 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -236,26 +236,9 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset, seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) return GSS_S_FAILURE; - if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { - struct crypto_sync_skcipher *cipher; - int err; - cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, - 0, 0); - if (IS_ERR(cipher)) - return GSS_S_FAILURE; - - krb5_rc4_setup_enc_key(kctx, cipher, seq_send); - - err = gss_encrypt_xdr_buf(cipher, buf, - offset + headlen - conflen, pages); - crypto_free_sync_skcipher(cipher); - if (err) - return GSS_S_FAILURE; - } else { - if (gss_encrypt_xdr_buf(kctx->enc, buf, - offset + headlen - conflen, pages)) - return GSS_S_FAILURE; - } + if (gss_encrypt_xdr_buf(kctx->enc, buf, + offset + headlen - conflen, pages)) + return GSS_S_FAILURE; 
return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; } @@ -316,37 +299,9 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len, crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) - (unsigned char *)buf->head[0].iov_base; - /* - * Need plaintext seqnum to derive encryption key for arcfour-hmac - */ - if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN, - ptr + 8, &direction, &seqnum)) - return GSS_S_BAD_SIG; - - if ((kctx->initiate && direction != 0xff) || - (!kctx->initiate && direction != 0)) - return GSS_S_BAD_SIG; - buf->len = len; - if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) { - struct crypto_sync_skcipher *cipher; - int err; - - cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, - 0, 0); - if (IS_ERR(cipher)) - return GSS_S_FAILURE; - - krb5_rc4_setup_enc_key(kctx, cipher, seqnum); - - err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset); - crypto_free_sync_skcipher(cipher); - if (err) - return GSS_S_DEFECTIVE_TOKEN; - } else { - if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) - return GSS_S_DEFECTIVE_TOKEN; - } + if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) + return GSS_S_DEFECTIVE_TOKEN; if (kctx->gk5e->keyed_cksum) cksumkey = kctx->cksum; @@ -370,6 +325,14 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len, /* do sequencing checks */ + if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN, + ptr + 8, &direction, &seqnum)) + return GSS_S_BAD_SIG; + + if ((kctx->initiate && direction != 0xff) || + (!kctx->initiate && direction != 0)) + return GSS_S_BAD_SIG; + /* Copy the data back to the right position. XXX: Would probably be * better to copy and encrypt at the same time. */ @@ -605,7 +568,6 @@ gss_wrap_kerberos(struct gss_ctx *gctx, int offset, BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: - case ENCTYPE_ARCFOUR_HMAC: return gss_wrap_kerberos_v1(kctx, offset, buf, pages); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: case ENCTYPE_AES256_CTS_HMAC_SHA1_96: @@ -624,7 +586,6 @@ gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, BUG(); case ENCTYPE_DES_CBC_RAW: case ENCTYPE_DES3_CBC_RAW: - case ENCTYPE_ARCFOUR_HMAC: return gss_unwrap_kerberos_v1(kctx, offset, len, buf, &gctx->slack, &gctx->align); case ENCTYPE_AES128_CTS_HMAC_SHA1_96: From patchwork Thu Jul 2 10:19:44 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ard Biesheuvel X-Patchwork-Id: 11638453 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 24BBA13B6 for ; Thu, 2 Jul 2020 10:22:38 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 0B23720780 for ; Thu, 2 Jul 2020 10:22:38 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1593685358; bh=kaueKEmWjbH4Y/IsmnyUJSukPLRE5+XfMcf0ophHf3Y=; h=From:To:Cc:Subject:Date:In-Reply-To:References:List-ID:From; b=C/AiUT7naqcH5zr7KEjYe5pUT+YKPnWrwGqYBdtczoMuN8gCPxnCiMYnYunL26ZjY kNtxJqEff2zJ+c0bf3EHtfD3Tz0ahESblEE62aA9KpCVU3KlD/bJ+QchQC5eQ5i1/Y TLQrbpEi+MmAAabIdLxs4LHZ9QO+4+8BWvAmmXFs= Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728472AbgGBKWL (ORCPT ); Thu, 2 Jul 2020 06:22:11 -0400 Received: from mail.kernel.org ([198.145.29.99]:59864 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728367AbgGBKUf (ORCPT ); Thu, 2 Jul 2020 
06:20:35 -0400 Received: from e123331-lin.nice.arm.com (lfbn-nic-1-188-42.w2-15.abo.wanadoo.fr [2.15.37.42]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 621BD20884; Thu, 2 Jul 2020 10:20:31 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1593685234; bh=kaueKEmWjbH4Y/IsmnyUJSukPLRE5+XfMcf0ophHf3Y=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=rR5cI1xEq2UaIXRaKecu6AAFsz50MEvbYusJVXJWxesdyTbKDZFlfBve8q5pJZ/WW 57JV7VDH8b2vk15TQIb7Pz6PYBW2RB4UWb+EiI0glP4IU25Q2/iuGvWCRC9aqE1sAB Va8Yj8C6MSaQP5jQi63frRgN0IyLgdlPODrzvMrs= From: Ard Biesheuvel To: linux-kernel@vger.kernel.org Cc: Ard Biesheuvel , Herbert Xu , "David S. Miller" , Greg Kroah-Hartman , Trond Myklebust , Anna Schumaker , "J. Bruce Fields" , Chuck Lever , Eric Biggers , linux-crypto@vger.kernel.org, netdev@vger.kernel.org, devel@driverdev.osuosl.org, linux-nfs@vger.kernel.org Subject: [RFC PATCH 4/7] crypto: remove ARC4 support from the skcipher API Date: Thu, 2 Jul 2020 12:19:44 +0200 Message-Id: <20200702101947.682-5-ardb@kernel.org> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20200702101947.682-1-ardb@kernel.org> References: <20200702101947.682-1-ardb@kernel.org> Sender: linux-nfs-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org Remove the generic ecb(arc4) skcipher, which is slightly cumbersome from a maintenance perspective, since it does not quite behave like other skciphers do in terms of key vs IV lifetime. Since we are leaving the library interface in place, which is used by the various WEP and TKIP implementations we have in the tree, we can safely drop this code now it no longer has any users. Signed-off-by: Ard Biesheuvel --- crypto/Kconfig | 12 ---- crypto/Makefile | 1 - crypto/arc4.c | 76 -------------------- drivers/net/wireless/intel/ipw2x00/Kconfig | 1 - drivers/net/wireless/intersil/hostap/Kconfig | 1 - 5 files changed, 91 deletions(-) diff --git a/crypto/Kconfig b/crypto/Kconfig index 091c0a0bbf26..fd0d1f78ac47 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1197,18 +1197,6 @@ config CRYPTO_ANUBIS -config CRYPTO_ARC4 - tristate "ARC4 cipher algorithm" - select CRYPTO_SKCIPHER - select CRYPTO_LIB_ARC4 - help - ARC4 cipher algorithm. - - ARC4 is a stream cipher using keys ranging from 8 bits to 2048 - bits in length. This algorithm is required for driver-based - WEP, but it should not be for other purposes because of the - weakness of the algorithm. 
- config CRYPTO_BLOWFISH tristate "Blowfish cipher algorithm" select CRYPTO_ALGAPI diff --git a/crypto/Makefile b/crypto/Makefile index 4ca12b6044f7..af88c7e30b3c 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -128,7 +128,6 @@ obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o obj-$(CONFIG_CRYPTO_CAST5) += cast5_generic.o obj-$(CONFIG_CRYPTO_CAST6) += cast6_generic.o -obj-$(CONFIG_CRYPTO_ARC4) += arc4.o obj-$(CONFIG_CRYPTO_TEA) += tea.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o diff --git a/crypto/arc4.c b/crypto/arc4.c deleted file mode 100644 index aa79571dbd49..000000000000 --- a/crypto/arc4.c +++ /dev/null @@ -1,76 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Cryptographic API - * - * ARC4 Cipher Algorithm - * - * Jon Oberheide - */ - -#include -#include -#include -#include -#include - -static int crypto_arc4_setkey(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) -{ - struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); - - return arc4_setkey(ctx, in_key, key_len); -} - -static int crypto_arc4_crypt(struct skcipher_request *req) -{ - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct arc4_ctx *ctx = crypto_skcipher_ctx(tfm); - struct skcipher_walk walk; - int err; - - err = skcipher_walk_virt(&walk, req, false); - - while (walk.nbytes > 0) { - arc4_crypt(ctx, walk.dst.virt.addr, walk.src.virt.addr, - walk.nbytes); - err = skcipher_walk_done(&walk, 0); - } - - return err; -} - -static struct skcipher_alg arc4_alg = { - /* - * For legacy reasons, this is named "ecb(arc4)", not "arc4". - * Nevertheless it's actually a stream cipher, not a block cipher. - */ - .base.cra_name = "ecb(arc4)", - .base.cra_driver_name = "ecb(arc4)-generic", - .base.cra_priority = 100, - .base.cra_blocksize = ARC4_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct arc4_ctx), - .base.cra_module = THIS_MODULE, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - .setkey = crypto_arc4_setkey, - .encrypt = crypto_arc4_crypt, - .decrypt = crypto_arc4_crypt, -}; - -static int __init arc4_init(void) -{ - return crypto_register_skcipher(&arc4_alg); -} - -static void __exit arc4_exit(void) -{ - crypto_unregister_skcipher(&arc4_alg); -} - -subsys_initcall(arc4_init); -module_exit(arc4_exit); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); -MODULE_AUTHOR("Jon Oberheide "); -MODULE_ALIAS_CRYPTO("ecb(arc4)"); diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig index d00386915a9d..82b7eea3495f 100644 --- a/drivers/net/wireless/intel/ipw2x00/Kconfig +++ b/drivers/net/wireless/intel/ipw2x00/Kconfig @@ -160,7 +160,6 @@ config LIBIPW select WIRELESS_EXT select WEXT_SPY select CRYPTO - select CRYPTO_ARC4 select CRYPTO_ECB select CRYPTO_AES select CRYPTO_MICHAEL_MIC diff --git a/drivers/net/wireless/intersil/hostap/Kconfig b/drivers/net/wireless/intersil/hostap/Kconfig index 6ad88299432f..428fb6f55f51 100644 --- a/drivers/net/wireless/intersil/hostap/Kconfig +++ b/drivers/net/wireless/intersil/hostap/Kconfig @@ -5,7 +5,6 @@ config HOSTAP select WEXT_SPY select WEXT_PRIV select CRYPTO - select CRYPTO_ARC4 select CRYPTO_ECB select CRYPTO_AES select CRYPTO_MICHAEL_MIC From patchwork Thu Jul 2 10:19:45 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ard Biesheuvel X-Patchwork-Id: 11638447 Return-Path: Received: from 
mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 4AD77161F for ; Thu, 2 Jul 2020 10:22:12 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 2EC4620780 for ; Thu, 2 Jul 2020 10:22:12 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1593685332; bh=9PRSQMjLI8/4sHoDfY859AuP95S3O2JmFarTQDw/GUw=; h=From:To:Cc:Subject:Date:In-Reply-To:References:List-ID:From; b=flx1CorhVHsCa6s+Daw1DuqWGCKZ4oZ11j7/zxvzp7LT18yvtlS0zeZrrWjC1GWIe 1T9NcyIqK1VUpkc86nMTzFzagGt8RtbXi2xV0mhLtICjMqbe/8znMnh800ICm3BSPc pMdU+MY+xS3algmf0KQznI/Ga0Ho4lTFiEIXq0WU= Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728465AbgGBKWL (ORCPT ); Thu, 2 Jul 2020 06:22:11 -0400 Received: from mail.kernel.org ([198.145.29.99]:59910 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1727991AbgGBKUh (ORCPT ); Thu, 2 Jul 2020 06:20:37 -0400 Received: from e123331-lin.nice.arm.com (lfbn-nic-1-188-42.w2-15.abo.wanadoo.fr [2.15.37.42]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 9447520780; Thu, 2 Jul 2020 10:20:34 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1593685237; bh=9PRSQMjLI8/4sHoDfY859AuP95S3O2JmFarTQDw/GUw=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=zywTpzGD+H8Buhi2wxFCHNucxSUj9+QWHWpuUCXBZk2R+OOa1sXCqaW0YhCNW5ogr wT6crb423AqZvmeqPSAgk1VPFCxNHmgepLW6YUjhiUnyVPyRW4G6zXaM/niaQmhTN7 638SY/2ekUVYsQUVZAWVx/m+xWBBqoPrObtCFX50= From: Ard Biesheuvel To: linux-kernel@vger.kernel.org Cc: Ard Biesheuvel , Herbert Xu , "David S. Miller" , Greg Kroah-Hartman , Trond Myklebust , Anna Schumaker , "J. 
Bruce Fields" , Chuck Lever , Eric Biggers , linux-crypto@vger.kernel.org, netdev@vger.kernel.org, devel@driverdev.osuosl.org, linux-nfs@vger.kernel.org Subject: [RFC PATCH 5/7] crypto: n2 - remove ecb(arc4) support Date: Thu, 2 Jul 2020 12:19:45 +0200 Message-Id: <20200702101947.682-6-ardb@kernel.org> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20200702101947.682-1-ardb@kernel.org> References: <20200702101947.682-1-ardb@kernel.org> Sender: linux-nfs-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org Signed-off-by: Ard Biesheuvel --- drivers/crypto/n2_core.c | 46 -------------------- 1 file changed, 46 deletions(-) diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 6a828bbecea4..c347e58cd9a1 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -662,7 +662,6 @@ struct n2_skcipher_context { u8 aes[AES_MAX_KEY_SIZE]; u8 des[DES_KEY_SIZE]; u8 des3[3 * DES_KEY_SIZE]; - u8 arc4[258]; /* S-box, X, Y */ } key; }; @@ -789,36 +788,6 @@ static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key, return 0; } -static int n2_arc4_setkey(struct crypto_skcipher *skcipher, const u8 *key, - unsigned int keylen) -{ - struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); - struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm); - struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher); - u8 *s = ctx->key.arc4; - u8 *x = s + 256; - u8 *y = x + 1; - int i, j, k; - - ctx->enc_type = n2alg->enc_type; - - j = k = 0; - *x = 0; - *y = 0; - for (i = 0; i < 256; i++) - s[i] = i; - for (i = 0; i < 256; i++) { - u8 a = s[i]; - j = (j + key[k] + a) & 0xff; - s[i] = s[j]; - s[j] = a; - if (++k >= keylen) - k = 0; - } - - return 0; -} - static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size) { int this_len = nbytes; @@ -1122,21 +1091,6 @@ struct n2_skcipher_tmpl { }; static const struct n2_skcipher_tmpl skcipher_tmpls[] = { - /* ARC4: only ECB is supported (chaining bits ignored) */ - { .name = "ecb(arc4)", - .drv_name = "ecb-arc4", - .block_size = 1, - .enc_type = (ENC_TYPE_ALG_RC4_STREAM | - ENC_TYPE_CHAINING_ECB), - .skcipher = { - .min_keysize = 1, - .max_keysize = 256, - .setkey = n2_arc4_setkey, - .encrypt = n2_encrypt_ecb, - .decrypt = n2_decrypt_ecb, - }, - }, - /* DES: ECB CBC and CFB are supported */ { .name = "ecb(des)", .drv_name = "ecb-des", From patchwork Thu Jul 2 10:19:46 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ard Biesheuvel X-Patchwork-Id: 11638445 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 18E16618 for ; Thu, 2 Jul 2020 10:22:12 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id EED9D20772 for ; Thu, 2 Jul 2020 10:22:11 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1593685332; bh=BEFjNdvfKyJ+hPtNF3BSTZhE6Ir6RlgKVVjkC2LTMeE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:List-ID:From; b=ZdE04mTKkPgB//sGKSdMyirIfwyPj9HGFcV6RE/HA2SSYQVSOocMPjNjrJiot+kZM J8pPsKirrJyG71qng8oE6A+1D0fj9yUAf5OoA9T387iWbOsgnYEWGwtQyUTHUtk/C2 HEMrt6Udu9ce0FVke12FMMnU6xwtp86wjNVi8giQ= Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728454AbgGBKWJ (ORCPT ); Thu, 2 Jul 2020 06:22:09 -0400 Received: from mail.kernel.org ([198.145.29.99]:59948 "EHLO mail.kernel.org" 
rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728280AbgGBKUl (ORCPT ); Thu, 2 Jul 2020 06:20:41 -0400 Received: from e123331-lin.nice.arm.com (lfbn-nic-1-188-42.w2-15.abo.wanadoo.fr [2.15.37.42]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 9DA5F206DD; Thu, 2 Jul 2020 10:20:37 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1593685240; bh=BEFjNdvfKyJ+hPtNF3BSTZhE6Ir6RlgKVVjkC2LTMeE=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=zJTrFCj+Qh571w+IHsFYe2TV3F68N0RGr7kIRZkuBPliIy4OdnkwbHqa8rgJkrgX8 Lf7Or6HJTANuH8xtK2IJ0yhEwkEE3mjT6NLvycD4u+QN0bLKko96If6+9QuvyajoTz cTflrIwZ808In3u57ckCUp3pQW6Y6xRwWw9FG+5E= From: Ard Biesheuvel To: linux-kernel@vger.kernel.org Cc: Ard Biesheuvel , Herbert Xu , "David S. Miller" , Greg Kroah-Hartman , Trond Myklebust , Anna Schumaker , "J. Bruce Fields" , Chuck Lever , Eric Biggers , linux-crypto@vger.kernel.org, netdev@vger.kernel.org, devel@driverdev.osuosl.org, linux-nfs@vger.kernel.org Subject: [RFC PATCH 6/7] crypto: bcm-iproc - remove ecb(arc4) support Date: Thu, 2 Jul 2020 12:19:46 +0200 Message-Id: <20200702101947.682-7-ardb@kernel.org> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20200702101947.682-1-ardb@kernel.org> References: <20200702101947.682-1-ardb@kernel.org> Sender: linux-nfs-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org Signed-off-by: Ard Biesheuvel --- drivers/crypto/bcm/cipher.c | 96 +------------------- drivers/crypto/bcm/cipher.h | 1 - drivers/crypto/bcm/spu.c | 23 +---- drivers/crypto/bcm/spu.h | 1 - drivers/crypto/bcm/spu2.c | 12 +-- drivers/crypto/bcm/spu2.h | 1 - 6 files changed, 6 insertions(+), 128 deletions(-) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index a353217a0d33..f73b4bd86482 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -165,10 +165,6 @@ spu_skcipher_rx_sg_create(struct brcm_message *mssg, return -EFAULT; } - if (ctx->cipher.alg == CIPHER_ALG_RC4) - /* Add buffer to catch 260-byte SUPDT field for RC4 */ - sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN); - if (stat_pad_len) sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len); @@ -317,7 +313,6 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx) u8 local_iv_ctr[MAX_IV_SIZE]; u32 stat_pad_len; /* num bytes to align status field */ u32 pad_len; /* total length of all padding */ - bool update_key = false; struct brcm_message *mssg; /* mailbox message */ /* number of entries in src and dst sg in mailbox message. */ @@ -391,28 +386,6 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx) } } - if (ctx->cipher.alg == CIPHER_ALG_RC4) { - rx_frag_num++; - if (chunk_start) { - /* - * for non-first RC4 chunks, use SUPDT from previous - * response as key for this chunk. - */ - cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak; - update_key = true; - cipher_parms.type = CIPHER_TYPE_UPDT; - } else if (!rctx->is_encrypt) { - /* - * First RC4 chunk. For decrypt, key in pre-built msg - * header may have been changed if encrypt required - * multiple chunks. So revert the key to the - * ctx->enckey value. 
- */ - update_key = true; - cipher_parms.type = CIPHER_TYPE_INIT; - } - } - if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) flow_log("max_payload infinite\n"); else @@ -425,14 +398,9 @@ static int handle_skcipher_req(struct iproc_reqctx_s *rctx) memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr, sizeof(rctx->msg_buf.bcm_spu_req_hdr)); - /* - * Pass SUPDT field as key. Key field in finish() call is only used - * when update_key has been set above for RC4. Will be ignored in - * all other cases. - */ spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, ctx->spu_req_hdr_len, !(rctx->is_encrypt), - &cipher_parms, update_key, chunksize); + &cipher_parms, chunksize); atomic64_add(chunksize, &iproc_priv.bytes_out); @@ -527,9 +495,6 @@ static void handle_skcipher_resp(struct iproc_reqctx_s *rctx) __func__, rctx->total_received, payload_len); dump_sg(req->dst, rctx->total_received, payload_len); - if (ctx->cipher.alg == CIPHER_ALG_RC4) - packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak, - SPU_SUPDT_LEN); rctx->total_received += payload_len; if (rctx->total_received == rctx->total_todo) { @@ -1853,26 +1818,6 @@ static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key, return 0; } -static int rc4_setkey(struct crypto_skcipher *cipher, const u8 *key, - unsigned int keylen) -{ - struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher); - int i; - - ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE; - - ctx->enckey[0] = 0x00; /* 0x00 */ - ctx->enckey[1] = 0x00; /* i */ - ctx->enckey[2] = 0x00; /* 0x00 */ - ctx->enckey[3] = 0x00; /* j */ - for (i = 0; i < ARC4_MAX_KEY_SIZE; i++) - ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen]; - - ctx->cipher_type = CIPHER_TYPE_INIT; - - return 0; -} - static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { @@ -1895,9 +1840,6 @@ static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key, case CIPHER_ALG_AES: err = aes_setkey(cipher, key, keylen); break; - case CIPHER_ALG_RC4: - err = rc4_setkey(cipher, key, keylen); - break; default: pr_err("%s() Error: unknown cipher alg\n", __func__); err = -EINVAL; @@ -1905,11 +1847,9 @@ static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key, if (err) return err; - /* RC4 already populated ctx->enkey */ - if (ctx->cipher.alg != CIPHER_ALG_RC4) { - memcpy(ctx->enckey, key, keylen); - ctx->enckeylen = keylen; - } + memcpy(ctx->enckey, key, keylen); + ctx->enckeylen = keylen; + /* SPU needs XTS keys in the reverse order the crypto API presents */ if ((ctx->cipher.alg == CIPHER_ALG_AES) && (ctx->cipher.mode == CIPHER_MODE_XTS)) { @@ -2872,9 +2812,6 @@ static int aead_authenc_setkey(struct crypto_aead *cipher, goto badkey; } break; - case CIPHER_ALG_RC4: - ctx->cipher_type = CIPHER_TYPE_INIT; - break; default: pr_err("%s() Error: Unknown cipher alg\n", __func__); return -EINVAL; @@ -3573,25 +3510,6 @@ static struct iproc_alg_s driver_algs[] = { }, /* SKCIPHER algorithms. 
*/ - { - .type = CRYPTO_ALG_TYPE_SKCIPHER, - .alg.skcipher = { - .base.cra_name = "ecb(arc4)", - .base.cra_driver_name = "ecb-arc4-iproc", - .base.cra_blocksize = ARC4_BLOCK_SIZE, - .min_keysize = ARC4_MIN_KEY_SIZE, - .max_keysize = ARC4_MAX_KEY_SIZE, - .ivsize = 0, - }, - .cipher_info = { - .alg = CIPHER_ALG_RC4, - .mode = CIPHER_MODE_NONE, - }, - .auth_info = { - .alg = HASH_ALG_NONE, - .mode = HASH_MODE_NONE, - }, - }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { @@ -4495,15 +4413,9 @@ static void spu_counters_init(void) static int spu_register_skcipher(struct iproc_alg_s *driver_alg) { - struct spu_hw *spu = &iproc_priv.spu; struct skcipher_alg *crypto = &driver_alg->alg.skcipher; int err; - /* SPU2 does not support RC4 */ - if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) && - (spu->spu_type == SPU_TYPE_SPU2)) - return 0; - crypto->base.cra_module = THIS_MODULE; crypto->base.cra_priority = cipher_pri; crypto->base.cra_alignmask = 0; diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h index b6d83e3aa46c..035c8389cb3d 100644 --- a/drivers/crypto/bcm/cipher.h +++ b/drivers/crypto/bcm/cipher.h @@ -388,7 +388,6 @@ struct spu_hw { u16 spu_req_hdr_len, unsigned int is_inbound, struct spu_cipher_parms *cipher_parms, - bool update_key, unsigned int data_size); void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len, enum hash_alg auth_alg, diff --git a/drivers/crypto/bcm/spu.c b/drivers/crypto/bcm/spu.c index e7562e9bf396..fe126f95c702 100644 --- a/drivers/crypto/bcm/spu.c +++ b/drivers/crypto/bcm/spu.c @@ -222,10 +222,6 @@ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len) cipher_key_len = 24; name = "3DES"; break; - case CIPHER_ALG_RC4: - cipher_key_len = 260; - name = "ARC4"; - break; case CIPHER_ALG_AES: switch (cipher_type) { case CIPHER_TYPE_AES128: @@ -919,21 +915,16 @@ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) * @spu_req_hdr_len: Length in bytes of the SPU request header * @isInbound: 0 encrypt, 1 decrypt * @cipher_parms: Parameters describing cipher operation to be performed - * @update_key: If true, rewrite the cipher key in SCTX * @data_size: Length of the data in the BD field * * Assumes much of the header was already filled in at setkey() time in * spum_cipher_req_init(). - * spum_cipher_req_init() fills in the encryption key. For RC4, when submitting - * a request for a non-first chunk, we use the 260-byte SUPDT field from the - * previous response as the key. update_key is true for this case. Unused in all - * other cases. + * spum_cipher_req_init() fills in the encryption key. */ void spum_cipher_req_finish(u8 *spu_hdr, u16 spu_req_hdr_len, unsigned int is_inbound, struct spu_cipher_parms *cipher_parms, - bool update_key, unsigned int data_size) { struct SPUHEADER *spuh; @@ -948,11 +939,6 @@ void spum_cipher_req_finish(u8 *spu_hdr, flow_log(" in: %u\n", is_inbound); flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg, cipher_parms->type); - if (update_key) { - flow_log(" cipher key len: %u\n", cipher_parms->key_len); - flow_dump(" key: ", cipher_parms->key_buf, - cipher_parms->key_len); - } /* * In XTS mode, API puts "i" parameter (block tweak) in IV. 
For @@ -981,13 +967,6 @@ void spum_cipher_req_finish(u8 *spu_hdr, else cipher_bits &= ~CIPHER_INBOUND; - /* update encryption key for RC4 on non-first chunk */ - if (update_key) { - spuh->sa.cipher_flags |= - cipher_parms->type << CIPHER_TYPE_SHIFT; - memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len); - } - if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len) /* cipher iv provided so put it in here */ memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf, diff --git a/drivers/crypto/bcm/spu.h b/drivers/crypto/bcm/spu.h index b247bc5b9354..dd132389bcaa 100644 --- a/drivers/crypto/bcm/spu.h +++ b/drivers/crypto/bcm/spu.h @@ -251,7 +251,6 @@ void spum_cipher_req_finish(u8 *spu_hdr, u16 spu_req_hdr_len, unsigned int is_inbound, struct spu_cipher_parms *cipher_parms, - bool update_key, unsigned int data_size); void spum_request_pad(u8 *pad_start, diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c index 59abb5ecefa4..c860ffb0b4c3 100644 --- a/drivers/crypto/bcm/spu2.c +++ b/drivers/crypto/bcm/spu2.c @@ -1170,21 +1170,16 @@ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) * @spu_req_hdr_len: Length in bytes of the SPU request header * @isInbound: 0 encrypt, 1 decrypt * @cipher_parms: Parameters describing cipher operation to be performed - * @update_key: If true, rewrite the cipher key in SCTX * @data_size: Length of the data in the BD field * * Assumes much of the header was already filled in at setkey() time in * spu_cipher_req_init(). - * spu_cipher_req_init() fills in the encryption key. For RC4, when submitting a - * request for a non-first chunk, we use the 260-byte SUPDT field from the - * previous response as the key. update_key is true for this case. Unused in all - * other cases. + * spu_cipher_req_init() fills in the encryption key. 
*/ void spu2_cipher_req_finish(u8 *spu_hdr, u16 spu_req_hdr_len, unsigned int is_inbound, struct spu_cipher_parms *cipher_parms, - bool update_key, unsigned int data_size) { struct SPU2_FMD *fmd; @@ -1196,11 +1191,6 @@ void spu2_cipher_req_finish(u8 *spu_hdr, flow_log(" in: %u\n", is_inbound); flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg, cipher_parms->type); - if (update_key) { - flow_log(" cipher key len: %u\n", cipher_parms->key_len); - flow_dump(" key: ", cipher_parms->key_buf, - cipher_parms->key_len); - } flow_log(" iv len: %d\n", cipher_parms->iv_len); flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len); flow_log(" data_size: %u\n", data_size); diff --git a/drivers/crypto/bcm/spu2.h b/drivers/crypto/bcm/spu2.h index 03af6c38df7f..6e666bfb3cfc 100644 --- a/drivers/crypto/bcm/spu2.h +++ b/drivers/crypto/bcm/spu2.h @@ -200,7 +200,6 @@ void spu2_cipher_req_finish(u8 *spu_hdr, u16 spu_req_hdr_len, unsigned int is_inbound, struct spu_cipher_parms *cipher_parms, - bool update_key, unsigned int data_size); void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len, enum hash_alg auth_alg, enum hash_mode auth_mode, From patchwork Thu Jul 2 10:19:47 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Ard Biesheuvel X-Patchwork-Id: 11638459 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id D9A0E161F for ; Thu, 2 Jul 2020 10:22:47 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id BFC48206DD for ; Thu, 2 Jul 2020 10:22:47 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1593685367; bh=Dgcjvy0hCTfHOwCVOrjRfrBiKApvt8n4dhLRuzaQnTA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:List-ID:From; b=usTHRaHL+Y1PRoHNBViBYuTwL8fJ+3fcrOH6Cqayr5sCpuAF9rjowP6FyjT8LCFXm FHxTT7KTND01ALGDCdGiREjXREgKwDBmECyjxtpc2BojrEWe4YRGjjpxlzlwsWWsRy twhF6E8Whzvm/8b3EtAD4+BxDAYltDm/NVToHow4= Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1728008AbgGBKWJ (ORCPT ); Thu, 2 Jul 2020 06:22:09 -0400 Received: from mail.kernel.org ([198.145.29.99]:59972 "EHLO mail.kernel.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1728389AbgGBKUp (ORCPT ); Thu, 2 Jul 2020 06:20:45 -0400 Received: from e123331-lin.nice.arm.com (lfbn-nic-1-188-42.w2-15.abo.wanadoo.fr [2.15.37.42]) (using TLSv1.2 with cipher ECDHE-RSA-AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPSA id 3092E208D5; Thu, 2 Jul 2020 10:20:41 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=default; t=1593685244; bh=Dgcjvy0hCTfHOwCVOrjRfrBiKApvt8n4dhLRuzaQnTA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=eih0jVBvh7Y1bN4cY1nF7fJ9CwT3o2BoGfRUZVQM+4FJmd7mGUPmo4UWNkrVfYdiB QtE+5mjRUT1SuE5e5DJezfzxeQKrJTsTgcuMrq10aCUG0TYfa2oHkEK9CcqHd7Gq5J oTxHKpYq7H7UmQRq7PM5eDzFiReX5/NUi4ukBBWA= From: Ard Biesheuvel To: linux-kernel@vger.kernel.org Cc: Ard Biesheuvel , Herbert Xu , "David S. Miller" , Greg Kroah-Hartman , Trond Myklebust , Anna Schumaker , "J. 
Bruce Fields" , Chuck Lever , Eric Biggers , linux-crypto@vger.kernel.org, netdev@vger.kernel.org, devel@driverdev.osuosl.org, linux-nfs@vger.kernel.org Subject: [RFC PATCH 7/7] crypto: tcrypt - remove ecb(arc4) testing/benchmarking support Date: Thu, 2 Jul 2020 12:19:47 +0200 Message-Id: <20200702101947.682-8-ardb@kernel.org> X-Mailer: git-send-email 2.17.1 In-Reply-To: <20200702101947.682-1-ardb@kernel.org> References: <20200702101947.682-1-ardb@kernel.org> Sender: linux-nfs-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-nfs@vger.kernel.org Signed-off-by: Ard Biesheuvel --- crypto/tcrypt.c | 21 +------ crypto/testmgr.c | 7 --- crypto/testmgr.h | 62 -------------------- 3 files changed, 1 insertion(+), 89 deletions(-) diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c index ba0b7702f2e9..72828c4acd3a 100644 --- a/crypto/tcrypt.c +++ b/crypto/tcrypt.c @@ -68,7 +68,7 @@ static char *tvmem[TVMEMSIZE]; static const char *check[] = { "des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3", "blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes", - "cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea", + "cast6", "michael_mic", "deflate", "crc32c", "tea", "xtea", "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt", "camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320", "lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384", @@ -1762,10 +1762,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) ret += tcrypt_test("xts(cast6)"); break; - case 16: - ret += tcrypt_test("ecb(arc4)"); - break; - case 17: ret += tcrypt_test("michael_mic"); break; @@ -2201,11 +2197,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_32_64); break; - case 208: - test_cipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0, - speed_template_8); - break; - case 209: test_cipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0, speed_template_8_16); @@ -2720,11 +2711,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_32_48_64); break; - case 505: - test_acipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0, - speed_template_8); - break; - case 506: test_acipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0, speed_template_8_16); @@ -2932,11 +2918,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb) speed_template_32_48_64, num_mb); break; - case 605: - test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0, - speed_template_8, num_mb); - break; - case 606: test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0, speed_template_8_16, num_mb); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 6863f911fcee..7c1bdc5690e2 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -4783,13 +4783,6 @@ static const struct alg_test_desc alg_test_descs[] = { .suite = { .cipher = __VECS(anubis_tv_template) } - }, { - .alg = "ecb(arc4)", - .generic_driver = "ecb(arc4)-generic", - .test = alg_test_skcipher, - .suite = { - .cipher = __VECS(arc4_tv_template) - } }, { .alg = "ecb(blowfish)", .test = alg_test_skcipher, diff --git a/crypto/testmgr.h b/crypto/testmgr.h index d29983908c38..48cd6330ec8d 100644 --- a/crypto/testmgr.h +++ b/crypto/testmgr.h @@ -22490,68 +22490,6 @@ static const struct cipher_testvec cast5_ctr_tv_template[] = { }, }; -/* - * ARC4 test vectors from OpenSSL - */ -static const struct cipher_testvec arc4_tv_template[] = { - { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = 
"\x01\x23\x45\x67\x89\xab\xcd\xef", - .ctext = "\x75\xb7\x87\x80\x99\xe0\xc5\x96", - .len = 8, - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\x74\x94\xc2\xe7\x10\x4b\x08\x79", - .len = 8, - }, { - .key = "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 8, - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00", - .ctext = "\xde\x18\x89\x41\xa3\x37\x5d\x3a", - .len = 8, - }, { - .key = "\xef\x01\x23\x45", - .klen = 4, - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00\x00\x00", - .ctext = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf" - "\xbd\x61\x5a\x11\x62\xe1\xc7\xba" - "\x36\xb6\x78\x58", - .len = 20, - }, { - .key = "\x01\x23\x45\x67\x89\xab\xcd\xef", - .klen = 8, - .ptext = "\x12\x34\x56\x78\x9A\xBC\xDE\xF0" - "\x12\x34\x56\x78\x9A\xBC\xDE\xF0" - "\x12\x34\x56\x78\x9A\xBC\xDE\xF0" - "\x12\x34\x56\x78", - .ctext = "\x66\xa0\x94\x9f\x8a\xf7\xd6\x89" - "\x1f\x7f\x83\x2b\xa8\x33\xc0\x0c" - "\x89\x2e\xbe\x30\x14\x3c\xe2\x87" - "\x40\x01\x1e\xcf", - .len = 28, - }, { - .key = "\xef\x01\x23\x45", - .klen = 4, - .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00" - "\x00\x00", - .ctext = "\xd6\xa1\x41\xa7\xec\x3c\x38\xdf" - "\xbd\x61", - .len = 10, - }, { - .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF" - "\x00\x00\x00\x00\x00\x00\x00\x00", - .klen = 16, - .ptext = "\x01\x23\x45\x67\x89\xAB\xCD\xEF", - .ctext = "\x69\x72\x36\x59\x1B\x52\x42\xB1", - .len = 8, - }, -}; - /* * TEA test vectors */