From patchwork Thu Nov 30 14:46:32 2017
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Jiri Slaby
X-Patchwork-Id: 10085077
X-Patchwork-Delegate: herbert@gondor.apana.org.au
From: Jiri Slaby
To: mingo@redhat.com
Cc: linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org, Jiri Slaby,
    Herbert Xu, "David S. Miller", Thomas Gleixner, "H. Peter Anvin",
    x86@kernel.org, linux-crypto@vger.kernel.org
Subject: [PATCH v5 06/27] x86: crypto, annotate local functions
Date: Thu, 30 Nov 2017 15:46:32 +0100
Message-Id: <20171130144653.23688-7-jslaby@suse.cz>
X-Mailer: git-send-email 2.15.0
In-Reply-To: <20171130144653.23688-1-jslaby@suse.cz>
References: <20171130144653.23688-1-jslaby@suse.cz>
X-Mailing-List: linux-crypto@vger.kernel.org

Use the newly added SYM_FUNC_START_LOCAL to annotate the starts of all
functions which do not have a ".globl" annotation, but whose ends are
annotated by ENDPROC. This is needed to balance ENDPROC for tools that
are about to generate debuginfo.

To be symmetric, we also convert their ENDPROCs to the new SYM_FUNC_END.

Signed-off-by: Jiri Slaby
Cc: Herbert Xu
Cc: "David S. Miller"
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc:
Cc:
---
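Every hunk below is the same mechanical conversion; a minimal sketch of its
before/after shape follows. The helper names _do_transform_old/_do_transform_new
are purely hypothetical, and the exact expansion of the new macros (roughly
.type/.size bookkeeping without .globl) is an assumption here, not something
this patch spells out:

	#include <linux/linkage.h>

	/* Old style: the start is a bare local label, only the end is annotated. */
		.align 4
	_do_transform_old:
		ret
	ENDPROC(_do_transform_old)

	/*
	 * New style: start and end are both annotated and the symbol stays
	 * local (no .globl), so tools that generate debuginfo can pair the
	 * two markers.
	 */
	SYM_FUNC_START_LOCAL(_do_transform_new)
		ret
	SYM_FUNC_END(_do_transform_new)

The diffstat and hunks below simply repeat this pattern across nine files.
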
Peter Anvin" Cc: Cc: --- arch/x86/crypto/aesni-intel_asm.S | 49 ++++++++++++---------------- arch/x86/crypto/camellia-aesni-avx-asm_64.S | 20 ++++++------ arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 20 ++++++------ arch/x86/crypto/cast5-avx-x86_64-asm_64.S | 8 ++--- arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 8 ++--- arch/x86/crypto/ghash-clmulni-intel_asm.S | 4 +-- arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 8 ++--- arch/x86/crypto/serpent-avx2-asm_64.S | 8 ++--- arch/x86/crypto/twofish-avx-x86_64-asm_64.S | 8 ++--- 9 files changed, 62 insertions(+), 71 deletions(-) diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 16627fec80b2..1d34d5a14682 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -1875,7 +1875,7 @@ ENDPROC(aesni_gcm_enc) .align 4 _key_expansion_128: -_key_expansion_256a: +SYM_FUNC_START_LOCAL(_key_expansion_256a) pshufd $0b11111111, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 @@ -1886,10 +1886,9 @@ _key_expansion_256a: add $0x10, TKEYP ret ENDPROC(_key_expansion_128) -ENDPROC(_key_expansion_256a) +SYM_FUNC_END(_key_expansion_256a) -.align 4 -_key_expansion_192a: +SYM_FUNC_START_LOCAL(_key_expansion_192a) pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 @@ -1911,10 +1910,9 @@ _key_expansion_192a: movaps %xmm1, 0x10(TKEYP) add $0x20, TKEYP ret -ENDPROC(_key_expansion_192a) +SYM_FUNC_END(_key_expansion_192a) -.align 4 -_key_expansion_192b: +SYM_FUNC_START_LOCAL(_key_expansion_192b) pshufd $0b01010101, %xmm1, %xmm1 shufps $0b00010000, %xmm0, %xmm4 pxor %xmm4, %xmm0 @@ -1931,10 +1929,9 @@ _key_expansion_192b: movaps %xmm0, (TKEYP) add $0x10, TKEYP ret -ENDPROC(_key_expansion_192b) +SYM_FUNC_END(_key_expansion_192b) -.align 4 -_key_expansion_256b: +SYM_FUNC_START_LOCAL(_key_expansion_256b) pshufd $0b10101010, %xmm1, %xmm1 shufps $0b00010000, %xmm2, %xmm4 pxor %xmm4, %xmm2 @@ -1944,7 +1941,7 @@ _key_expansion_256b: movaps %xmm2, (TKEYP) add $0x10, TKEYP ret -ENDPROC(_key_expansion_256b) +SYM_FUNC_END(_key_expansion_256b) /* * int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key, @@ -2097,8 +2094,7 @@ ENDPROC(aesni_enc) * KEY * TKEYP (T1) */ -.align 4 -_aesni_enc1: +SYM_FUNC_START_LOCAL(_aesni_enc1) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE # round 0 @@ -2141,7 +2137,7 @@ _aesni_enc1: movaps 0x70(TKEYP), KEY AESENCLAST KEY STATE ret -ENDPROC(_aesni_enc1) +SYM_FUNC_END(_aesni_enc1) /* * _aesni_enc4: internal ABI @@ -2161,8 +2157,7 @@ ENDPROC(_aesni_enc1) * KEY * TKEYP (T1) */ -.align 4 -_aesni_enc4: +SYM_FUNC_START_LOCAL(_aesni_enc4) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE1 # round 0 @@ -2250,7 +2245,7 @@ _aesni_enc4: AESENCLAST KEY STATE3 AESENCLAST KEY STATE4 ret -ENDPROC(_aesni_enc4) +SYM_FUNC_END(_aesni_enc4) /* * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src) @@ -2289,8 +2284,7 @@ ENDPROC(aesni_dec) * KEY * TKEYP (T1) */ -.align 4 -_aesni_dec1: +SYM_FUNC_START_LOCAL(_aesni_dec1) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE # round 0 @@ -2333,7 +2327,7 @@ _aesni_dec1: movaps 0x70(TKEYP), KEY AESDECLAST KEY STATE ret -ENDPROC(_aesni_dec1) +SYM_FUNC_END(_aesni_dec1) /* * _aesni_dec4: internal ABI @@ -2353,8 +2347,7 @@ ENDPROC(_aesni_dec1) * KEY * TKEYP (T1) */ -.align 4 -_aesni_dec4: +SYM_FUNC_START_LOCAL(_aesni_dec4) movaps (KEYP), KEY # key mov KEYP, TKEYP pxor KEY, STATE1 # round 0 @@ -2442,7 +2435,7 @@ _aesni_dec4: AESDECLAST KEY STATE3 AESDECLAST KEY STATE4 ret 
-ENDPROC(_aesni_dec4)
+SYM_FUNC_END(_aesni_dec4)
 
 /*
  * void aesni_ecb_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
@@ -2720,8 +2713,7 @@ ENDPROC(aesni_cbc_dec)
  *	INC:	== 1, in little endian
  *	BSWAP_MASK == endian swapping mask
  */
-.align 4
-_aesni_inc_init:
+SYM_FUNC_START_LOCAL(_aesni_inc_init)
 	movaps .Lbswap_mask, BSWAP_MASK
 	movaps IV, CTR
 	PSHUFB_XMM BSWAP_MASK CTR
@@ -2729,7 +2721,7 @@ _aesni_inc_init:
 	MOVQ_R64_XMM TCTR_LOW INC
 	MOVQ_R64_XMM CTR TCTR_LOW
 	ret
-ENDPROC(_aesni_inc_init)
+SYM_FUNC_END(_aesni_inc_init)
 
 /*
  * _aesni_inc:	internal ABI
@@ -2746,8 +2738,7 @@ ENDPROC(_aesni_inc_init)
  *	CTR:	== output IV, in little endian
  *	TCTR_LOW: == lower qword of CTR
  */
-.align 4
-_aesni_inc:
+SYM_FUNC_START_LOCAL(_aesni_inc)
 	paddq INC, CTR
 	add $1, TCTR_LOW
 	jnc .Linc_low
@@ -2758,7 +2749,7 @@ _aesni_inc:
 	movaps CTR, IV
 	PSHUFB_XMM BSWAP_MASK IV
 	ret
-ENDPROC(_aesni_inc)
+SYM_FUNC_END(_aesni_inc)
 
 /*
  * void aesni_ctr_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index f7c495e2863c..8b6a65524067 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -188,20 +188,20 @@
  * larger and would only be 0.5% faster (on sandy-bridge).
  */
 .align 8
-roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 	roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
 		  %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
 		  %rcx, (%r9));
 	ret;
-ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
 .align 8
-roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 	roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
 		  %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
 		  %rax, (%r9));
 	ret;
-ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
 /*
  * IN/OUT:
@@ -721,7 +721,7 @@ ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
 .text
 .align 8
-__camellia_enc_blk16:
+SYM_FUNC_START_LOCAL(__camellia_enc_blk16)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	%rax: temporary storage, 256 bytes
@@ -805,10 +805,10 @@ __camellia_enc_blk16:
 		     %xmm15, %rax, %rcx, 24);
 
 	jmp .Lenc_done;
-ENDPROC(__camellia_enc_blk16)
+SYM_FUNC_END(__camellia_enc_blk16)
 
 .align 8
-__camellia_dec_blk16:
+SYM_FUNC_START_LOCAL(__camellia_dec_blk16)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	%rax: temporary storage, 256 bytes
@@ -890,7 +890,7 @@ __camellia_dec_blk16:
 	      ((key_table + (24) * 8) + 4)(CTX));
 
 	jmp .Ldec_max24;
-ENDPROC(__camellia_dec_blk16)
+SYM_FUNC_END(__camellia_dec_blk16)
 
 ENTRY(camellia_ecb_enc_16way)
 	/* input:
@@ -1119,7 +1119,7 @@ ENDPROC(camellia_ctr_16way)
 	vpxor tmp, iv, iv;
 
 .align 8
-camellia_xts_crypt_16way:
+SYM_FUNC_START_LOCAL(camellia_xts_crypt_16way)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	%rsi: dst (16 blocks)
@@ -1253,7 +1253,7 @@ camellia_xts_crypt_16way:
 
 	FRAME_END
 	ret;
-ENDPROC(camellia_xts_crypt_16way)
+SYM_FUNC_END(camellia_xts_crypt_16way)
 
 ENTRY(camellia_xts_enc_16way)
 	/* input:
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index eee5b3982cfd..96b44ad85c59 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -227,20 +227,20 @@
  * larger and would only marginally faster.
  */
 .align 8
-roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
+SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 	roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
 		  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
 		  %rcx, (%r9));
 	ret;
-ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
+SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
 
 .align 8
-roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
+SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 	roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
 		  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
 		  %rax, (%r9));
 	ret;
-ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
+SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
 /*
  * IN/OUT:
@@ -764,7 +764,7 @@ ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
 
 .text
 .align 8
-__camellia_enc_blk32:
+SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	%rax: temporary storage, 512 bytes
@@ -848,10 +848,10 @@ __camellia_enc_blk32:
 		     %ymm15, %rax, %rcx, 24);
 
 	jmp .Lenc_done;
-ENDPROC(__camellia_enc_blk32)
+SYM_FUNC_END(__camellia_enc_blk32)
 
 .align 8
-__camellia_dec_blk32:
+SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	%rax: temporary storage, 512 bytes
@@ -933,7 +933,7 @@ __camellia_dec_blk32:
 	      ((key_table + (24) * 8) + 4)(CTX));
 
 	jmp .Ldec_max24;
-ENDPROC(__camellia_dec_blk32)
+SYM_FUNC_END(__camellia_dec_blk32)
 
 ENTRY(camellia_ecb_enc_32way)
 	/* input:
@@ -1226,7 +1226,7 @@ ENDPROC(camellia_ctr_32way)
 	vpxor tmp1, iv, iv;
 
 .align 8
-camellia_xts_crypt_32way:
+SYM_FUNC_START_LOCAL(camellia_xts_crypt_32way)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	%rsi: dst (32 blocks)
@@ -1371,7 +1371,7 @@ camellia_xts_crypt_32way:
 
 	FRAME_END
 	ret;
-ENDPROC(camellia_xts_crypt_32way)
+SYM_FUNC_END(camellia_xts_crypt_32way)
 
 ENTRY(camellia_xts_enc_32way)
 	/* input:
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index 86107c961bb4..b26df120413c 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -224,7 +224,7 @@
 .text
 
 .align 16
-__cast5_enc_blk16:
+SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
 	/* input:
 	 *	%rdi: ctx
 	 *	RL1: blocks 1 and 2
@@ -295,10 +295,10 @@ __cast5_enc_blk16:
 	outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
 
 	ret;
-ENDPROC(__cast5_enc_blk16)
+SYM_FUNC_END(__cast5_enc_blk16)
 
 .align 16
-__cast5_dec_blk16:
+SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
 	/* input:
 	 *	%rdi: ctx
 	 *	RL1: encrypted blocks 1 and 2
@@ -372,7 +372,7 @@ __cast5_dec_blk16:
 .L__skip_dec:
 	vpsrldq $4, RKR, RKR;
 	jmp .L__dec_tail;
-ENDPROC(__cast5_dec_blk16)
+SYM_FUNC_END(__cast5_dec_blk16)
 
 ENTRY(cast5_ecb_enc_16way)
 	/* input:
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index 7f30b6f0d72c..0a68e42a00f9 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -262,7 +262,7 @@
 .text
 
 .align 8
-__cast6_enc_blk8:
+SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
 	/* input:
 	 *	%rdi: ctx
 	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
@@ -307,10 +307,10 @@ __cast6_enc_blk8:
 	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
 	ret;
-ENDPROC(__cast6_enc_blk8)
+SYM_FUNC_END(__cast6_enc_blk8)
 
 .align 8
-__cast6_dec_blk8:
+SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
 	/* input:
 	 *	%rdi: ctx
 	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
@@ -354,7 +354,7 @@ __cast6_dec_blk8:
 	outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
 
 	ret;
-ENDPROC(__cast6_dec_blk8)
+SYM_FUNC_END(__cast6_dec_blk8)
 
 ENTRY(cast6_ecb_enc_8way)
 	/* input:
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index f94375a8dcd1..c3db86842578 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -47,7 +47,7 @@
  *	T2
  *	T3
  */
-__clmul_gf128mul_ble:
+SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble)
 	movaps DATA, T1
 	pshufd $0b01001110, DATA, T2
 	pshufd $0b01001110, SHASH, T3
@@ -90,7 +90,7 @@ __clmul_gf128mul_ble:
 	pxor T2, T1
 	pxor T1, DATA
 	ret
-ENDPROC(__clmul_gf128mul_ble)
+SYM_FUNC_END(__clmul_gf128mul_ble)
 
 /* void clmul_ghash_mul(char *dst, const u128 *shash) */
 ENTRY(clmul_ghash_mul)
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index 2925077f8c6a..c2d4a1fc9ee8 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -570,7 +570,7 @@
 	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
 
 .align 8
-__serpent_enc_blk8_avx:
+SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
@@ -621,10 +621,10 @@ __serpent_enc_blk8_avx:
 	write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
 	ret;
-ENDPROC(__serpent_enc_blk8_avx)
+SYM_FUNC_END(__serpent_enc_blk8_avx)
 
 .align 8
-__serpent_dec_blk8_avx:
+SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: encrypted blocks
@@ -675,7 +675,7 @@ __serpent_dec_blk8_avx:
 	write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
 	ret;
-ENDPROC(__serpent_dec_blk8_avx)
+SYM_FUNC_END(__serpent_dec_blk8_avx)
 
 ENTRY(serpent_ecb_enc_8way_avx)
 	/* input:
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index d67888f2a52a..52c527ce4b18 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -566,7 +566,7 @@
 	transpose_4x4(x0, x1, x2, x3, t0, t1, t2)
 
 .align 8
-__serpent_enc_blk16:
+SYM_FUNC_START_LOCAL(__serpent_enc_blk16)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: plaintext
@@ -617,10 +617,10 @@ __serpent_enc_blk16:
 	write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
 
 	ret;
-ENDPROC(__serpent_enc_blk16)
+SYM_FUNC_END(__serpent_enc_blk16)
 
 .align 8
-__serpent_dec_blk16:
+SYM_FUNC_START_LOCAL(__serpent_dec_blk16)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: ciphertext
@@ -671,7 +671,7 @@ __serpent_dec_blk16:
 	write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
 
 	ret;
-ENDPROC(__serpent_dec_blk16)
+SYM_FUNC_END(__serpent_dec_blk16)
 
 ENTRY(serpent_ecb_enc_16way)
 	/* input:
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index 73b471da3622..96ddfda4d7b2 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -249,7 +249,7 @@
 	vpxor		x3, wkey, x3;
 
 .align 8
-__twofish_enc_blk8:
+SYM_FUNC_START_LOCAL(__twofish_enc_blk8)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2: blocks
@@ -288,10 +288,10 @@ __twofish_enc_blk8:
 	outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
 
 	ret;
-ENDPROC(__twofish_enc_blk8)
+SYM_FUNC_END(__twofish_enc_blk8)
 
 .align 8
-__twofish_dec_blk8:
+SYM_FUNC_START_LOCAL(__twofish_dec_blk8)
 	/* input:
 	 *	%rdi: ctx, CTX
 	 *	RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2: encrypted blocks
@@ -328,7 +328,7 @@ __twofish_dec_blk8:
 	outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
 
 	ret;
-ENDPROC(__twofish_dec_blk8)
+SYM_FUNC_END(__twofish_dec_blk8)
 
 ENTRY(twofish_ecb_enc_8way)
 	/* input: