
[net-next,v6,07/23] zinc: ChaCha20 ARM and ARM64 implementations

Message ID 20180925145622.29959-8-Jason@zx2c4.com (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series WireGuard: Secure Network Tunnel

Commit Message

Jason A. Donenfeld Sept. 25, 2018, 2:56 p.m. UTC
This wires Andy Polyakov's NEON implementations up to the kernel for ARMv7
and ARMv8, and introduces Eric Biggers' ultra-fast scalar implementation for
CPUs without NEON or for CPUs with slow NEON (Cortex-A5 and Cortex-A7).

This commit does the following:
  - Adds the glue code for the assembly implementations.
  - Renames the ARMv8 code into place, since it can at this point be
    used wholesale.
  - Merges Andy Polyakov's ARMv7 NEON code with Eric Biggers' <=ARMv7
    scalar code.

Commit note: Eric Biggers' scalar code is brand new, and quite possibly
prematurely added to this commit, and so it may require a bit of revision.

This commit delivers performance approximately the same as, or much better
than, the existing crypto API's code, as measured on:

  - ARM1176JZF-S [ARMv6]
  - Cortex-A7    [ARMv7]
  - Cortex-A8    [ARMv7]
  - Cortex-A9    [ARMv7]
  - Cortex-A17   [ARMv7]
  - Cortex-A53   [ARMv8]
  - Cortex-A55   [ARMv8]
  - Cortex-A73   [ARMv8]
  - Cortex-A75   [ARMv8]

Interestingly, Andy Polyakov's scalar code is slower than Eric Biggers',
but is also significantly shorter. This has the advantage that it does
not evict other code from the L1 cache -- particularly on ARM11 chips -- and
so in certain circumstances it can actually be faster. However, this was not
found to have an effect on any code existing in the kernel today.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Co-authored-by: Eric Biggers <ebiggers@google.com>
Cc: Samuel Neves <sneves@dei.uc.pt>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
---
 lib/zinc/Makefile                             |   2 +
 lib/zinc/chacha20/chacha20-arm-glue.h         |  88 +++
 ...acha20-arm-cryptogams.S => chacha20-arm.S} | 502 ++++++++++++++++--
 ...20-arm64-cryptogams.S => chacha20-arm64.S} |   0
 lib/zinc/chacha20/chacha20.c                  |   2 +
 5 files changed, 556 insertions(+), 38 deletions(-)
 create mode 100644 lib/zinc/chacha20/chacha20-arm-glue.h
 rename lib/zinc/chacha20/{chacha20-arm-cryptogams.S => chacha20-arm.S} (71%)
 rename lib/zinc/chacha20/{chacha20-arm64-cryptogams.S => chacha20-arm64.S} (100%)

Comments

Ard Biesheuvel Sept. 26, 2018, 8:59 a.m. UTC | #1
On Tue, 25 Sep 2018 at 17:00, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> This wires Andy Polyakov's NEON implementations up to the kernel for ARMv7
> and ARMv8, and introduces Eric Biggers' ultra-fast scalar implementation for
> CPUs without NEON or for CPUs with slow NEON (Cortex-A5 and Cortex-A7).
>
> This commit does the following:
>   - Adds the glue code for the assembly implementations.
>   - Renames the ARMv8 code into place, since it can at this point be
>     used wholesale.
>   - Merges Andy Polyakov's ARMv7 NEON code with Eric Biggers' <=ARMv7
>     scalar code.
>
> Commit note: Eric Biggers' scalar code is brand new, and quite possibly
> prematurely added to this commit, and so it may require a bit of revision.
>
> This commit delivers performance approximately the same as, or much better
> than, the existing crypto API's code, as measured on:
>
>   - ARM1176JZF-S [ARMv6]
>   - Cortex-A7    [ARMv7]
>   - Cortex-A8    [ARMv7]
>   - Cortex-A9    [ARMv7]
>   - Cortex-A17   [ARMv7]
>   - Cortex-A53   [ARMv8]
>   - Cortex-A55   [ARMv8]
>   - Cortex-A73   [ARMv8]
>   - Cortex-A75   [ARMv8]
>
> Interestingly, Andy Polyakov's scalar code is slower than Eric Biggers',
> but is also significantly shorter. This has the advantage that it does
> not evict other code from the L1 cache -- particularly on ARM11 chips -- and
> so in certain circumstances it can actually be faster. However, this was not
> found to have an effect on any code existing in the kernel today.
>
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
> Co-authored-by: Eric Biggers <ebiggers@google.com>
> Cc: Samuel Neves <sneves@dei.uc.pt>
> Cc: Andy Lutomirski <luto@kernel.org>
> Cc: Greg KH <gregkh@linuxfoundation.org>
> Cc: Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com>
> Cc: Russell King <linux@armlinux.org.uk>
> Cc: linux-arm-kernel@lists.infradead.org
> ---
>  lib/zinc/Makefile                             |   2 +
>  lib/zinc/chacha20/chacha20-arm-glue.h         |  88 +++
>  ...acha20-arm-cryptogams.S => chacha20-arm.S} | 502 ++++++++++++++++--
>  ...20-arm64-cryptogams.S => chacha20-arm64.S} |   0
>  lib/zinc/chacha20/chacha20.c                  |   2 +
>  5 files changed, 556 insertions(+), 38 deletions(-)
>  create mode 100644 lib/zinc/chacha20/chacha20-arm-glue.h
>  rename lib/zinc/chacha20/{chacha20-arm-cryptogams.S => chacha20-arm.S} (71%)
>  rename lib/zinc/chacha20/{chacha20-arm64-cryptogams.S => chacha20-arm64.S} (100%)
>
> diff --git a/lib/zinc/Makefile b/lib/zinc/Makefile
> index 223a0816c918..e47f64e12bbd 100644
> --- a/lib/zinc/Makefile
> +++ b/lib/zinc/Makefile
> @@ -4,4 +4,6 @@ ccflags-$(CONFIG_ZINC_DEBUG) += -DDEBUG
>
>  zinc_chacha20-y := chacha20/chacha20.o
>  zinc_chacha20-$(CONFIG_ZINC_ARCH_X86_64) += chacha20/chacha20-x86_64.o
> +zinc_chacha20-$(CONFIG_ZINC_ARCH_ARM) += chacha20/chacha20-arm.o
> +zinc_chacha20-$(CONFIG_ZINC_ARCH_ARM64) += chacha20/chacha20-arm64.o
>  obj-$(CONFIG_ZINC_CHACHA20) += zinc_chacha20.o
> diff --git a/lib/zinc/chacha20/chacha20-arm-glue.h b/lib/zinc/chacha20/chacha20-arm-glue.h
> new file mode 100644
> index 000000000000..86cce851ed02
> --- /dev/null
> +++ b/lib/zinc/chacha20/chacha20-arm-glue.h
> @@ -0,0 +1,88 @@
> +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
> +/*
> + * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
> + */
> +
> +#include <asm/hwcap.h>
> +#include <asm/neon.h>
> +#if defined(CONFIG_ARM)
> +#include <asm/system_info.h>
> +#include <asm/cputype.h>
> +#endif
> +
> +asmlinkage void chacha20_arm(u8 *out, const u8 *in, const size_t len,
> +                            const u32 key[8], const u32 counter[4]);
> +#if defined(CONFIG_ARM)
> +asmlinkage void hchacha20_arm(const u32 state[16], u32 out[8]);
> +#endif
> +#if defined(CONFIG_KERNEL_MODE_NEON)
> +asmlinkage void chacha20_neon(u8 *out, const u8 *in, const size_t len,
> +                             const u32 key[8], const u32 counter[4]);
> +#endif
> +
> +static bool chacha20_use_neon __ro_after_init;
> +
> +static void __init chacha20_fpu_init(void)
> +{
> +#if defined(CONFIG_ARM64)
> +       chacha20_use_neon = elf_hwcap & HWCAP_ASIMD;
> +#elif defined(CONFIG_ARM)
> +       switch (read_cpuid_part()) {
> +       case ARM_CPU_PART_CORTEX_A7:
> +       case ARM_CPU_PART_CORTEX_A5:
> +               /* The Cortex-A7 and Cortex-A5 do not perform well with the NEON
> +                * implementation but do incredibly well with the scalar one and use
> +                * less power.
> +                */
> +               break;
> +       default:
> +               chacha20_use_neon = elf_hwcap & HWCAP_NEON;
> +       }
> +#endif
> +}
> +
> +static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
> +                                const u8 *src, size_t len,
> +                                simd_context_t *simd_context)
> +{
> +#if defined(CONFIG_KERNEL_MODE_NEON)
> +       if (chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> +           simd_use(simd_context))
> +               chacha20_neon(dst, src, len, state->key, state->counter);
> +       else
> +#endif

Better to use IS_ENABLED() here:

> +       if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
> +           chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> +           simd_use(simd_context))

Also, this still has unbounded worst case scheduling latency, given
that the outer library function passes its entire input straight into
the NEON routine.

> +               chacha20_arm(dst, src, len, state->key, state->counter);
> +
> +       state->counter[0] += (len + 63) / 64;
> +       return true;
> +}
> +
> +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS],
> +                                 const u8 nonce[HCHACHA20_NONCE_SIZE],
> +                                 const u8 key[HCHACHA20_KEY_SIZE],
> +                                 simd_context_t *simd_context)
> +{
> +#if defined(CONFIG_ARM)
> +       u32 x[] = { CHACHA20_CONSTANT_EXPA,
> +                   CHACHA20_CONSTANT_ND_3,
> +                   CHACHA20_CONSTANT_2_BY,
> +                   CHACHA20_CONSTANT_TE_K,
> +                   get_unaligned_le32(key + 0),
> +                   get_unaligned_le32(key + 4),
> +                   get_unaligned_le32(key + 8),
> +                   get_unaligned_le32(key + 12),
> +                   get_unaligned_le32(key + 16),
> +                   get_unaligned_le32(key + 20),
> +                   get_unaligned_le32(key + 24),
> +                   get_unaligned_le32(key + 28),
> +                   get_unaligned_le32(nonce + 0),
> +                   get_unaligned_le32(nonce + 4),
> +                   get_unaligned_le32(nonce + 8),
> +                   get_unaligned_le32(nonce + 12)
> +       };
> +       hchacha20_arm(x, derived_key);
> +       return true;
> +#else
> +       return false;
> +#endif
> +}
> diff --git a/lib/zinc/chacha20/chacha20-arm-cryptogams.S b/lib/zinc/chacha20/chacha20-arm.S
> similarity index 71%
> rename from lib/zinc/chacha20/chacha20-arm-cryptogams.S
> rename to lib/zinc/chacha20/chacha20-arm.S
> index 770bab469171..5abedafcf129 100644
> --- a/lib/zinc/chacha20/chacha20-arm-cryptogams.S
> +++ b/lib/zinc/chacha20/chacha20-arm.S
> @@ -1,13 +1,475 @@
>  /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
>  /*
> + * Copyright (C) 2018 Google, Inc.
>   * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
>   * Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved.
> - *
> - * This is based in part on Andy Polyakov's implementation from CRYPTOGAMS.
>   */
>
>  #include <linux/linkage.h>
>
> +/*
> + * The following scalar routine was written by Eric Biggers.
> + *
> + * Design notes:
> + *
> + * 16 registers would be needed to hold the state matrix, but only 14 are
> + * available because 'sp' and 'pc' cannot be used.  So we spill the elements
> + * (x8, x9) to the stack and swap them out with (x10, x11).  This adds one
> + * 'ldrd' and one 'strd' instruction per round.
> + *
> + * All rotates are performed using the implicit rotate operand accepted by the
> + * 'add' and 'eor' instructions.  This is faster than using explicit rotate
> + * instructions.  To make this work, we allow the values in the second and last
> + * rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the
> + * wrong rotation amount.  The rotation amount is then fixed up just in time
> + * when the values are used.  'brot' is the number of bits the values in row 'b'
> + * need to be rotated right to arrive at the correct values, and 'drot'
> + * similarly for row 'd'.  (brot, drot) start out as (0, 0) but we make it such
> + * that they end up as (25, 24) after every round.
> + */
> +
> +       // ChaCha state registers
> +       X0      .req    r0
> +       X1      .req    r1
> +       X2      .req    r2
> +       X3      .req    r3
> +       X4      .req    r4
> +       X5      .req    r5
> +       X6      .req    r6
> +       X7      .req    r7
> +       X8_X10  .req    r8      // shared by x8 and x10
> +       X9_X11  .req    r9      // shared by x9 and x11
> +       X12     .req    r10
> +       X13     .req    r11
> +       X14     .req    r12
> +       X15     .req    r14
> +
> +.Lexpand_32byte_k:
> +       // "expand 32-byte k"
> +       .word   0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
> +
> +#ifdef __thumb2__
> +#  define adrl adr
> +#endif
> +
> +.macro __rev           out, in,  t0, t1, t2
> +.if __LINUX_ARM_ARCH__ >= 6
> +       rev             \out, \in
> +.else
> +       lsl             \t0, \in, #24
> +       and             \t1, \in, #0xff00
> +       and             \t2, \in, #0xff0000
> +       orr             \out, \t0, \in, lsr #24
> +       orr             \out, \out, \t1, lsl #8
> +       orr             \out, \out, \t2, lsr #8
> +.endif
> +.endm
> +
> +.macro _le32_bswap     x,  t0, t1, t2
> +#ifdef __ARMEB__
> +       __rev           \x, \x,  \t0, \t1, \t2
> +#endif
> +.endm
> +
> +.macro _le32_bswap_4x  a, b, c, d,  t0, t1, t2
> +       _le32_bswap     \a,  \t0, \t1, \t2
> +       _le32_bswap     \b,  \t0, \t1, \t2
> +       _le32_bswap     \c,  \t0, \t1, \t2
> +       _le32_bswap     \d,  \t0, \t1, \t2
> +.endm
> +
> +.macro __ldrd          a, b, src, offset
> +#if __LINUX_ARM_ARCH__ >= 6
> +       ldrd            \a, \b, [\src, #\offset]
> +#else
> +       ldr             \a, [\src, #\offset]
> +       ldr             \b, [\src, #\offset + 4]
> +#endif
> +.endm
> +
> +.macro __strd          a, b, dst, offset
> +#if __LINUX_ARM_ARCH__ >= 6
> +       strd            \a, \b, [\dst, #\offset]
> +#else
> +       str             \a, [\dst, #\offset]
> +       str             \b, [\dst, #\offset + 4]
> +#endif
> +.endm
> +
> +.macro _halfround      a1, b1, c1, d1,  a2, b2, c2, d2
> +
> +       // a += b; d ^= a; d = rol(d, 16);
> +       add             \a1, \a1, \b1, ror #brot
> +       add             \a2, \a2, \b2, ror #brot
> +       eor             \d1, \a1, \d1, ror #drot
> +       eor             \d2, \a2, \d2, ror #drot
> +       // drot == 32 - 16 == 16
> +
> +       // c += d; b ^= c; b = rol(b, 12);
> +       add             \c1, \c1, \d1, ror #16
> +       add             \c2, \c2, \d2, ror #16
> +       eor             \b1, \c1, \b1, ror #brot
> +       eor             \b2, \c2, \b2, ror #brot
> +       // brot == 32 - 12 == 20
> +
> +       // a += b; d ^= a; d = rol(d, 8);
> +       add             \a1, \a1, \b1, ror #20
> +       add             \a2, \a2, \b2, ror #20
> +       eor             \d1, \a1, \d1, ror #16
> +       eor             \d2, \a2, \d2, ror #16
> +       // drot == 32 - 8 == 24
> +
> +       // c += d; b ^= c; b = rol(b, 7);
> +       add             \c1, \c1, \d1, ror #24
> +       add             \c2, \c2, \d2, ror #24
> +       eor             \b1, \c1, \b1, ror #20
> +       eor             \b2, \c2, \b2, ror #20
> +       // brot == 32 - 7 == 25
> +.endm
> +
> +.macro _doubleround
> +
> +       // column round
> +
> +       // quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13)
> +       _halfround      X0, X4, X8_X10, X12,  X1, X5, X9_X11, X13
> +
> +       // save (x8, x9); restore (x10, x11)
> +       __strd          X8_X10, X9_X11, sp, 0
> +       __ldrd          X8_X10, X9_X11, sp, 8
> +
> +       // quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15)
> +       _halfround      X2, X6, X8_X10, X14,  X3, X7, X9_X11, X15
> +
> +       .set brot, 25
> +       .set drot, 24
> +
> +       // diagonal round
> +
> +       // quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12)
> +       _halfround      X0, X5, X8_X10, X15,  X1, X6, X9_X11, X12
> +
> +       // save (x10, x11); restore (x8, x9)
> +       __strd          X8_X10, X9_X11, sp, 8
> +       __ldrd          X8_X10, X9_X11, sp, 0
> +
> +       // quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14)
> +       _halfround      X2, X7, X8_X10, X13,  X3, X4, X9_X11, X14
> +.endm
> +
> +.macro _chacha_permute nrounds
> +       .set brot, 0
> +       .set drot, 0
> +       .rept \nrounds / 2
> +        _doubleround
> +       .endr
> +.endm
> +
> +.macro _chacha         nrounds
> +
> +.Lnext_block\@:
> +       // Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN
> +       // Registers contain x0-x9,x12-x15.
> +
> +       // Do the core ChaCha permutation to update x0-x15.
> +       _chacha_permute \nrounds
> +
> +       add             sp, #8
> +       // Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN
> +       // Registers contain x0-x9,x12-x15.
> +       // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
> +
> +       // Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15).
> +       push            {X8_X10, X9_X11, X12, X13, X14, X15}
> +
> +       // Load (OUT, IN, LEN).
> +       ldr             r14, [sp, #96]
> +       ldr             r12, [sp, #100]
> +       ldr             r11, [sp, #104]
> +
> +       orr             r10, r14, r12
> +
> +       // Use slow path if fewer than 64 bytes remain.
> +       cmp             r11, #64
> +       blt             .Lxor_slowpath\@
> +
> +       // Use slow path if IN and/or OUT isn't 4-byte aligned.  Needed even on
> +       // ARMv6+, since ldmia and stmia (used below) still require alignment.
> +       tst             r10, #3
> +       bne             .Lxor_slowpath\@
> +
> +       // Fast path: XOR 64 bytes of aligned data.
> +
> +       // Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
> +       // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT.
> +       // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
> +
> +       // x0-x3
> +       __ldrd          r8, r9, sp, 32
> +       __ldrd          r10, r11, sp, 40
> +       add             X0, X0, r8
> +       add             X1, X1, r9
> +       add             X2, X2, r10
> +       add             X3, X3, r11
> +       _le32_bswap_4x  X0, X1, X2, X3,  r8, r9, r10
> +       ldmia           r12!, {r8-r11}
> +       eor             X0, X0, r8
> +       eor             X1, X1, r9
> +       eor             X2, X2, r10
> +       eor             X3, X3, r11
> +       stmia           r14!, {X0-X3}
> +
> +       // x4-x7
> +       __ldrd          r8, r9, sp, 48
> +       __ldrd          r10, r11, sp, 56
> +       add             X4, r8, X4, ror #brot
> +       add             X5, r9, X5, ror #brot
> +       ldmia           r12!, {X0-X3}
> +       add             X6, r10, X6, ror #brot
> +       add             X7, r11, X7, ror #brot
> +       _le32_bswap_4x  X4, X5, X6, X7,  r8, r9, r10
> +       eor             X4, X4, X0
> +       eor             X5, X5, X1
> +       eor             X6, X6, X2
> +       eor             X7, X7, X3
> +       stmia           r14!, {X4-X7}
> +
> +       // x8-x15
> +       pop             {r0-r7}                 // (x8-x9,x12-x15,x10-x11)
> +       __ldrd          r8, r9, sp, 32
> +       __ldrd          r10, r11, sp, 40
> +       add             r0, r0, r8              // x8
> +       add             r1, r1, r9              // x9
> +       add             r6, r6, r10             // x10
> +       add             r7, r7, r11             // x11
> +       _le32_bswap_4x  r0, r1, r6, r7,  r8, r9, r10
> +       ldmia           r12!, {r8-r11}
> +       eor             r0, r0, r8              // x8
> +       eor             r1, r1, r9              // x9
> +       eor             r6, r6, r10             // x10
> +       eor             r7, r7, r11             // x11
> +       stmia           r14!, {r0,r1,r6,r7}
> +       ldmia           r12!, {r0,r1,r6,r7}
> +       __ldrd          r8, r9, sp, 48
> +       __ldrd          r10, r11, sp, 56
> +       add             r2, r8, r2, ror #drot   // x12
> +       add             r3, r9, r3, ror #drot   // x13
> +       add             r4, r10, r4, ror #drot  // x14
> +       add             r5, r11, r5, ror #drot  // x15
> +       _le32_bswap_4x  r2, r3, r4, r5,  r9, r10, r11
> +         ldr           r9, [sp, #72]           // load LEN
> +       eor             r2, r2, r0              // x12
> +       eor             r3, r3, r1              // x13
> +       eor             r4, r4, r6              // x14
> +       eor             r5, r5, r7              // x15
> +         subs          r9, #64                 // decrement and check LEN
> +       stmia           r14!, {r2-r5}
> +
> +       beq             .Ldone\@
> +
> +.Lprepare_for_next_block\@:
> +
> +       // Stack: x0-x15 OUT IN LEN
> +
> +       // Increment block counter (x12)
> +       add             r8, #1
> +
> +       // Store updated (OUT, IN, LEN)
> +       str             r14, [sp, #64]
> +       str             r12, [sp, #68]
> +       str             r9, [sp, #72]
> +
> +         mov           r14, sp
> +
> +       // Store updated block counter (x12)
> +       str             r8, [sp, #48]
> +
> +         sub           sp, #16
> +
> +       // Reload state and do next block
> +       ldmia           r14!, {r0-r11}          // load x0-x11
> +       __strd          r10, r11, sp, 8         // store x10-x11 before state
> +       ldmia           r14, {r10-r12,r14}      // load x12-x15
> +       b               .Lnext_block\@
> +
> +.Lxor_slowpath\@:
> +       // Slow path: < 64 bytes remaining, or unaligned input or output buffer.
> +       // We handle it by storing the 64 bytes of keystream to the stack, then
> +       // XOR-ing the needed portion with the data.
> +
> +       // Allocate keystream buffer
> +       sub             sp, #64
> +       mov             r14, sp
> +
> +       // Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
> +       // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0.
> +       // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
> +
> +       // Save keystream for x0-x3
> +       __ldrd          r8, r9, sp, 96
> +       __ldrd          r10, r11, sp, 104
> +       add             X0, X0, r8
> +       add             X1, X1, r9
> +       add             X2, X2, r10
> +       add             X3, X3, r11
> +       _le32_bswap_4x  X0, X1, X2, X3,  r8, r9, r10
> +       stmia           r14!, {X0-X3}
> +
> +       // Save keystream for x4-x7
> +       __ldrd          r8, r9, sp, 112
> +       __ldrd          r10, r11, sp, 120
> +       add             X4, r8, X4, ror #brot
> +       add             X5, r9, X5, ror #brot
> +       add             X6, r10, X6, ror #brot
> +       add             X7, r11, X7, ror #brot
> +       _le32_bswap_4x  X4, X5, X6, X7,  r8, r9, r10
> +         add           r8, sp, #64
> +       stmia           r14!, {X4-X7}
> +
> +       // Save keystream for x8-x15
> +       ldm             r8, {r0-r7}             // (x8-x9,x12-x15,x10-x11)
> +       __ldrd          r8, r9, sp, 128
> +       __ldrd          r10, r11, sp, 136
> +       add             r0, r0, r8              // x8
> +       add             r1, r1, r9              // x9
> +       add             r6, r6, r10             // x10
> +       add             r7, r7, r11             // x11
> +       _le32_bswap_4x  r0, r1, r6, r7,  r8, r9, r10
> +       stmia           r14!, {r0,r1,r6,r7}
> +       __ldrd          r8, r9, sp, 144
> +       __ldrd          r10, r11, sp, 152
> +       add             r2, r8, r2, ror #drot   // x12
> +       add             r3, r9, r3, ror #drot   // x13
> +       add             r4, r10, r4, ror #drot  // x14
> +       add             r5, r11, r5, ror #drot  // x15
> +       _le32_bswap_4x  r2, r3, r4, r5,  r9, r10, r11
> +       stmia           r14, {r2-r5}
> +
> +       // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
> +       // Registers: r8 is block counter, r12 is IN.
> +
> +       ldr             r9, [sp, #168]          // LEN
> +       ldr             r14, [sp, #160]         // OUT
> +       cmp             r9, #64
> +         mov           r0, sp
> +       movle           r1, r9
> +       movgt           r1, #64
> +       // r1 is number of bytes to XOR, in range [1, 64]
> +
> +.if __LINUX_ARM_ARCH__ < 6
> +       orr             r2, r12, r14
> +       tst             r2, #3                  // IN or OUT misaligned?
> +       bne             .Lxor_next_byte\@
> +.endif
> +
> +       // XOR a word at a time
> +.rept 16
> +       subs            r1, #4
> +       blt             .Lxor_words_done\@
> +       ldr             r2, [r12], #4
> +       ldr             r3, [r0], #4
> +       eor             r2, r2, r3
> +       str             r2, [r14], #4
> +.endr
> +       b               .Lxor_slowpath_done\@
> +.Lxor_words_done\@:
> +       ands            r1, r1, #3
> +       beq             .Lxor_slowpath_done\@
> +
> +       // XOR a byte at a time
> +.Lxor_next_byte\@:
> +       ldrb            r2, [r12], #1
> +       ldrb            r3, [r0], #1
> +       eor             r2, r2, r3
> +       strb            r2, [r14], #1
> +       subs            r1, #1
> +       bne             .Lxor_next_byte\@
> +
> +.Lxor_slowpath_done\@:
> +       subs            r9, #64
> +       add             sp, #96
> +       bgt             .Lprepare_for_next_block\@
> +
> +.Ldone\@:
> +.endm  // _chacha
> +
> +/*
> + * void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8],
> + *                  const u32 iv[4]);
> + */
> +ENTRY(chacha20_arm)
> +       cmp             r2, #0                  // len == 0?
> +       bxeq            lr
> +
> +       push            {r0-r2,r4-r11,lr}
> +
> +       // Push state x0-x15 onto stack.
> +       // Also store an extra copy of x10-x11 just before the state.
> +
> +       ldr             r4, [sp, #48]           // iv
> +       mov             r0, sp
> +       sub             sp, #80
> +
> +       // iv: x12-x15
> +       ldm             r4, {X12,X13,X14,X15}
> +       stmdb           r0!, {X12,X13,X14,X15}
> +
> +       // key: x4-x11
> +       __ldrd          X8_X10, X9_X11, r3, 24
> +       __strd          X8_X10, X9_X11, sp, 8
> +       stmdb           r0!, {X8_X10, X9_X11}
> +       ldm             r3, {X4-X9_X11}
> +       stmdb           r0!, {X4-X9_X11}
> +
> +       // constants: x0-x3
> +       adrl            X3, .Lexpand_32byte_k
> +       ldm             X3, {X0-X3}
> +       __strd          X0, X1, sp, 16
> +       __strd          X2, X3, sp, 24
> +
> +       _chacha         20
> +
> +       add             sp, #76
> +       pop             {r4-r11, pc}
> +ENDPROC(chacha20_arm)
> +
> +/*
> + * void hchacha20_arm(const u32 state[16], u32 out[8]);
> + */
> +ENTRY(hchacha20_arm)
> +       push            {r1,r4-r11,lr}
> +
> +       mov             r14, r0
> +       ldmia           r14!, {r0-r11}          // load x0-x11
> +       push            {r10-r11}               // store x10-x11 to stack
> +       ldm             r14, {r10-r12,r14}      // load x12-x15
> +       sub             sp, #8
> +
> +       _chacha_permute 20
> +
> +       // Skip over (unused0-unused1, x10-x11)
> +       add             sp, #16
> +
> +       // Fix up rotations of x12-x15
> +       ror             X12, X12, #drot
> +       ror             X13, X13, #drot
> +         pop           {r4}                    // load 'out'
> +       ror             X14, X14, #drot
> +       ror             X15, X15, #drot
> +
> +       // Store (x0-x3,x12-x15) to 'out'
> +       stm             r4, {X0,X1,X2,X3,X12,X13,X14,X15}
> +
> +       pop             {r4-r11,pc}
> +ENDPROC(hchacha20_arm)
> +
> +#ifdef CONFIG_KERNEL_MODE_NEON
> +/*
> + * The following NEON routine was ported from Andy Polyakov's implementation
> + * from CRYPTOGAMS. It begins with parts of the CRYPTOGAMS scalar routine,
> + * since certain NEON code paths actually branch to it.
> + */
> +
>  .text
>  #if defined(__thumb2__) || defined(__clang__)
>  .syntax        unified
> @@ -22,39 +484,6 @@
>  #define ldrhsb ldrbhs
>  #endif
>
> -.align 5
> -.Lsigma:
> -.long  0x61707865,0x3320646e,0x79622d32,0x6b206574     @ endian-neutral
> -.Lone:
> -.long  1,0,0,0
> -.word  -1
> -
> -.align 5
> -ENTRY(chacha20_arm)
> -       ldr     r12,[sp,#0]             @ pull pointer to counter and nonce
> -       stmdb   sp!,{r0-r2,r4-r11,lr}
> -       cmp     r2,#0                   @ len==0?
> -#ifdef __thumb2__
> -       itt     eq
> -#endif
> -       addeq   sp,sp,#4*3
> -       beq     .Lno_data_arm
> -       ldmia   r12,{r4-r7}             @ load counter and nonce
> -       sub     sp,sp,#4*(16)           @ off-load area
> -#if __LINUX_ARM_ARCH__ < 7 && !defined(__thumb2__)
> -       sub     r14,pc,#100             @ .Lsigma
> -#else
> -       adr     r14,.Lsigma             @ .Lsigma
> -#endif
> -       stmdb   sp!,{r4-r7}             @ copy counter and nonce
> -       ldmia   r3,{r4-r11}             @ load key
> -       ldmia   r14,{r0-r3}             @ load sigma
> -       stmdb   sp!,{r4-r11}            @ copy key
> -       stmdb   sp!,{r0-r3}             @ copy sigma
> -       str     r10,[sp,#4*(16+10)]     @ off-load "rx"
> -       str     r11,[sp,#4*(16+11)]     @ off-load "rx"
> -       b       .Loop_outer_enter
> -
>  .align 4
>  .Loop_outer:
>         ldmia   sp,{r0-r9}              @ load key material
> @@ -748,11 +1177,8 @@ ENTRY(chacha20_arm)
>
>  .Ldone:
>         add     sp,sp,#4*(32+3)
> -.Lno_data_arm:
>         ldmia   sp!,{r4-r11,pc}
> -ENDPROC(chacha20_arm)
>
> -#ifdef CONFIG_KERNEL_MODE_NEON
>  .align 5
>  .Lsigma2:
>  .long  0x61707865,0x3320646e,0x79622d32,0x6b206574     @ endian-neutral
> diff --git a/lib/zinc/chacha20/chacha20-arm64-cryptogams.S b/lib/zinc/chacha20/chacha20-arm64.S
> similarity index 100%
> rename from lib/zinc/chacha20/chacha20-arm64-cryptogams.S
> rename to lib/zinc/chacha20/chacha20-arm64.S
> diff --git a/lib/zinc/chacha20/chacha20.c b/lib/zinc/chacha20/chacha20.c
> index 4354b874a6a5..fc4f74fca653 100644
> --- a/lib/zinc/chacha20/chacha20.c
> +++ b/lib/zinc/chacha20/chacha20.c
> @@ -16,6 +16,8 @@
>
>  #if defined(CONFIG_ZINC_ARCH_X86_64)
>  #include "chacha20-x86_64-glue.h"
> +#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64)
> +#include "chacha20-arm-glue.h"
>  #else
>  void __init chacha20_fpu_init(void)
>  {
> --
> 2.19.0
>
Jason A. Donenfeld Sept. 26, 2018, 1:32 p.m. UTC | #2
Hi Ard,

On Wed, Sep 26, 2018 at 10:59 AM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> > +static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
> > +                                const u8 *src, size_t len,
> > +                                simd_context_t *simd_context)
> > +{
> > +#if defined(CONFIG_KERNEL_MODE_NEON)
> > +       if (chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> > +           simd_use(simd_context))
> > +               chacha20_neon(dst, src, len, state->key, state->counter);
> > +       else
> > +#endif
>
> Better to use IS_ENABLED() here:
>
> > +       if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
> > +           chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> > +           simd_use(simd_context))

Good idea. I'll fix that up.

>
> Also, this still has unbounded worst case scheduling latency, given
> that the outer library function passes its entire input straight into
> the NEON routine.

The vast majority of crypto routines in arch/*/crypto/ follow this
same exact pattern, actually. I realize a few don't -- probably the
ones you had a hand in :) -- but I think this is up to the caller to
handle. I made a change so that in chacha20poly1305.c, it calls
simd_relax after handling each scatter-gather element, so a
"construction" will handle this gracefully. But I believe it's up to
the caller to decide on what sizes of information it wants to pass to
primitives. Put differently, this also hasn't ever been an issue
before -- the existing state of the tree indicates this -- and so I
don't anticipate this will be a real issue now. And if it becomes one,
this is something we can address *later*, but certainly there's no use
of adding additional complexity to the initial patchset to do this
now.

Jason
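
(A rough sketch of the pattern described above -- calling simd_relax()
between scatter-gather elements -- might look like the following. The
sg-walking details and the top-level chacha20() call are illustrative
assumptions, not the actual chacha20poly1305.c code.)

#include <linux/scatterlist.h>
#include <zinc/chacha20.h>

/* Sketch only: encrypt each scatter-gather element in place, giving the
 * scheduler a chance between elements so that a long walk never keeps
 * preemption disabled for the whole input. */
static void chacha20_sg_sketch(struct chacha20_ctx *state,
                               struct scatterlist *sgl, unsigned int nents,
                               simd_context_t *simd_context)
{
    struct scatterlist *sg;
    unsigned int i;

    for_each_sg(sgl, sg, nents, i) {
        u8 *vaddr = sg_virt(sg);    /* assumes lowmem pages for brevity */

        chacha20(state, vaddr, vaddr, sg->length, simd_context);
        simd_relax(simd_context);   /* preemption point between elements */
    }
}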
Ard Biesheuvel Sept. 26, 2018, 2:02 p.m. UTC | #3
(+ Herbert, Thomas)

On Wed, 26 Sep 2018 at 15:33, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> Hi Ard,
>
> On Wed, Sep 26, 2018 at 10:59 AM Ard Biesheuvel
> <ard.biesheuvel@linaro.org> wrote:
> > > +static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
> > > +                                const u8 *src, size_t len,
> > > +                                simd_context_t *simd_context)
> > > +{
> > > +#if defined(CONFIG_KERNEL_MODE_NEON)
> > > +       if (chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> > > +           simd_use(simd_context))
> > > +               chacha20_neon(dst, src, len, state->key, state->counter);
> > > +       else
> > > +#endif
> >
> > Better to use IS_ENABLED() here:
> >
> > > +       if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
> > > +           chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> > > +           simd_use(simd_context))
>
> Good idea. I'll fix that up.
>
> >
> > Also, this still has unbounded worst case scheduling latency, given
> > that the outer library function passes its entire input straight into
> > the NEON routine.
>
> The vast majority of crypto routines in arch/*/crypto/ follow this
> same exact pattern, actually. I realize a few don't -- probably the
> ones you had a hand in :) -- but I think this is up to the caller to
> handle.

Anything that uses the scatterwalk API (AEADs and skciphers) will
handle at most a page at a time. Hashes are different, which is why
some of them have to handle it explicitly.

> I made a change so that in chacha20poly1305.c, it calls
> simd_relax after handling each scatter-gather element, so a
> "construction" will handle this gracefully. But I believe it's up to
> the caller to decide on what sizes of information it wants to pass to
> primitives. Put differently, this also hasn't ever been an issue
> before -- the existing state of the tree indicates this -- and so I
> don't anticipate this will be a real issue now.

The state of the tree does not capture all relevant context or
history. The scheduling latency issue was brought up very recently by
the -rt folks on the mailing lists.

> And if it becomes one,
> this is something we can address *later*, but certainly there's no use
> of adding additional complexity to the initial patchset to do this
> now.
>

You are introducing a very useful SIMD abstraction, but it lets code
run with preemption disabled for unbounded amounts of time, and so now
is the time to ensure we get it right.

Part of the [justified] criticism on the current state of the crypto
API is on its complexity, and so I don't think it makes sense to keep
it simple now and add the complexity later (and the same concern
applies to async support btw).
Andrew Lunn Sept. 26, 2018, 2:36 p.m. UTC | #4
> > Also, this still has unbounded worst case scheduling latency, given
> > that the outer library function passes its entire input straight into
> > the NEON routine.
> 
> The vast majority of crypto routines in arch/*/crypto/ follow this
> same exact pattern, actually. I realize a few don't -- probably the
> ones you had a hand in :) -- but I think this is up to the caller to
> handle. I made a change so that in chacha20poly1305.c, it calls
> simd_relax after handling each scatter-gather element, so a
> "construction" will handle this gracefully. But I believe it's up to
> the caller to decide on what sizes of information it wants to pass to
> primitives. Put differently, this also hasn't ever been an issue
> before -- the existing state of the tree indicates this -- and so I
> don't anticipate this will be a real issue now. And if it becomes one,
> this is something we can address *later*, but certainly there's no use
> of adding additional complexity to the initial patchset to do this
> now.

Hi Jason

This is not my area of expertise, so you should verify what I'm saying
here...

My guess is, IPSEC will mostly ask the crypto code to work on 1500
byte full MTU packets and 64 byte TCP ACK packets. Disk encryption I
guess works on 4K blocks. So these requests are all quite small,
keeping the latency reasonably bounded.

The wireguard interface claims it is GSO capable. This means the
network stack will pass it big chunks of data and leave it to the
network interface to perform the segmentation into 1500 byte MTU
frames on the wire. I've not looked at how wireguard actually handles
these big chunks. But to get maximum performance, it should try to
keep them whole, just add a header and/or trailer. Will wireguard pass
these big chunks of data to the crypto code? Do we now have 64K blocks
being worked on? Does the latency jump from 4K to 64K? That might be
new, so the existing state of the tree does not help you here.

   Andrew
Jason A. Donenfeld Sept. 26, 2018, 3:25 p.m. UTC | #5
On Wed, Sep 26, 2018 at 4:36 PM Andrew Lunn <andrew@lunn.ch> wrote:
> The wireguard interface claims it is GSO capable. This means the
> network stack will pass it big chunks of data and leave it to the
> network interface to perform the segmentation into 1500 byte MTU
> frames on the wire. I've not looked at how wireguard actually handles
> these big chunks. But to get maximum performance, it should try to
> keep them whole, just add a header and/or trailer. Will wireguard pass
> these big chunks of data to the crypto code? Do we now have 64K blocks
> being worked on? Does the latency jump from 4K to 64K? That might be
> new, so the existing state of the tree does not help you here.

No, it only requests GSO superpackets so that it can group the pieces
and encrypt them on the same core. But they're each encrypted
separately (broken up immediately after ndo_start_xmit), and so they
wind up being ~1420 bytes each to encrypt. I spoke about this at
netdev2.2 if you're interested in the architecture; there's a paper:

https://www.wireguard.com/papers/wireguard-netdev22.pdf
https://www.youtube.com/watch?v=54orFwtQ1XY
https://www.wireguard.com/talks/netdev2017-slides.pdf
Jason A. Donenfeld Sept. 26, 2018, 3:41 p.m. UTC | #6
On Wed, Sep 26, 2018 at 4:02 PM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> I don't think it makes sense to keep
> it simple now and add the complexity later (and the same concern
> applies to async support btw).

Ugh, no. I don't want to add needless complexity, period. Zinc is
synchronous, not asynchronous. It provides software implementations.
That's what it does. While many of your reviews have been useful, many
of your comments indicate some desire to change and mold the purpose
and focus of Zinc away from Zinc's intents. Stop that. It's not going
to become a bloated mess of "things Ard wanted and quipped about on
LKML." Things like these only serve to filibuster the patchset
indefinitely. But maybe that's what you'd like all along? Hard to
tell, honestly. So, no, sorry, Zinc isn't gaining an async interface
right now.
Ard Biesheuvel Sept. 26, 2018, 3:41 p.m. UTC | #7
On Wed, 26 Sep 2018 at 16:02, Ard Biesheuvel <ard.biesheuvel@linaro.org> wrote:
>
> (+ Herbert, Thomas)
>
> On Wed, 26 Sep 2018 at 15:33, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> >
> > Hi Ard,
> >
> > On Wed, Sep 26, 2018 at 10:59 AM Ard Biesheuvel
> > <ard.biesheuvel@linaro.org> wrote:
> > > > +static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
> > > > +                                const u8 *src, size_t len,
> > > > +                                simd_context_t *simd_context)
> > > > +{
> > > > +#if defined(CONFIG_KERNEL_MODE_NEON)
> > > > +       if (chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> > > > +           simd_use(simd_context))
> > > > +               chacha20_neon(dst, src, len, state->key, state->counter);
> > > > +       else
> > > > +#endif
> > >
> > > Better to use IS_ENABLED() here:
> > >
> > > > +       if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
> > > > +           chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> > > > +           simd_use(simd_context))
> >
> > Good idea. I'll fix that up.
> >
> > >
> > > Also, this still has unbounded worst case scheduling latency, given
> > > that the outer library function passes its entire input straight into
> > > the NEON routine.
> >
> > The vast majority of crypto routines in arch/*/crypto/ follow this
> > same exact pattern, actually. I realize a few don't -- probably the
> > ones you had a hand in :) -- but I think this is up to the caller to
> > handle.
>
> Anything that uses the scatterwalk API (AEADs and skciphers) will
> handle at most a page at a time. Hashes are different, which is why
> some of them have to handle it explicitly.
>
> > I made a change so that in chacha20poly1305.c, it calls
> > simd_relax after handling each scatter-gather element, so a
> > "construction" will handle this gracefully. But I believe it's up to
> > the caller to decide on what sizes of information it wants to pass to
> > primitives. Put differently, this also hasn't ever been an issue
> > before -- the existing state of the tree indicates this -- and so I
> > don't anticipate this will be a real issue now.
>
> The state of the tree does not capture all relevant context or
> history. The scheduling latency issue was brought up very recently by
> the -rt folks on the mailing lists.
>
> > And if it becomes one,
> > this is something we can address *later*, but certainly there's no use
> > of adding additional complexity to the initial patchset to do this
> > now.
> >
>
> You are introducing a very useful SIMD abstraction, but it lets code
> run with preemption disabled for unbounded amounts of time, and so now
> is the time to ensure we get it right.
>

Actually, looking at the code again, the abstraction does appear to be
fine; it is just the chacha20 code that does not make use of it.
Jason A. Donenfeld Sept. 26, 2018, 3:45 p.m. UTC | #8
On Wed, Sep 26, 2018 at 5:42 PM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
>
> On Wed, 26 Sep 2018 at 16:02, Ard Biesheuvel <ard.biesheuvel@linaro.org> wrote:
> Actually, looking at the code again, the abstraction does appear to be
> fine; it is just the chacha20 code that does not make use of it.

So what you have in mind is something like calling simd_relax() every
4096 bytes or so?
Jason A. Donenfeld Sept. 26, 2018, 3:49 p.m. UTC | #9
On Wed, Sep 26, 2018 at 5:45 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> So what you have in mind is something like calling simd_relax() every
> 4096 bytes or so?

That was actually pretty easy, putting together both of your suggestions:

static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
                 u8 *src, size_t len,
                 simd_context_t *simd_context)
{
    while (len > PAGE_SIZE) {
        chacha20_arch(state, dst, src, PAGE_SIZE, simd_context);
        len -= PAGE_SIZE;
        src += PAGE_SIZE;
        dst += PAGE_SIZE;
        simd_relax(simd_context);
    }
    if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && chacha20_use_neon &&
        len >= CHACHA20_BLOCK_SIZE * 3 && simd_use(simd_context))
        chacha20_neon(dst, src, len, state->key, state->counter);
    else
        chacha20_arm(dst, src, len, state->key, state->counter);

    state->counter[0] += (len + 63) / 64;
    return true;
}
Ard Biesheuvel Sept. 26, 2018, 3:51 p.m. UTC | #10
On Wed, 26 Sep 2018 at 17:50, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> On Wed, Sep 26, 2018 at 5:45 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> > So what you have in mind is something like calling simd_relax() every
> > 4096 bytes or so?
>
> That was actually pretty easy, putting together both of your suggestions:
>
> static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
>                  u8 *src, size_t len,
>                  simd_context_t *simd_context)
> {
>     while (len > PAGE_SIZE) {
>         chacha20_arch(state, dst, src, PAGE_SIZE, simd_context);
>         len -= PAGE_SIZE;
>         src += PAGE_SIZE;
>         dst += PAGE_SIZE;
>         simd_relax(simd_context);
>     }
>     if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && chacha20_use_neon &&
>         len >= CHACHA20_BLOCK_SIZE * 3 && simd_use(simd_context))
>         chacha20_neon(dst, src, len, state->key, state->counter);
>     else
>         chacha20_arm(dst, src, len, state->key, state->counter);
>
>     state->counter[0] += (len + 63) / 64;
>     return true;
> }

Nice one :-)

This works for me (but perhaps add a comment as well)
Jason A. Donenfeld Sept. 26, 2018, 3:58 p.m. UTC | #11
On Wed, Sep 26, 2018 at 5:52 PM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> Nice one :-)
>
> This works for me (but perhaps add a comment as well)

Sure. Just a prototype; it'll be clean for v7.
Andy Lutomirski Sept. 26, 2018, 4:21 p.m. UTC | #12
> On Sep 26, 2018, at 7:02 AM, Ard Biesheuvel <ard.biesheuvel@linaro.org> wrote:
> 
> (+ Herbert, Thomas)
> 
>> On Wed, 26 Sep 2018 at 15:33, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>> 
>> Hi Ard,
>> .
> 
>> And if it becomes one,
>> this is something we can address *later*, but certainly there's no use
>> of adding additional complexity to the initial patchset to do this
>> now.
>> 
> 
> You are introducing a very useful SIMD abstraction, but it lets code
> run with preemption disabled for unbounded amounts of time, and so now
> is the time to ensure we get it right.
> 
> Part of the [justified] criticism on the current state of the crypto
> API is on its complexity, and so I don't think it makes sense to keep
> it simple now and add the complexity later (and the same concern
> applies to async support btw).

Ard, is what you’re saying that the Zinc chacha20 functions should call simd_relax() every n bytes automatically for some reasonable value of n?  If so, seems sensible, except that some care might be needed to make sure they interact with preemption correctly.

What I mean is: the public Zinc entry points should either be callable in an atomic context or they should not be.  I think this should be checked at runtime in an appropriate place with an __might_sleep or similar.  Or simd_relax should learn to *not* schedule if the result of preempt_enable() leaves it atomic. (And the latter needs to be done in a way that works even on non-preempt kernels, and I don’t remember whether that’s possible.). And this should happen regardless of how many bytes are processed. IOW, calling into Zinc should be equally not atomic-safe for 100 bytes and for 10 MB.

As for async, ISTM a really good WireGuard accelerator would expose a different interface than crypto API supports, and it probably makes sense to wait for such hardware to show up before figuring out how to use it.  And no matter what form it takes, I don’t think it should complicate the basic Zinc crypto entry points.
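
(A hypothetical sketch of the simd_relax() behaviour described above --
only rescheduling when the caller is not atomic. The simd_get()/simd_put()
helpers and the in-use check are assumptions for illustration, not the
actual zinc code.)

#include <linux/preempt.h>
#include <linux/sched.h>

/* Hypothetical: drop the SIMD/FPU section, yield only when it is safe
 * to do so, then re-enter the section. */
static inline void simd_relax_sketch(simd_context_t *ctx)
{
    if (!simd_is_in_use(ctx))       /* assumed predicate */
        return;
    simd_put(ctx);                  /* kernel_neon_end()/kernel_fpu_end() */
    /* preemptible() is always false without CONFIG_PREEMPT_COUNT, which
     * is exactly the non-preempt caveat raised above. */
    if (preemptible())
        cond_resched();
    simd_get(ctx);                  /* kernel_neon_begin()/kernel_fpu_begin() */
}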
Ard Biesheuvel Sept. 26, 2018, 4:54 p.m. UTC | #13
On Wed, 26 Sep 2018 at 17:41, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> On Wed, Sep 26, 2018 at 4:02 PM Ard Biesheuvel
> <ard.biesheuvel@linaro.org> wrote:
> > I don't think it makes sense to keep
> > it simple now and add the complexity later (and the same concern
> > applies to async support btw).
>
> Ugh, no. I don't want to add needless complexity, period. Zinc is
> synchronous, not asynchronous. It provides software implementations.
> That's what it does. While many of your reviews have been useful, many
> of your comments indicate some desire to change and mold the purpose
> and focus of Zinc away from Zinc's intents. Stop that. It's not going
> to become a bloated mess of "things Ard wanted and quipped about on
> LKML." Things like these only serve to filibuster the patchset
> indefinitely. But maybe that's what you'd like all along? Hard to
> tell, honestly. So, no, sorry, Zinc isn't gaining an async interface
> right now.

Framing it as /needless/ complexity does not help at all. The changes
you are proposing are very useful, but nobody wants two crypto
subsystems with two different maintainers in the kernel, so I would
like to understand where this is going in the future. I am not saying
it should block these patches though.

Also, I have spent a *lot* of time looking at your code, and trying to
make it better, especially for use cases that weren't on your radar to
begin with (e.g., 'pet projects' [your words] like the Cortex-A7 which
will be in almost every new 32-bit Android phone). So characterizing
my feedback as some kind of sabotage is not very productive either.

Contrary to what you seem to think, I am not deeply invested in the
crypto API. What I do care about is that the ARM crypto pieces in the
kernel are maintained, supported and improved by someone who
understands the use cases Linaro's members care about, and is willing
to make an effort to gain such understanding if he doesn't. I have no
doubt that your involvement in the kernel's crypto subsystem will have
a significant positive impact when it comes to code quality,
robustness and usability. I'd just like to see a bit more
consideration for other aspects of kernel programming, e.g.,
preemption under -rt, stack size constraints, coding style, importing
code from other projects, etc. Please try to be less dismissive of
feedback the first time around, and try to understand why people are
raising these issues; I'm sure you will appreciate it when future
contributors to zinc do the same.
Jason A. Donenfeld Sept. 26, 2018, 5:03 p.m. UTC | #14
On Wed, Sep 26, 2018 at 6:21 PM Andy Lutomirski <luto@amacapital.net> wrote:
> Ard, is what you’re saying that the Zinc chacha20 functions should call simd_relax() every n bytes automatically for some reasonable value of n?  If so, seems sensible, except that some care might be needed to make sure they interact with preemption correctly.
>
> What I mean is: the public Zinc entry points should either be callable in an atomic context or they should not be.  I think this should be checked at runtime in an appropriate place with an __might_sleep or similar.  Or simd_relax should learn to *not* schedule if the result of preempt_enable() leaves it atomic. (And the latter needs to be done in a way that works even on non-preempt kernels, and I don’t remember whether that’s possible.). And this should happen regardless of how many bytes are processed. IOW, calling into Zinc should be equally not atomic-safe for 100 bytes and for 10 MB.

I'm not sure this is actually a problem. Namely:

preempt_disable();
kernel_fpu_begin();
kernel_fpu_end();
schedule(); <--- bug!

Calling kernel_fpu_end() disables preemption, but AFAIK, preemption
enabling/disabling is recursive, so kernel_fpu_end's use of
preempt_disable won't actually do anything until the outer preempt
enable is called:

preempt_disable();
kernel_fpu_begin();
kernel_fpu_end();
preempt_enable();
schedule(); <--- works!

Or am I missing some more subtle point?

Jason
Jason A. Donenfeld Sept. 26, 2018, 5:07 p.m. UTC | #15
On Wed, Sep 26, 2018 at 6:55 PM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> Framing it as /needless/ complexity does not help at all. The changes
> you are proposing are very useful, but nobody wants two crypto
> subsystems with two different maintainers in the kernel, so I would
> like to understand where this is going in the future. I am not saying
> it should block these patches though.

Thanks for clarifying. I understood you to be intending to block the
patches until they were converted to an async interface, which is not
what Zinc's about. Seeing as you're just curious about future
directions, that seems much more tenable.

> Also, I have spent a *lot* of time looking at your code, and trying to
> make it better, especially for use cases that weren't on your radar to
> begin with

I am extremely grateful for a good portion of your reviews indeed. As
I mentioned earlier, much is very useful. But in other places, I fear
you're steering this in a direction I really am hesitant to go.

> (e.g., 'pet projects' [your words]

Taken out of context.

> consideration for other aspects of kernel programming, e.g.,
> preemption under -rt, stack size constraints, coding style, importing
> code from other projects etc.

And indeed I've been pretty amenable to all of these concerns, and
continue to be. What I'm commenting on are things outside of these.


> - please try to be less dismissive of
> feedback first time around, but try to understand why people are
> raising these issues

Apologies, and duly noted. I'll give you the benefit of the doubt.

Jason
Ard Biesheuvel Sept. 26, 2018, 5:08 p.m. UTC | #16
On Wed, 26 Sep 2018 at 19:03, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> On Wed, Sep 26, 2018 at 6:21 PM Andy Lutomirski <luto@amacapital.net> wrote:
> > Ard, is what you’re saying that the Zinc chacha20 functions should call simd_relax() every n bytes automatically for some reasonable value of n?  If so, seems sensible, except that some care might be needed to make sure they interact with preemption correctly.
> >
> > What I mean is: the public Zinc entry points should either be callable in an atomic context or they should not be.  I think this should be checked at runtime in an appropriate place with an __might_sleep or similar.  Or simd_relax should learn to *not* schedule if the result of preempt_enable() leaves it atomic. (And the latter needs to be done in a way that works even on non-preempt kernels, and I don’t remember whether that’s possible.). And this should happen regardless of how many bytes are processed. IOW, calling into Zinc should be equally not atomic-safe for 100 bytes and for 10 MB.
>
> I'm not sure this is actually a problem. Namely:
>
> preempt_disable();
> kernel_fpu_begin();
> kernel_fpu_end();
> schedule(); <--- bug!
>
> Calling kernel_fpu_end() disables preemption, but AFAIK, preemption
> enabling/disabling is recursive, so kernel_fpu_end's use of
> preempt_disable won't actually do anything until the outer preempt
> enable is called:
>
> preempt_disable();
> kernel_fpu_begin();
> kernel_fpu_end();
> preempt_enable();
> schedule(); <--- works!
>
> Or am I missing some more subtle point?
>

No that seems accurate to me.
Andy Lutomirski Sept. 26, 2018, 5:23 p.m. UTC | #17
> On Sep 26, 2018, at 10:03 AM, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> 
>> On Wed, Sep 26, 2018 at 6:21 PM Andy Lutomirski <luto@amacapital.net> wrote:
>> Ard, is what you’re saying that the Zinc chacha20 functions should call simd_relax() every n bytes automatically for some reasonable value of n?  If so, seems sensible, except that some care might be needed to make sure they interact with preemption correctly.
>> 
>> What I mean is: the public Zinc entry points should either be callable in an atomic context or they should not be.  I think this should be checked at runtime in an appropriate place with an __might_sleep or similar.  Or simd_relax should learn to *not* schedule if the result of preempt_enable() leaves it atomic. (And the latter needs to be done in a way that works even on non-preempt kernels, and I don’t remember whether that’s possible.). And this should happen regardless of how many bytes are processed. IOW, calling into Zinc should be equally not atomic-safe for 100 bytes and for 10 MB.
> 
> I'm not sure this is actually a problem. Namely:
> 
> preempt_disable();
> kernel_fpu_begin();
> kernel_fpu_end();
> schedule(); <--- bug!
> 
> Calling kernel_fpu_end() disables preemption, but AFAIK, preemption
> enabling/disabling is recursive, so kernel_fpu_end's use of
> preempt_disable won't actually do anything until the outer preempt
> enable is called:
> 
> preempt_disable();
> kernel_fpu_begin();
> kernel_fpu_end();
> preempt_enable();
> schedule(); <--- works!
> 
> Or am I missing some more subtle point?
> 

No, I think you’re right. I was misremembering precisely how simd_relax() worked.
Eric Biggers Sept. 26, 2018, 5:37 p.m. UTC | #18
On Wed, Sep 26, 2018 at 05:41:12PM +0200, Jason A. Donenfeld wrote:
> On Wed, Sep 26, 2018 at 4:02 PM Ard Biesheuvel
> <ard.biesheuvel@linaro.org> wrote:
> > I don't think it makes sense to keep
> > it simple now and add the complexity later (and the same concern
> > applies to async support btw).
> 
> Ugh, no. I don't want to add needless complexity, period. Zinc is
> synchronous, not asynchronous. It provides software implementations.
> That's what it does. While many of your reviews have been useful, many
> of your comments indicate some desire to change and mold the purpose
> and focus of Zinc away from Zinc's intents. Stop that. It's not going
> to become a bloated mess of "things Ard wanted and quipped about on
> LKML." Things like these only serve to filibuster the patchset
> indefinitely. But maybe that's what you'd like all along? Hard to
> tell, honestly. So, no, sorry, Zinc isn't gaining an async interface
> right now.

Can you please stop accusing Ard of "filibustering" your patchset?  Spending too
long in non-preemptible code is a real problem even on non-RT systems.
syzkaller has been reporting bugs where the kernel spins too long without any
preemption points, both in crypto-related code and elsewhere in the kernel.  So
we've had to add explicit preemption points to address those, as otherwise users
can lock up all CPUs for tens of seconds.  The issue being discussed here is
basically the same except here preemption is being explicitly disabled via
kernel_neon_begin(), so it becomes a problem even on non-CONFIG_PREEMPT kernels.

It's much better to address this problem (which is a regression from the current
skcipher API which only maps a page at a time) up front rather than have to rush
to patch it after the fact once the syzbot reports start coming in.

- Eric
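
(For readers unfamiliar with the page-at-a-time point: a condensed
sketch of the pattern existing skcipher drivers follow. The
chacha20_doneon() helper name here is hypothetical, but, as Eric notes,
the walk only maps up to a page per iteration, so each SIMD and
preemption-off window stays bounded.)

struct skcipher_walk walk;
int err;

err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes > 0) {
	unsigned int nbytes = walk.nbytes;	/* at most one page */

	kernel_neon_begin();			/* preemption off... */
	chacha20_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
			nbytes);		/* hypothetical helper */
	kernel_neon_end();			/* ...and on again, every page */

	err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}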
Jason A. Donenfeld Sept. 26, 2018, 5:46 p.m. UTC | #19
On Wed, Sep 26, 2018 at 7:37 PM Eric Biggers <ebiggers@kernel.org> wrote:
> Can you please stop accusing Ard of "filibustering" your patchset?  Spending too
> long in non-preemptible code is a real problem even on non-RT systems.
> syzkaller has been reporting bugs where the kernel spins too long without any
> preemption points, both in crypto-related code and elsewhere in the kernel.  So
> we've had to add explicit preemption points to address those, as otherwise users
> can lock up all CPUs for tens of seconds.  The issue being discussed here is
> basically the same except here preemption is being explicitly disabled via
> kernel_neon_begin(), so it becomes a problem even on non-CONFIG_PREEMPT kernels.

The async distraction (re: filibustering) and the preempt concern are
two totally different things. I've already posted some code elsewhere
in this thread that addresses the preempt issue and that looked good to
Ard. This will be part of v7.
Jason A. Donenfeld Sept. 27, 2018, 12:04 a.m. UTC | #20
On Wed, Sep 26, 2018 at 5:52 PM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
>
> On Wed, 26 Sep 2018 at 17:50, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> >
> > On Wed, Sep 26, 2018 at 5:45 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> > > So what you have in mind is something like calling simd_relax() every
> > > 4096 bytes or so?
> >
> > That was actually pretty easy, putting together both of your suggestions:
> >
> > static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
> >                  u8 *src, size_t len,
> >                  simd_context_t *simd_context)
> > {
> >     while (len > PAGE_SIZE) {
> >         chacha20_arch(state, dst, src, PAGE_SIZE, simd_context);
> >         len -= PAGE_SIZE;
> >         src += PAGE_SIZE;
> >         dst += PAGE_SIZE;
> >         simd_relax(simd_context);
> >     }
> >     if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && chacha20_use_neon &&
> >         len >= CHACHA20_BLOCK_SIZE * 3 && simd_use(simd_context))
> >         chacha20_neon(dst, src, len, state->key, state->counter);
> >     else
> >         chacha20_arm(dst, src, len, state->key, state->counter);
> >
> >     state->counter[0] += (len + 63) / 64;
> >     return true;
> > }
>
> Nice one :-)
>
> This works for me (but perhaps add a comment as well)

As elegant as my quick recursive solution was, gcc produced kind of
bad code from it, as you might expect. So I've implemented this using
a boring old loop that works the way it's supposed to. This is marked
for v7.
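
(For illustration, a rough sketch of what the loop-based variant might
look like, reusing the helpers from the snippet quoted above; this is
not necessarily the exact code that will land in v7.)

static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
				 const u8 *src, size_t len,
				 simd_context_t *simd_context)
{
	while (len) {
		/* Never keep SIMD (and thus preemption) held for more
		 * than a page's worth of work at a stretch. */
		const size_t bytes = min_t(size_t, len, PAGE_SIZE);

		if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) &&
		    chacha20_use_neon && bytes >= CHACHA20_BLOCK_SIZE * 3 &&
		    simd_use(simd_context))
			chacha20_neon(dst, src, bytes, state->key,
				      state->counter);
		else
			chacha20_arm(dst, src, bytes, state->key,
				     state->counter);

		state->counter[0] += (bytes + 63) / 64;
		len -= bytes;
		src += bytes;
		dst += bytes;
		simd_relax(simd_context);
	}
	return true;
}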
Jason A. Donenfeld Sept. 27, 2018, 1:26 p.m. UTC | #21
Hi Thomas,

I'm trying to optimize this for crypto performance while still taking
into account preemption concerns. I'm having a bit of trouble figuring
out a way to determine numerically what the upper bounds for this
stuff looks like. I'm sure I could pick a pretty sane number that's
arguably okay -- and way under the limit -- but I still am interested
in determining what that limit actually is. I was hoping there'd be a
debugging option called, "warn if preemption is disabled for too
long", or something, but I couldn't find anything like that. I'm also
not quite sure what the latency limits are, to just compute this with
a formula. Essentially what I'm trying to determine is:

preempt_disable();
asm volatile(".fill N, 1, 0x90;");
preempt_enable();

What is the maximum value of N for which the above is okay? What
technique would you generally use in measuring this?

Thanks,
Jason
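
(A note on tooling: the ftrace "preemptoff" and "preemptirqsoff" tracers
are the in-tree facilities for tracking worst-case preemption-disabled
latency. As an ad-hoc way to correlate N with wall-clock time on a given
machine, one could also do something like the following sketch:)

u64 t0, t1;

preempt_disable();
t0 = ktime_get_ns();
asm volatile(".fill 65536, 1, 0x90;");	/* N = 64k single-byte x86 nops */
t1 = ktime_get_ns();
preempt_enable();
pr_info("preempt-off section took %llu ns\n", t1 - t0);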
Jason A. Donenfeld Sept. 27, 2018, 3:19 p.m. UTC | #22
Hey again Thomas,

On Thu, Sep 27, 2018 at 3:26 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>
> Hi Thomas,
>
> I'm trying to optimize this for crypto performance while still taking
> into account preemption concerns. I'm having a bit of trouble figuring
> out a way to determine numerically what the upper bounds for this
> stuff looks like. I'm sure I could pick a pretty sane number that's
> arguably okay -- and way under the limit -- but I still am interested
> in determining what that limit actually is. I was hoping there'd be a
> debugging option called, "warn if preemption is disabled for too
> long", or something, but I couldn't find anything like that. I'm also
> not quite sure what the latency limits are, to just compute this with
> a formula. Essentially what I'm trying to determine is:
>
> preempt_disable();
> asm volatile(".fill N, 1, 0x90;");
> preempt_enable();
>
> What is the maximum value of N for which the above is okay? What
> technique would you generally use in measuring this?
>
> Thanks,
> Jason

From talking to Peter (now CC'd) on IRC, it sounds like what you're
mostly interested in is clocktime latency on reasonable hardware, with
a goal of around ~20µs as a maximum upper bound? I don't expect to get
anywhere near this value at all, but if you can confirm that's a
decent ballpark, it would make for some interesting calculations.

Regards,
Jason
Andy Lutomirski Sept. 27, 2018, 4:26 p.m. UTC | #23
> On Sep 27, 2018, at 8:19 AM, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> 
> Hey again Thomas,
> 
>> On Thu, Sep 27, 2018 at 3:26 PM Jason A. Donenfeld <Jason@zx2c4.com> wrote:
>> 
>> Hi Thomas,
>> 
>> I'm trying to optimize this for crypto performance while still taking
>> into account preemption concerns. I'm having a bit of trouble figuring
>> out a way to determine numerically what the upper bounds for this
>> stuff looks like. I'm sure I could pick a pretty sane number that's
>> arguably okay -- and way under the limit -- but I still am interested
>> in determining what that limit actually is. I was hoping there'd be a
>> debugging option called, "warn if preemption is disabled for too
>> long", or something, but I couldn't find anything like that. I'm also
>> not quite sure what the latency limits are, to just compute this with
>> a formula. Essentially what I'm trying to determine is:
>> 
>> preempt_disable();
>> asm volatile(".fill N, 1, 0x90;");
>> preempt_enable();
>> 
>> What is the maximum value of N for which the above is okay? What
>> technique would you generally use in measuring this?
>> 
>> Thanks,
>> Jason
> 
> From talking to Peter (now CC'd) on IRC, it sounds like what you're
> mostly interested in is clocktime latency on reasonable hardware, with
> a goal of around ~20µs as a maximum upper bound? I don't expect to get
> anywhere near this value at all, but if you can confirm that's a
> decent ballpark, it would make for some interesting calculations.
> 
> 

I would add another consideration: if you can get better latency with negligible overhead (0.1%? 0.05%), then that might make sense too. For example, it seems plausible that checking need_resched() every few blocks adds basically no overhead, and the SIMD helpers could do this themselves or perhaps only ever do a block at a time.

need_resched() costs a cacheline access, but it’s usually a hot cacheline, and the actual check is just whether a certain bit in memory is set.
Jason A. Donenfeld Sept. 27, 2018, 5:06 p.m. UTC | #24
On Thu, Sep 27, 2018 at 6:27 PM Andy Lutomirski <luto@amacapital.net> wrote:
> I would add another consideration: if you can get better latency with negligible overhead (0.1%? 0.05%), then that might make sense too. For example, it seems plausible that checking need_resched() every few blocks adds basically no overhead, and the SIMD helpers could do this themselves or perhaps only ever do a block at a time.
>
> need_resched() costs a cacheline access, but it’s usually a hot cacheline, and the actual check is just whether a certain bit in memory is set.

Yes, you're right, I do plan to check quite often, rather than seldom,
for this reason. I've been toying with the idea of instead processing
65k (the maximum size of a UDP packet) at a time before checking
need_resched(), but armed with the 20µs figure, this isn't remotely
possible on most hardware. So I'll stick with the original
conservative plan of checking very often, and not diverging from the
behavior the present crypto API has already worked out in this
regard.
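
(A back-of-the-envelope check of that conclusion, using assumed rather
than measured cycles-per-byte figures:)

#include <stdio.h>

int main(void)
{
	const double bytes = 65536;	/* ~max UDP packet */
	const double cpb_simd = 1.0;	/* assumed cycles/byte, fast x86 SIMD */
	const double cpb_arm = 5.0;	/* assumed cycles/byte, modest ARM core */

	/* 65536 B at 1 cpb on 3 GHz is already ~22 us; at 5 cpb on 1 GHz
	 * it balloons to ~330 us, far beyond a ~20 us budget. */
	printf("3 GHz, %.0f cpb: %.1f us\n", cpb_simd, bytes * cpb_simd / 3e9 * 1e6);
	printf("1 GHz, %.0f cpb: %.1f us\n", cpb_arm, bytes * cpb_arm / 1e9 * 1e6);
	return 0;
}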
Ard Biesheuvel Sept. 28, 2018, 4:01 p.m. UTC | #25
On 25 September 2018 at 16:56, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> These wire Andy Polyakov's implementations up to the kernel for ARMv7,8
> NEON, and introduce Eric Biggers' ultra-fast scalar implementation for
> CPUs without NEON or for CPUs with slow NEON (Cortex-A5,7).
>
> This commit does the following:
>   - Adds the glue code for the assembly implementations.
>   - Renames the ARMv8 code into place, since it can at this point be
>     used wholesale.
>   - Merges Andy Polyakov's ARMv7 NEON code with Eric Biggers' <=ARMv7
>     scalar code.
>
> Commit note: Eric Biggers' scalar code is brand new, and quite possibly
> prematurely added to this commit, and so it may require a bit of revision.
>

Please put comments like this below the ---

> This commit delivers approximately the same or much better performance than
> the existing crypto API's code and has been measured to do as such on:
>
>   - ARM1176JZF-S [ARMv6]
>   - Cortex-A7    [ARMv7]
>   - Cortex-A8    [ARMv7]
>   - Cortex-A9    [ARMv7]
>   - Cortex-A17   [ARMv7]
>   - Cortex-A53   [ARMv8]
>   - Cortex-A55   [ARMv8]
>   - Cortex-A73   [ARMv8]
>   - Cortex-A75   [ARMv8]
>
> Interestingly, Andy Polyakov's scalar code is slower than Eric Biggers',
> but is also significantly shorter. This has the advantage that it does
> not evict other code from L1 cache -- particularly on ARM11 chips -- and
> so in certain circumstances it can actually be faster. However, it wasn't
> found that this had an affect on any code existing in the kernel today.
>
> Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
> Co-authored-by: Eric Biggers <ebiggers@google.com>
> Cc: Samuel Neves <sneves@dei.uc.pt>
> Cc: Andy Lutomirski <luto@kernel.org>
> Cc: Greg KH <gregkh@linuxfoundation.org>
> Cc: Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com>
> Cc: Russell King <linux@armlinux.org.uk>
> Cc: linux-arm-kernel@lists.infradead.org
> ---
>  lib/zinc/Makefile                             |   2 +
>  lib/zinc/chacha20/chacha20-arm-glue.h         |  88 +++
>  ...acha20-arm-cryptogams.S => chacha20-arm.S} | 502 ++++++++++++++++--
>  ...20-arm64-cryptogams.S => chacha20-arm64.S} |   0
>  lib/zinc/chacha20/chacha20.c                  |   2 +
>  5 files changed, 556 insertions(+), 38 deletions(-)
>  create mode 100644 lib/zinc/chacha20/chacha20-arm-glue.h
>  rename lib/zinc/chacha20/{chacha20-arm-cryptogams.S => chacha20-arm.S} (71%)
>  rename lib/zinc/chacha20/{chacha20-arm64-cryptogams.S => chacha20-arm64.S} (100%)
>
> diff --git a/lib/zinc/Makefile b/lib/zinc/Makefile
> index 223a0816c918..e47f64e12bbd 100644
> --- a/lib/zinc/Makefile
> +++ b/lib/zinc/Makefile
> @@ -4,4 +4,6 @@ ccflags-$(CONFIG_ZINC_DEBUG) += -DDEBUG
>
>  zinc_chacha20-y := chacha20/chacha20.o
>  zinc_chacha20-$(CONFIG_ZINC_ARCH_X86_64) += chacha20/chacha20-x86_64.o
> +zinc_chacha20-$(CONFIG_ZINC_ARCH_ARM) += chacha20/chacha20-arm.o
> +zinc_chacha20-$(CONFIG_ZINC_ARCH_ARM64) += chacha20/chacha20-arm64.o

Are these CONFIG_ symbols defined anywhere at this point?

In any case, I don't think there is a reason for these, at least not
on ARM/arm64. The 64-bitness is implied in both cases, and the
dependency on !CPU_32v3 you introduce (looking at the version of
Kconfig at the end of the series) seems spurious to me. Was that added
because of some kbuild robot report? (we don't support ARMv3 in the
kernel but ARCH_RPC is built in v3 mode because of historical reasons
while the actual core is a v4)

>  obj-$(CONFIG_ZINC_CHACHA20) += zinc_chacha20.o
> diff --git a/lib/zinc/chacha20/chacha20-arm-glue.h b/lib/zinc/chacha20/chacha20-arm-glue.h
> new file mode 100644
> index 000000000000..86cce851ed02
> --- /dev/null
> +++ b/lib/zinc/chacha20/chacha20-arm-glue.h
> @@ -0,0 +1,88 @@
> +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
> +/*
> + * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
> + */
> +
> +#include <asm/hwcap.h>
> +#include <asm/neon.h>
> +#if defined(CONFIG_ARM)
> +#include <asm/system_info.h>
> +#include <asm/cputype.h>
> +#endif
> +
> +asmlinkage void chacha20_arm(u8 *out, const u8 *in, const size_t len,
> +                            const u32 key[8], const u32 counter[4]);
> +#if defined(CONFIG_ARM)
> +asmlinkage void hchacha20_arm(const u32 state[16], u32 out[8]);
> +#endif
> +#if defined(CONFIG_KERNEL_MODE_NEON)
> +asmlinkage void chacha20_neon(u8 *out, const u8 *in, const size_t len,
> +                             const u32 key[8], const u32 counter[4]);
> +#endif
> +

No need to make asmlinkage declarations conditional

> +static bool chacha20_use_neon __ro_after_init;
> +
> +static void __init chacha20_fpu_init(void)
> +{
> +#if defined(CONFIG_ARM64)
> +       chacha20_use_neon = elf_hwcap & HWCAP_ASIMD;
> +#elif defined(CONFIG_ARM)
> +       switch (read_cpuid_part()) {
> +       case ARM_CPU_PART_CORTEX_A7:
> +       case ARM_CPU_PART_CORTEX_A5:
> +               /* The Cortex-A7 and Cortex-A5 do not perform well with the NEON
> +                * implementation but do incredibly with the scalar one and use
> +                * less power.
> +                */
> +               break;
> +       default:
> +               chacha20_use_neon = elf_hwcap & HWCAP_NEON;
> +       }
> +#endif
> +}
> +
> +static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
> +                                const u8 *src, size_t len,
> +                                simd_context_t *simd_context)
> +{
> +#if defined(CONFIG_KERNEL_MODE_NEON)

if (IS_ENABLED())

> +       if (chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
> +           simd_use(simd_context))
> +               chacha20_neon(dst, src, len, state->key, state->counter);
> +       else
> +#endif
> +               chacha20_arm(dst, src, len, state->key, state->counter);
> +
> +       state->counter[0] += (len + 63) / 64;
> +       return true;
> +}
> +
> +static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS],
> +                                 const u8 nonce[HCHACHA20_NONCE_SIZE],
> +                                 const u8 key[HCHACHA20_KEY_SIZE],
> +                                 simd_context_t *simd_context)
> +{
> +#if defined(CONFIG_ARM)
> +       u32 x[] = { CHACHA20_CONSTANT_EXPA,
> +                   CHACHA20_CONSTANT_ND_3,
> +                   CHACHA20_CONSTANT_2_BY,
> +                   CHACHA20_CONSTANT_TE_K,
> +                   get_unaligned_le32(key + 0),
> +                   get_unaligned_le32(key + 4),
> +                   get_unaligned_le32(key + 8),
> +                   get_unaligned_le32(key + 12),
> +                   get_unaligned_le32(key + 16),
> +                   get_unaligned_le32(key + 20),
> +                   get_unaligned_le32(key + 24),
> +                   get_unaligned_le32(key + 28),
> +                   get_unaligned_le32(nonce + 0),
> +                   get_unaligned_le32(nonce + 4),
> +                   get_unaligned_le32(nonce + 8),
> +                   get_unaligned_le32(nonce + 12)
> +       };
> +       hchacha20_arm(x, derived_key);

"""
if (!IS_ENABLED(CONFIG_ARM))
   return false;

hchacha20_arm(x, derived_key);
return true;
"""

and drop the #ifdefs


> +       return true;
> +#else
> +       return false;
> +#endif
> +}
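
(For illustration, a version of hchacha20_arch() with that suggestion
applied might look like the sketch below, reusing the constants and
helpers from the patch; the dead hchacha20_arm() call is discarded by
the compiler on !CONFIG_ARM builds, which is what makes the
unconditional asmlinkage declaration above safe.)

static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS],
				  const u8 nonce[HCHACHA20_NONCE_SIZE],
				  const u8 key[HCHACHA20_KEY_SIZE],
				  simd_context_t *simd_context)
{
	u32 x[] = { CHACHA20_CONSTANT_EXPA,
		    CHACHA20_CONSTANT_ND_3,
		    CHACHA20_CONSTANT_2_BY,
		    CHACHA20_CONSTANT_TE_K,
		    get_unaligned_le32(key + 0),
		    get_unaligned_le32(key + 4),
		    get_unaligned_le32(key + 8),
		    get_unaligned_le32(key + 12),
		    get_unaligned_le32(key + 16),
		    get_unaligned_le32(key + 20),
		    get_unaligned_le32(key + 24),
		    get_unaligned_le32(key + 28),
		    get_unaligned_le32(nonce + 0),
		    get_unaligned_le32(nonce + 4),
		    get_unaligned_le32(nonce + 8),
		    get_unaligned_le32(nonce + 12)
	};

	if (!IS_ENABLED(CONFIG_ARM))
		return false;

	hchacha20_arm(x, derived_key);
	return true;
}
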
> diff --git a/lib/zinc/chacha20/chacha20-arm-cryptogams.S b/lib/zinc/chacha20/chacha20-arm.S
> similarity index 71%
> rename from lib/zinc/chacha20/chacha20-arm-cryptogams.S
> rename to lib/zinc/chacha20/chacha20-arm.S
> index 770bab469171..5abedafcf129 100644
> --- a/lib/zinc/chacha20/chacha20-arm-cryptogams.S
> +++ b/lib/zinc/chacha20/chacha20-arm.S
> @@ -1,13 +1,475 @@
>  /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
>  /*
> + * Copyright (C) 2018 Google, Inc.
>   * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
>   * Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved.
> - *
> - * This is based in part on Andy Polyakov's implementation from CRYPTOGAMS.
>   */
>
>  #include <linux/linkage.h>
>
> +/*
> + * The following scalar routine was written by Eric Biggers.
> + *
> + * Design notes:
> + *
> + * 16 registers would be needed to hold the state matrix, but only 14 are
> + * available because 'sp' and 'pc' cannot be used.  So we spill the elements
> + * (x8, x9) to the stack and swap them out with (x10, x11).  This adds one
> + * 'ldrd' and one 'strd' instruction per round.
> + *
> + * All rotates are performed using the implicit rotate operand accepted by the
> + * 'add' and 'eor' instructions.  This is faster than using explicit rotate
> + * instructions.  To make this work, we allow the values in the second and last
> + * rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the
> + * wrong rotation amount.  The rotation amount is then fixed up just in time
> + * when the values are used.  'brot' is the number of bits the values in row 'b'
> + * need to be rotated right to arrive at the correct values, and 'drot'
> + * similarly for row 'd'.  (brot, drot) start out as (0, 0) but we make it such
> + * that they end up as (25, 24) after every round.
> + */
> +
> +       // ChaCha state registers
> +       X0      .req    r0
> +       X1      .req    r1
> +       X2      .req    r2
> +       X3      .req    r3
> +       X4      .req    r4
> +       X5      .req    r5
> +       X6      .req    r6
> +       X7      .req    r7
> +       X8_X10  .req    r8      // shared by x8 and x10
> +       X9_X11  .req    r9      // shared by x9 and x11
> +       X12     .req    r10
> +       X13     .req    r11
> +       X14     .req    r12
> +       X15     .req    r14
> +
> +.Lexpand_32byte_k:
> +       // "expand 32-byte k"
> +       .word   0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
> +
> +#ifdef __thumb2__
> +#  define adrl adr
> +#endif
> +
> +.macro __rev           out, in,  t0, t1, t2
> +.if __LINUX_ARM_ARCH__ >= 6
> +       rev             \out, \in
> +.else
> +       lsl             \t0, \in, #24
> +       and             \t1, \in, #0xff00
> +       and             \t2, \in, #0xff0000
> +       orr             \out, \t0, \in, lsr #24
> +       orr             \out, \out, \t1, lsl #8
> +       orr             \out, \out, \t2, lsr #8
> +.endif
> +.endm
> +
> +.macro _le32_bswap     x,  t0, t1, t2
> +#ifdef __ARMEB__
> +       __rev           \x, \x,  \t0, \t1, \t2
> +#endif
> +.endm
> +
> +.macro _le32_bswap_4x  a, b, c, d,  t0, t1, t2
> +       _le32_bswap     \a,  \t0, \t1, \t2
> +       _le32_bswap     \b,  \t0, \t1, \t2
> +       _le32_bswap     \c,  \t0, \t1, \t2
> +       _le32_bswap     \d,  \t0, \t1, \t2
> +.endm
> +
> +.macro __ldrd          a, b, src, offset
> +#if __LINUX_ARM_ARCH__ >= 6
> +       ldrd            \a, \b, [\src, #\offset]
> +#else
> +       ldr             \a, [\src, #\offset]
> +       ldr             \b, [\src, #\offset + 4]
> +#endif
> +.endm
> +
> +.macro __strd          a, b, dst, offset
> +#if __LINUX_ARM_ARCH__ >= 6
> +       strd            \a, \b, [\dst, #\offset]
> +#else
> +       str             \a, [\dst, #\offset]
> +       str             \b, [\dst, #\offset + 4]
> +#endif
> +.endm
> +
> +.macro _halfround      a1, b1, c1, d1,  a2, b2, c2, d2
> +
> +       // a += b; d ^= a; d = rol(d, 16);
> +       add             \a1, \a1, \b1, ror #brot
> +       add             \a2, \a2, \b2, ror #brot
> +       eor             \d1, \a1, \d1, ror #drot
> +       eor             \d2, \a2, \d2, ror #drot
> +       // drot == 32 - 16 == 16
> +
> +       // c += d; b ^= c; b = rol(b, 12);
> +       add             \c1, \c1, \d1, ror #16
> +       add             \c2, \c2, \d2, ror #16
> +       eor             \b1, \c1, \b1, ror #brot
> +       eor             \b2, \c2, \b2, ror #brot
> +       // brot == 32 - 12 == 20
> +
> +       // a += b; d ^= a; d = rol(d, 8);
> +       add             \a1, \a1, \b1, ror #20
> +       add             \a2, \a2, \b2, ror #20
> +       eor             \d1, \a1, \d1, ror #16
> +       eor             \d2, \a2, \d2, ror #16
> +       // drot == 32 - 8 == 24
> +
> +       // c += d; b ^= c; b = rol(b, 7);
> +       add             \c1, \c1, \d1, ror #24
> +       add             \c2, \c2, \d2, ror #24
> +       eor             \b1, \c1, \b1, ror #20
> +       eor             \b2, \c2, \b2, ror #20
> +       // brot == 32 - 7 == 25
> +.endm
> +
> +.macro _doubleround
> +
> +       // column round
> +
> +       // quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13)
> +       _halfround      X0, X4, X8_X10, X12,  X1, X5, X9_X11, X13
> +
> +       // save (x8, x9); restore (x10, x11)
> +       __strd          X8_X10, X9_X11, sp, 0
> +       __ldrd          X8_X10, X9_X11, sp, 8
> +
> +       // quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15)
> +       _halfround      X2, X6, X8_X10, X14,  X3, X7, X9_X11, X15
> +
> +       .set brot, 25
> +       .set drot, 24
> +
> +       // diagonal round
> +
> +       // quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12)
> +       _halfround      X0, X5, X8_X10, X15,  X1, X6, X9_X11, X12
> +
> +       // save (x10, x11); restore (x8, x9)
> +       __strd          X8_X10, X9_X11, sp, 8
> +       __ldrd          X8_X10, X9_X11, sp, 0
> +
> +       // quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14)
> +       _halfround      X2, X7, X8_X10, X13,  X3, X4, X9_X11, X14
> +.endm
> +
> +.macro _chacha_permute nrounds
> +       .set brot, 0
> +       .set drot, 0
> +       .rept \nrounds / 2
> +        _doubleround
> +       .endr
> +.endm
> +
> +.macro _chacha         nrounds
> +
> +.Lnext_block\@:
> +       // Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN
> +       // Registers contain x0-x9,x12-x15.
> +
> +       // Do the core ChaCha permutation to update x0-x15.
> +       _chacha_permute \nrounds
> +
> +       add             sp, #8
> +       // Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN
> +       // Registers contain x0-x9,x12-x15.
> +       // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
> +
> +       // Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15).
> +       push            {X8_X10, X9_X11, X12, X13, X14, X15}
> +
> +       // Load (OUT, IN, LEN).
> +       ldr             r14, [sp, #96]
> +       ldr             r12, [sp, #100]
> +       ldr             r11, [sp, #104]
> +
> +       orr             r10, r14, r12
> +
> +       // Use slow path if fewer than 64 bytes remain.
> +       cmp             r11, #64
> +       blt             .Lxor_slowpath\@
> +
> +       // Use slow path if IN and/or OUT isn't 4-byte aligned.  Needed even on
> +       // ARMv6+, since ldmia and stmia (used below) still require alignment.
> +       tst             r10, #3
> +       bne             .Lxor_slowpath\@
> +
> +       // Fast path: XOR 64 bytes of aligned data.
> +
> +       // Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
> +       // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT.
> +       // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
> +
> +       // x0-x3
> +       __ldrd          r8, r9, sp, 32
> +       __ldrd          r10, r11, sp, 40
> +       add             X0, X0, r8
> +       add             X1, X1, r9
> +       add             X2, X2, r10
> +       add             X3, X3, r11
> +       _le32_bswap_4x  X0, X1, X2, X3,  r8, r9, r10
> +       ldmia           r12!, {r8-r11}
> +       eor             X0, X0, r8
> +       eor             X1, X1, r9
> +       eor             X2, X2, r10
> +       eor             X3, X3, r11
> +       stmia           r14!, {X0-X3}
> +
> +       // x4-x7
> +       __ldrd          r8, r9, sp, 48
> +       __ldrd          r10, r11, sp, 56
> +       add             X4, r8, X4, ror #brot
> +       add             X5, r9, X5, ror #brot
> +       ldmia           r12!, {X0-X3}
> +       add             X6, r10, X6, ror #brot
> +       add             X7, r11, X7, ror #brot
> +       _le32_bswap_4x  X4, X5, X6, X7,  r8, r9, r10
> +       eor             X4, X4, X0
> +       eor             X5, X5, X1
> +       eor             X6, X6, X2
> +       eor             X7, X7, X3
> +       stmia           r14!, {X4-X7}
> +
> +       // x8-x15
> +       pop             {r0-r7}                 // (x8-x9,x12-x15,x10-x11)
> +       __ldrd          r8, r9, sp, 32
> +       __ldrd          r10, r11, sp, 40
> +       add             r0, r0, r8              // x8
> +       add             r1, r1, r9              // x9
> +       add             r6, r6, r10             // x10
> +       add             r7, r7, r11             // x11
> +       _le32_bswap_4x  r0, r1, r6, r7,  r8, r9, r10
> +       ldmia           r12!, {r8-r11}
> +       eor             r0, r0, r8              // x8
> +       eor             r1, r1, r9              // x9
> +       eor             r6, r6, r10             // x10
> +       eor             r7, r7, r11             // x11
> +       stmia           r14!, {r0,r1,r6,r7}
> +       ldmia           r12!, {r0,r1,r6,r7}
> +       __ldrd          r8, r9, sp, 48
> +       __ldrd          r10, r11, sp, 56
> +       add             r2, r8, r2, ror #drot   // x12
> +       add             r3, r9, r3, ror #drot   // x13
> +       add             r4, r10, r4, ror #drot  // x14
> +       add             r5, r11, r5, ror #drot  // x15
> +       _le32_bswap_4x  r2, r3, r4, r5,  r9, r10, r11
> +         ldr           r9, [sp, #72]           // load LEN
> +       eor             r2, r2, r0              // x12
> +       eor             r3, r3, r1              // x13
> +       eor             r4, r4, r6              // x14
> +       eor             r5, r5, r7              // x15
> +         subs          r9, #64                 // decrement and check LEN
> +       stmia           r14!, {r2-r5}
> +
> +       beq             .Ldone\@
> +
> +.Lprepare_for_next_block\@:
> +
> +       // Stack: x0-x15 OUT IN LEN
> +
> +       // Increment block counter (x12)
> +       add             r8, #1
> +
> +       // Store updated (OUT, IN, LEN)
> +       str             r14, [sp, #64]
> +       str             r12, [sp, #68]
> +       str             r9, [sp, #72]
> +
> +         mov           r14, sp
> +
> +       // Store updated block counter (x12)
> +       str             r8, [sp, #48]
> +
> +         sub           sp, #16
> +
> +       // Reload state and do next block
> +       ldmia           r14!, {r0-r11}          // load x0-x11
> +       __strd          r10, r11, sp, 8         // store x10-x11 before state
> +       ldmia           r14, {r10-r12,r14}      // load x12-x15
> +       b               .Lnext_block\@
> +
> +.Lxor_slowpath\@:
> +       // Slow path: < 64 bytes remaining, or unaligned input or output buffer.
> +       // We handle it by storing the 64 bytes of keystream to the stack, then
> +       // XOR-ing the needed portion with the data.
> +
> +       // Allocate keystream buffer
> +       sub             sp, #64
> +       mov             r14, sp
> +
> +       // Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
> +       // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0.
> +       // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
> +
> +       // Save keystream for x0-x3
> +       __ldrd          r8, r9, sp, 96
> +       __ldrd          r10, r11, sp, 104
> +       add             X0, X0, r8
> +       add             X1, X1, r9
> +       add             X2, X2, r10
> +       add             X3, X3, r11
> +       _le32_bswap_4x  X0, X1, X2, X3,  r8, r9, r10
> +       stmia           r14!, {X0-X3}
> +
> +       // Save keystream for x4-x7
> +       __ldrd          r8, r9, sp, 112
> +       __ldrd          r10, r11, sp, 120
> +       add             X4, r8, X4, ror #brot
> +       add             X5, r9, X5, ror #brot
> +       add             X6, r10, X6, ror #brot
> +       add             X7, r11, X7, ror #brot
> +       _le32_bswap_4x  X4, X5, X6, X7,  r8, r9, r10
> +         add           r8, sp, #64
> +       stmia           r14!, {X4-X7}
> +
> +       // Save keystream for x8-x15
> +       ldm             r8, {r0-r7}             // (x8-x9,x12-x15,x10-x11)
> +       __ldrd          r8, r9, sp, 128
> +       __ldrd          r10, r11, sp, 136
> +       add             r0, r0, r8              // x8
> +       add             r1, r1, r9              // x9
> +       add             r6, r6, r10             // x10
> +       add             r7, r7, r11             // x11
> +       _le32_bswap_4x  r0, r1, r6, r7,  r8, r9, r10
> +       stmia           r14!, {r0,r1,r6,r7}
> +       __ldrd          r8, r9, sp, 144
> +       __ldrd          r10, r11, sp, 152
> +       add             r2, r8, r2, ror #drot   // x12
> +       add             r3, r9, r3, ror #drot   // x13
> +       add             r4, r10, r4, ror #drot  // x14
> +       add             r5, r11, r5, ror #drot  // x15
> +       _le32_bswap_4x  r2, r3, r4, r5,  r9, r10, r11
> +       stmia           r14, {r2-r5}
> +
> +       // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
> +       // Registers: r8 is block counter, r12 is IN.
> +
> +       ldr             r9, [sp, #168]          // LEN
> +       ldr             r14, [sp, #160]         // OUT
> +       cmp             r9, #64
> +         mov           r0, sp
> +       movle           r1, r9
> +       movgt           r1, #64
> +       // r1 is number of bytes to XOR, in range [1, 64]
> +
> +.if __LINUX_ARM_ARCH__ < 6
> +       orr             r2, r12, r14
> +       tst             r2, #3                  // IN or OUT misaligned?
> +       bne             .Lxor_next_byte\@
> +.endif
> +
> +       // XOR a word at a time
> +.rept 16
> +       subs            r1, #4
> +       blt             .Lxor_words_done\@
> +       ldr             r2, [r12], #4
> +       ldr             r3, [r0], #4
> +       eor             r2, r2, r3
> +       str             r2, [r14], #4
> +.endr
> +       b               .Lxor_slowpath_done\@
> +.Lxor_words_done\@:
> +       ands            r1, r1, #3
> +       beq             .Lxor_slowpath_done\@
> +
> +       // XOR a byte at a time
> +.Lxor_next_byte\@:
> +       ldrb            r2, [r12], #1
> +       ldrb            r3, [r0], #1
> +       eor             r2, r2, r3
> +       strb            r2, [r14], #1
> +       subs            r1, #1
> +       bne             .Lxor_next_byte\@
> +
> +.Lxor_slowpath_done\@:
> +       subs            r9, #64
> +       add             sp, #96
> +       bgt             .Lprepare_for_next_block\@
> +
> +.Ldone\@:
> +.endm  // _chacha
> +
> +/*
> + * void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8],
> + *                  const u32 iv[4]);
> + */
> +ENTRY(chacha20_arm)
> +       cmp             r2, #0                  // len == 0?
> +       bxeq            lr
> +
> +       push            {r0-r2,r4-r11,lr}
> +
> +       // Push state x0-x15 onto stack.
> +       // Also store an extra copy of x10-x11 just before the state.
> +
> +       ldr             r4, [sp, #48]           // iv
> +       mov             r0, sp
> +       sub             sp, #80
> +
> +       // iv: x12-x15
> +       ldm             r4, {X12,X13,X14,X15}
> +       stmdb           r0!, {X12,X13,X14,X15}
> +
> +       // key: x4-x11
> +       __ldrd          X8_X10, X9_X11, r3, 24
> +       __strd          X8_X10, X9_X11, sp, 8
> +       stmdb           r0!, {X8_X10, X9_X11}
> +       ldm             r3, {X4-X9_X11}
> +       stmdb           r0!, {X4-X9_X11}
> +
> +       // constants: x0-x3
> +       adrl            X3, .Lexpand_32byte_k
> +       ldm             X3, {X0-X3}
> +       __strd          X0, X1, sp, 16
> +       __strd          X2, X3, sp, 24
> +
> +       _chacha         20
> +
> +       add             sp, #76
> +       pop             {r4-r11, pc}
> +ENDPROC(chacha20_arm)
> +
> +/*
> + * void hchacha20_arm(const u32 state[16], u32 out[8]);
> + */
> +ENTRY(hchacha20_arm)
> +       push            {r1,r4-r11,lr}
> +
> +       mov             r14, r0
> +       ldmia           r14!, {r0-r11}          // load x0-x11
> +       push            {r10-r11}               // store x10-x11 to stack
> +       ldm             r14, {r10-r12,r14}      // load x12-x15
> +       sub             sp, #8
> +
> +       _chacha_permute 20
> +
> +       // Skip over (unused0-unused1, x10-x11)
> +       add             sp, #16
> +
> +       // Fix up rotations of x12-x15
> +       ror             X12, X12, #drot
> +       ror             X13, X13, #drot
> +         pop           {r4}                    // load 'out'
> +       ror             X14, X14, #drot
> +       ror             X15, X15, #drot
> +
> +       // Store (x0-x3,x12-x15) to 'out'
> +       stm             r4, {X0,X1,X2,X3,X12,X13,X14,X15}
> +
> +       pop             {r4-r11,pc}
> +ENDPROC(hchacha20_arm)
> +
> +#ifdef CONFIG_KERNEL_MODE_NEON
> +/*
> + * This following NEON routine was ported from Andy Polyakov's implementation
> + * from CRYPTOGAMS. It begins with parts of the CRYPTOGAMS scalar routine,
> + * since certain NEON code paths actually branch to it.
> + */
> +
>  .text
>  #if defined(__thumb2__) || defined(__clang__)
>  .syntax        unified
> @@ -22,39 +484,6 @@
>  #define ldrhsb ldrbhs
>  #endif
>
> -.align 5
> -.Lsigma:
> -.long  0x61707865,0x3320646e,0x79622d32,0x6b206574     @ endian-neutral
> -.Lone:
> -.long  1,0,0,0
> -.word  -1
> -
> -.align 5
> -ENTRY(chacha20_arm)
> -       ldr     r12,[sp,#0]             @ pull pointer to counter and nonce
> -       stmdb   sp!,{r0-r2,r4-r11,lr}
> -       cmp     r2,#0                   @ len==0?
> -#ifdef __thumb2__
> -       itt     eq
> -#endif
> -       addeq   sp,sp,#4*3
> -       beq     .Lno_data_arm
> -       ldmia   r12,{r4-r7}             @ load counter and nonce
> -       sub     sp,sp,#4*(16)           @ off-load area
> -#if __LINUX_ARM_ARCH__ < 7 && !defined(__thumb2__)
> -       sub     r14,pc,#100             @ .Lsigma
> -#else
> -       adr     r14,.Lsigma             @ .Lsigma
> -#endif
> -       stmdb   sp!,{r4-r7}             @ copy counter and nonce
> -       ldmia   r3,{r4-r11}             @ load key
> -       ldmia   r14,{r0-r3}             @ load sigma
> -       stmdb   sp!,{r4-r11}            @ copy key
> -       stmdb   sp!,{r0-r3}             @ copy sigma
> -       str     r10,[sp,#4*(16+10)]     @ off-load "rx"
> -       str     r11,[sp,#4*(16+11)]     @ off-load "rx"
> -       b       .Loop_outer_enter
> -
>  .align 4
>  .Loop_outer:
>         ldmia   sp,{r0-r9}              @ load key material
> @@ -748,11 +1177,8 @@ ENTRY(chacha20_arm)
>
>  .Ldone:
>         add     sp,sp,#4*(32+3)
> -.Lno_data_arm:
>         ldmia   sp!,{r4-r11,pc}
> -ENDPROC(chacha20_arm)
>
> -#ifdef CONFIG_KERNEL_MODE_NEON
>  .align 5
>  .Lsigma2:
>  .long  0x61707865,0x3320646e,0x79622d32,0x6b206574     @ endian-neutral
> diff --git a/lib/zinc/chacha20/chacha20-arm64-cryptogams.S b/lib/zinc/chacha20/chacha20-arm64.S
> similarity index 100%
> rename from lib/zinc/chacha20/chacha20-arm64-cryptogams.S
> rename to lib/zinc/chacha20/chacha20-arm64.S
> diff --git a/lib/zinc/chacha20/chacha20.c b/lib/zinc/chacha20/chacha20.c
> index 4354b874a6a5..fc4f74fca653 100644
> --- a/lib/zinc/chacha20/chacha20.c
> +++ b/lib/zinc/chacha20/chacha20.c
> @@ -16,6 +16,8 @@
>
>  #if defined(CONFIG_ZINC_ARCH_X86_64)
>  #include "chacha20-x86_64-glue.h"
> +#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64)

As above, just use CONFIG_ARM / CONFIG_ARM64 directly

> +#include "chacha20-arm-glue.h"
>  #else
>  void __init chacha20_fpu_init(void)
>  {
> --
> 2.19.0
>
Jason A. Donenfeld Sept. 29, 2018, 2:20 a.m. UTC | #26
Hi Ard,

On Fri, Sep 28, 2018 at 6:02 PM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> Please put comments like this below the ---

git-notes is nice for this indeed.

> Are these CONFIG_ symbols defined anywhere at this point?

Yes, they're introduced in the first zinc commit. There's no git-blame
on git.kernel.org, presumably because it's expensive to compute, but
there is on my personal instance, so this might help:
https://git.zx2c4.com/linux-dev/blame/lib/zinc/Kconfig?h=jd/wireguard

> In any case, I don't think there is a reason for these, at least not
> on ARM/arm64. The 64-bitness is implied in both cases

You mean to say that since these knobs are def_bool y and are
essentially "depends on ARM", then I should just straight up use
CONFIG_ARM? I had thought about this, but figured this would make it
easier to later make these optional or have other options block them
if need be, or even if the dependencies and requirements for having them
changes (for example, with UML on x86). I think doing it this way
gives us some flexibility later on. So if that's a compelling enough
reason, I'd like to keep those.

> and the
> dependency on !CPU_32v3 you introduce (looking at the version of
> Kconfig at the end of the series) seems spurious to me. Was that added
> because of some kbuild robot report? (we don't support ARMv3 in the
> kernel but ARCH_RPC is built in v3 mode because of historical reasons
> while the actual core is a v4)

I added the !CPU_32v3 in my development tree after posting v6, so good
to hear you're just looking straight at the updated tree. If you see
things jump out in there prior to me posting v7, don't hesitate to let
me know.

The reason it was added was indeed because of:
https://lists.01.org/pipermail/kbuild-all/2018-September/053114.html
-- exactly what you suspected, ARCH_RPC. Have a better suggestion than
!CPU_32v3? It seems to me like so long as the kernel has CPU_32v3 as a
thing in any form, I should mark Zinc as not supporting it, since
we'll certainly be at least v4 and up. (Do you guys have any old Acorn
ARM610 boxes sitting around for old time's sake at LinaroHQ? ;-)

> > +#endif
> > +
>
> No need to make asmlinkage declarations conditional

Yep, addressed in the IS_ENABLED cleanup.

>
> if (IS_ENABLED())

Sorted.

>
> """
> if (!IS_ENABLED(CONFIG_ARM))
>    return false;
>
> hchacha20_arm(x, derived_key);
> return true;
> """
>
> and drop the #ifdefs

Also sorted.

Regards,
Jason
Ard Biesheuvel Sept. 29, 2018, 6:16 a.m. UTC | #27
On 29 September 2018 at 04:20, Jason A. Donenfeld <Jason@zx2c4.com> wrote:
> Hi Ard,
>
> On Fri, Sep 28, 2018 at 6:02 PM Ard Biesheuvel
> <ard.biesheuvel@linaro.org> wrote:
>> Please put comments like this below the ---
>
> git-notes is nice for this indeed.
>
>> Are these CONFIG_ symbols defined anywhere at this point?
>
> Yes, they're introduced in the first zinc commit. There's no git-blame
> on git.kernel.org, presumably because it's expensive to compute, but
> there is on my personal instance, so this might help:
> https://git.zx2c4.com/linux-dev/blame/lib/zinc/Kconfig?h=jd/wireguard
>
>> In any case, I don't think there is a reason for these, at least not
>> on ARM/arm64. The 64-bitness is implied in both cases
>
> You mean to say that since these knobs are def_bool y and are
> essentially "depends on ARM", then I should just straight up use
> CONFIG_ARM? I had thought about this, but figured this would make it
> easier to later make these optional or have other options block them
> if need be, or even if the dependencies and requirements for having them
> changes (for example, with UML on x86). I think doing it this way
> gives us some flexibility later on. So if that's a compelling enough
> reason, I'd like to keep those.
>

Sure. But probably better to be consistent then, and stop using
CONFIG_ARM directly in your code.

>> and the
>> dependency on !CPU_32v3 you introduce (looking at the version of
>> Kconfig at the end of the series) seems spurious to me. Was that added
>> because of some kbuild robot report? (we don't support ARMv3 in the
>> kernel but ARCH_RPC is built in v3 mode because of historical reasons
>> while the actual core is a v4)
>
> I added the !CPU_32v3 in my development tree after posting v6, so good
> to hear you're just looking straight at the updated tree. If you see
> things jump out in there prior to me posting v7, don't hesitate to let
> me know.
>
> The reason it was added was indeed because of:
> https://lists.01.org/pipermail/kbuild-all/2018-September/053114.html
> -- exactly what you suspected, ARCH_RPC. Have a better suggestion than
> !CPU_32v3?

Yes, you could just add

asflags-$(CONFIG_CPU_32v3) += -march=armv4

with a comment stating that we don't actually support ARMv3 but only
use it as a code generation target for reasons unrelated to the ISA

> It seems to me like so long as the kernel has CPU_32v3 as a
> thing in any form, I should mark Zinc as not supporting it, since
> we'll certainly be at least v4 and up. (Do you guys have any old Acorn
> ARM610 boxes sitting around for old time's sake at LinaroHQ? ;-)
>

AFAIK we only support the StrongARM flavor of RiscPC, which is ARMv4.
But yes, some people do care deeply about these antiquated platforms,
including Russell, and there is no particular reason to leave this one
behind.

>> > +#endif
>> > +
>>
>> No need to make asmlinkage declarations conditional
>
> Yep, addressed in the IS_ENABLED cleanup.
>
>>
>> if (IS_ENABLED())
>
> Sorted.
>
>>
>> """
>> if (!IS_ENABLED(CONFIG_ARM))
>>    return false;
>>
>> hchacha20_arm(x, derived_key);
>> return true;
>> """
>>
>> and drop the #ifdefs
>
> Also sorted.
>
> Regards,
> Jason
Jason A. Donenfeld Sept. 30, 2018, 2:33 a.m. UTC | #28
Hi Ard,

On Sat, Sep 29, 2018 at 8:16 AM Ard Biesheuvel
<ard.biesheuvel@linaro.org> wrote:
> > You mean to say that since these knobs are def_bool y and are
> > essentially "depends on ARM", then I should just straight up use
> > CONFIG_ARM? I had thought about this, but figured this would make it
> > easier to later make these optional or have other options block them
> > if need be, or even if the dependencies and requirements for having them
> > changes (for example, with UML on x86). I think doing it this way
> > gives us some flexibility later on. So if that's a compelling enough
> > reason, I'd like to keep those.
>
> Sure. But probably better to be consistent then, and stop using
> CONFIG_ARM directly in your code.

Ack.

> > The reason it was added was indeed because of:
> > https://lists.01.org/pipermail/kbuild-all/2018-September/053114.html
> > -- exactly what you suspected, ARCH_RPC. Have a better suggestion than
> > !CPU_32v3?
>
> Yes, you could just add
>
> asflags-$(CONFIG_CPU_32v3) += -march=armv4
>
> with a comment stating that we don't actually support ARMv3 but only
> use it as a code generation target for reasons unrelated to the ISA

Alright, I'll do exactly that. Though, if the rationale for this has
to do only with codegen -- with what the C compiler does -- then
shouldn't this be set globally for CONFIG_CPU_32v3? I couldn't find
any macros that test against __LINUX_ARM_ARCH__ being 3 in the
assembly, so this shouldn't be a problem I don't think. Maybe I'll
send a patch.

Jason

Patch

diff --git a/lib/zinc/Makefile b/lib/zinc/Makefile
index 223a0816c918..e47f64e12bbd 100644
--- a/lib/zinc/Makefile
+++ b/lib/zinc/Makefile
@@ -4,4 +4,6 @@  ccflags-$(CONFIG_ZINC_DEBUG) += -DDEBUG
 
 zinc_chacha20-y := chacha20/chacha20.o
 zinc_chacha20-$(CONFIG_ZINC_ARCH_X86_64) += chacha20/chacha20-x86_64.o
+zinc_chacha20-$(CONFIG_ZINC_ARCH_ARM) += chacha20/chacha20-arm.o
+zinc_chacha20-$(CONFIG_ZINC_ARCH_ARM64) += chacha20/chacha20-arm64.o
 obj-$(CONFIG_ZINC_CHACHA20) += zinc_chacha20.o
diff --git a/lib/zinc/chacha20/chacha20-arm-glue.h b/lib/zinc/chacha20/chacha20-arm-glue.h
new file mode 100644
index 000000000000..86cce851ed02
--- /dev/null
+++ b/lib/zinc/chacha20/chacha20-arm-glue.h
@@ -0,0 +1,88 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#if defined(CONFIG_ARM)
+#include <asm/system_info.h>
+#include <asm/cputype.h>
+#endif
+
+asmlinkage void chacha20_arm(u8 *out, const u8 *in, const size_t len,
+			     const u32 key[8], const u32 counter[4]);
+#if defined(CONFIG_ARM)
+asmlinkage void hchacha20_arm(const u32 state[16], u32 out[8]);
+#endif
+#if defined(CONFIG_KERNEL_MODE_NEON)
+asmlinkage void chacha20_neon(u8 *out, const u8 *in, const size_t len,
+			      const u32 key[8], const u32 counter[4]);
+#endif
+
+static bool chacha20_use_neon __ro_after_init;
+
+static void __init chacha20_fpu_init(void)
+{
+#if defined(CONFIG_ARM64)
+	chacha20_use_neon = elf_hwcap & HWCAP_ASIMD;
+#elif defined(CONFIG_ARM)
+	switch (read_cpuid_part()) {
+	case ARM_CPU_PART_CORTEX_A7:
+	case ARM_CPU_PART_CORTEX_A5:
+		/* The Cortex-A7 and Cortex-A5 do not perform well with the NEON
+		 * implementation but do incredibly with the scalar one and use
+		 * less power.
+		 */
+		break;
+	default:
+		chacha20_use_neon = elf_hwcap & HWCAP_NEON;
+	}
+#endif
+}
+
+static inline bool chacha20_arch(struct chacha20_ctx *state, u8 *dst,
+				 const u8 *src, size_t len,
+				 simd_context_t *simd_context)
+{
+#if defined(CONFIG_KERNEL_MODE_NEON)
+	if (chacha20_use_neon && len >= CHACHA20_BLOCK_SIZE * 3 &&
+	    simd_use(simd_context))
+		chacha20_neon(dst, src, len, state->key, state->counter);
+	else
+#endif
+		chacha20_arm(dst, src, len, state->key, state->counter);
+
+	state->counter[0] += (len + 63) / 64;
+	return true;
+}
+
+static inline bool hchacha20_arch(u32 derived_key[CHACHA20_KEY_WORDS],
+				  const u8 nonce[HCHACHA20_NONCE_SIZE],
+				  const u8 key[HCHACHA20_KEY_SIZE],
+				  simd_context_t *simd_context)
+{
+#if defined(CONFIG_ARM)
+	u32 x[] = { CHACHA20_CONSTANT_EXPA,
+		    CHACHA20_CONSTANT_ND_3,
+		    CHACHA20_CONSTANT_2_BY,
+		    CHACHA20_CONSTANT_TE_K,
+		    get_unaligned_le32(key + 0),
+		    get_unaligned_le32(key + 4),
+		    get_unaligned_le32(key + 8),
+		    get_unaligned_le32(key + 12),
+		    get_unaligned_le32(key + 16),
+		    get_unaligned_le32(key + 20),
+		    get_unaligned_le32(key + 24),
+		    get_unaligned_le32(key + 28),
+		    get_unaligned_le32(nonce + 0),
+		    get_unaligned_le32(nonce + 4),
+		    get_unaligned_le32(nonce + 8),
+		    get_unaligned_le32(nonce + 12)
+	};
+	hchacha20_arm(x, derived_key);
+	return true;
+#else
+	return false;
+#endif
+}
diff --git a/lib/zinc/chacha20/chacha20-arm-cryptogams.S b/lib/zinc/chacha20/chacha20-arm.S
similarity index 71%
rename from lib/zinc/chacha20/chacha20-arm-cryptogams.S
rename to lib/zinc/chacha20/chacha20-arm.S
index 770bab469171..5abedafcf129 100644
--- a/lib/zinc/chacha20/chacha20-arm-cryptogams.S
+++ b/lib/zinc/chacha20/chacha20-arm.S
@@ -1,13 +1,475 @@ 
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
+ * Copyright (C) 2018 Google, Inc.
  * Copyright (C) 2015-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
  * Copyright (C) 2006-2017 CRYPTOGAMS by <appro@openssl.org>. All Rights Reserved.
- *
- * This is based in part on Andy Polyakov's implementation from CRYPTOGAMS.
  */
 
 #include <linux/linkage.h>
 
+/*
+ * The following scalar routine was written by Eric Biggers.
+ *
+ * Design notes:
+ *
+ * 16 registers would be needed to hold the state matrix, but only 14 are
+ * available because 'sp' and 'pc' cannot be used.  So we spill the elements
+ * (x8, x9) to the stack and swap them out with (x10, x11).  This adds one
+ * 'ldrd' and one 'strd' instruction per round.
+ *
+ * All rotates are performed using the implicit rotate operand accepted by the
+ * 'add' and 'eor' instructions.  This is faster than using explicit rotate
+ * instructions.  To make this work, we allow the values in the second and last
+ * rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the
+ * wrong rotation amount.  The rotation amount is then fixed up just in time
+ * when the values are used.  'brot' is the number of bits the values in row 'b'
+ * need to be rotated right to arrive at the correct values, and 'drot'
+ * similarly for row 'd'.  (brot, drot) start out as (0, 0) but we make it such
+ * that they end up as (25, 24) after every round.
+ */
+
+	// ChaCha state registers
+	X0	.req	r0
+	X1	.req	r1
+	X2	.req	r2
+	X3	.req	r3
+	X4	.req	r4
+	X5	.req	r5
+	X6	.req	r6
+	X7	.req	r7
+	X8_X10	.req	r8	// shared by x8 and x10
+	X9_X11	.req	r9	// shared by x9 and x11
+	X12	.req	r10
+	X13	.req	r11
+	X14	.req	r12
+	X15	.req	r14
+
+.Lexpand_32byte_k:
+	// "expand 32-byte k"
+	.word	0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
+
+#ifdef __thumb2__
+#  define adrl adr
+#endif
+
+.macro __rev		out, in,  t0, t1, t2
+.if __LINUX_ARM_ARCH__ >= 6
+	rev		\out, \in
+.else
+	lsl		\t0, \in, #24
+	and		\t1, \in, #0xff00
+	and		\t2, \in, #0xff0000
+	orr		\out, \t0, \in, lsr #24
+	orr		\out, \out, \t1, lsl #8
+	orr		\out, \out, \t2, lsr #8
+.endif
+.endm
+
+.macro _le32_bswap	x,  t0, t1, t2
+#ifdef __ARMEB__
+	__rev		\x, \x,  \t0, \t1, \t2
+#endif
+.endm
+
+.macro _le32_bswap_4x	a, b, c, d,  t0, t1, t2
+	_le32_bswap	\a,  \t0, \t1, \t2
+	_le32_bswap	\b,  \t0, \t1, \t2
+	_le32_bswap	\c,  \t0, \t1, \t2
+	_le32_bswap	\d,  \t0, \t1, \t2
+.endm
+
+.macro __ldrd		a, b, src, offset
+#if __LINUX_ARM_ARCH__ >= 6
+	ldrd		\a, \b, [\src, #\offset]
+#else
+	ldr		\a, [\src, #\offset]
+	ldr		\b, [\src, #\offset + 4]
+#endif
+.endm
+
+.macro __strd		a, b, dst, offset
+#if __LINUX_ARM_ARCH__ >= 6
+	strd		\a, \b, [\dst, #\offset]
+#else
+	str		\a, [\dst, #\offset]
+	str		\b, [\dst, #\offset + 4]
+#endif
+.endm
+
+.macro _halfround	a1, b1, c1, d1,  a2, b2, c2, d2
+
+	// a += b; d ^= a; d = rol(d, 16);
+	add		\a1, \a1, \b1, ror #brot
+	add		\a2, \a2, \b2, ror #brot
+	eor		\d1, \a1, \d1, ror #drot
+	eor		\d2, \a2, \d2, ror #drot
+	// drot == 32 - 16 == 16
+
+	// c += d; b ^= c; b = rol(b, 12);
+	add		\c1, \c1, \d1, ror #16
+	add		\c2, \c2, \d2, ror #16
+	eor		\b1, \c1, \b1, ror #brot
+	eor		\b2, \c2, \b2, ror #brot
+	// brot == 32 - 12 == 20
+
+	// a += b; d ^= a; d = rol(d, 8);
+	add		\a1, \a1, \b1, ror #20
+	add		\a2, \a2, \b2, ror #20
+	eor		\d1, \a1, \d1, ror #16
+	eor		\d2, \a2, \d2, ror #16
+	// drot == 32 - 8 == 24
+
+	// c += d; b ^= c; b = rol(b, 7);
+	add		\c1, \c1, \d1, ror #24
+	add		\c2, \c2, \d2, ror #24
+	eor		\b1, \c1, \b1, ror #20
+	eor		\b2, \c2, \b2, ror #20
+	// brot == 32 - 7 == 25
+.endm
+
+.macro _doubleround
+
+	// column round
+
+	// quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13)
+	_halfround	X0, X4, X8_X10, X12,  X1, X5, X9_X11, X13
+
+	// save (x8, x9); restore (x10, x11)
+	__strd		X8_X10, X9_X11, sp, 0
+	__ldrd		X8_X10, X9_X11, sp, 8
+
+	// quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15)
+	_halfround	X2, X6, X8_X10, X14,  X3, X7, X9_X11, X15
+
+	.set brot, 25
+	.set drot, 24
+
+	// diagonal round
+
+	// quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12)
+	_halfround	X0, X5, X8_X10, X15,  X1, X6, X9_X11, X12
+
+	// save (x10, x11); restore (x8, x9)
+	__strd		X8_X10, X9_X11, sp, 8
+	__ldrd		X8_X10, X9_X11, sp, 0
+
+	// quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14)
+	_halfround	X2, X7, X8_X10, X13,  X3, X4, X9_X11, X14
+.endm
+
+.macro _chacha_permute	nrounds
+	.set brot, 0
+	.set drot, 0
+	.rept \nrounds / 2
+	 _doubleround
+	.endr
+.endm
+
+.macro _chacha		nrounds
+
+.Lnext_block\@:
+	// Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN
+	// Registers contain x0-x9,x12-x15.
+
+	// Do the core ChaCha permutation to update x0-x15.
+	_chacha_permute	\nrounds
+
+	add		sp, #8
+	// Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN
+	// Registers contain x0-x9,x12-x15.
+	// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
+
+	// Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15).
+	push		{X8_X10, X9_X11, X12, X13, X14, X15}
+
+	// Load (OUT, IN, LEN).
+	ldr		r14, [sp, #96]
+	ldr		r12, [sp, #100]
+	ldr		r11, [sp, #104]
+
+	orr		r10, r14, r12
+
+	// Use slow path if fewer than 64 bytes remain.
+	cmp		r11, #64
+	blt		.Lxor_slowpath\@
+
+	// Use slow path if IN and/or OUT isn't 4-byte aligned.  Needed even on
+	// ARMv6+, since ldmia and stmia (used below) still require alignment.
+	tst		r10, #3
+	bne		.Lxor_slowpath\@
+
+	// Fast path: XOR 64 bytes of aligned data.
+
+	// Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
+	// Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT.
+	// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
+
+	// x0-x3
+	__ldrd		r8, r9, sp, 32
+	__ldrd		r10, r11, sp, 40
+	add		X0, X0, r8
+	add		X1, X1, r9
+	add		X2, X2, r10
+	add		X3, X3, r11
+	_le32_bswap_4x	X0, X1, X2, X3,  r8, r9, r10
+	ldmia		r12!, {r8-r11}
+	eor		X0, X0, r8
+	eor		X1, X1, r9
+	eor		X2, X2, r10
+	eor		X3, X3, r11
+	stmia		r14!, {X0-X3}
+
+	// x4-x7
+	__ldrd		r8, r9, sp, 48
+	__ldrd		r10, r11, sp, 56
+	add		X4, r8, X4, ror #brot
+	add		X5, r9, X5, ror #brot
+	ldmia		r12!, {X0-X3}
+	add		X6, r10, X6, ror #brot
+	add		X7, r11, X7, ror #brot
+	_le32_bswap_4x	X4, X5, X6, X7,  r8, r9, r10
+	eor		X4, X4, X0
+	eor		X5, X5, X1
+	eor		X6, X6, X2
+	eor		X7, X7, X3
+	stmia		r14!, {X4-X7}
+
+	// x8-x15
+	pop		{r0-r7}			// (x8-x9,x12-x15,x10-x11)
+	__ldrd		r8, r9, sp, 32
+	__ldrd		r10, r11, sp, 40
+	add		r0, r0, r8		// x8
+	add		r1, r1, r9		// x9
+	add		r6, r6, r10		// x10
+	add		r7, r7, r11		// x11
+	_le32_bswap_4x	r0, r1, r6, r7,  r8, r9, r10
+	ldmia		r12!, {r8-r11}
+	eor		r0, r0, r8		// x8
+	eor		r1, r1, r9		// x9
+	eor		r6, r6, r10		// x10
+	eor		r7, r7, r11		// x11
+	stmia		r14!, {r0,r1,r6,r7}
+	ldmia		r12!, {r0,r1,r6,r7}
+	__ldrd		r8, r9, sp, 48
+	__ldrd		r10, r11, sp, 56
+	add		r2, r8, r2, ror #drot	// x12
+	add		r3, r9, r3, ror #drot	// x13
+	add		r4, r10, r4, ror #drot	// x14
+	add		r5, r11, r5, ror #drot	// x15
+	_le32_bswap_4x	r2, r3, r4, r5,  r9, r10, r11
+	  ldr		r9, [sp, #72]		// load LEN
+	eor		r2, r2, r0		// x12
+	eor		r3, r3, r1		// x13
+	eor		r4, r4, r6		// x14
+	eor		r5, r5, r7		// x15
+	  subs		r9, #64			// decrement and check LEN
+	stmia		r14!, {r2-r5}
+
+	beq		.Ldone\@
+
+.Lprepare_for_next_block\@:
+
+	// Stack: x0-x15 OUT IN LEN
+
+	// Increment block counter (x12)
+	add		r8, #1
+
+	// Store updated (OUT, IN, LEN)
+	str		r14, [sp, #64]
+	str		r12, [sp, #68]
+	str		r9, [sp, #72]
+
+	  mov		r14, sp
+
+	// Store updated block counter (x12)
+	str		r8, [sp, #48]
+
+	  sub		sp, #16
+
+	// Reload state and do next block
+	ldmia		r14!, {r0-r11}		// load x0-x11
+	__strd		r10, r11, sp, 8		// store x10-x11 before state
+	ldmia		r14, {r10-r12,r14}	// load x12-x15
+	b		.Lnext_block\@
+
+.Lxor_slowpath\@:
+	// Slow path: < 64 bytes remaining, or unaligned input or output buffer.
+	// We handle it by storing the 64 bytes of keystream to the stack, then
+	// XOR-ing the needed portion with the data.
+
+	// Allocate keystream buffer
+	sub		sp, #64
+	mov		r14, sp
+
+	// Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN
+	// Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0.
+	// x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'.
+
+	// Save keystream for x0-x3
+	__ldrd		r8, r9, sp, 96
+	__ldrd		r10, r11, sp, 104
+	add		X0, X0, r8
+	add		X1, X1, r9
+	add		X2, X2, r10
+	add		X3, X3, r11
+	_le32_bswap_4x	X0, X1, X2, X3,  r8, r9, r10
+	stmia		r14!, {X0-X3}
+
+	// Save keystream for x4-x7
+	__ldrd		r8, r9, sp, 112
+	__ldrd		r10, r11, sp, 120
+	add		X4, r8, X4, ror #brot
+	add		X5, r9, X5, ror #brot
+	add		X6, r10, X6, ror #brot
+	add		X7, r11, X7, ror #brot
+	_le32_bswap_4x	X4, X5, X6, X7,  r8, r9, r10
+	  add		r8, sp, #64
+	stmia		r14!, {X4-X7}
+
+	// Save keystream for x8-x15
+	ldm		r8, {r0-r7}		// (x8-x9,x12-x15,x10-x11)
+	__ldrd		r8, r9, sp, 128
+	__ldrd		r10, r11, sp, 136
+	add		r0, r0, r8		// x8
+	add		r1, r1, r9		// x9
+	add		r6, r6, r10		// x10
+	add		r7, r7, r11		// x11
+	_le32_bswap_4x	r0, r1, r6, r7,  r8, r9, r10
+	stmia		r14!, {r0,r1,r6,r7}
+	__ldrd		r8, r9, sp, 144
+	__ldrd		r10, r11, sp, 152
+	add		r2, r8, r2, ror #drot	// x12
+	add		r3, r9, r3, ror #drot	// x13
+	add		r4, r10, r4, ror #drot	// x14
+	add		r5, r11, r5, ror #drot	// x15
+	_le32_bswap_4x	r2, r3, r4, r5,  r9, r10, r11
+	stmia		r14, {r2-r5}
+
+	// Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
+	// Registers: r8 is block counter, r12 is IN.
+
+	ldr		r9, [sp, #168]		// LEN
+	ldr		r14, [sp, #160]		// OUT
+	cmp		r9, #64
+	  mov		r0, sp
+	movle		r1, r9
+	movgt		r1, #64
+	// r1 is number of bytes to XOR, in range [1, 64]
+
+.if __LINUX_ARM_ARCH__ < 6
+	orr		r2, r12, r14
+	tst		r2, #3			// IN or OUT misaligned?
+	bne		.Lxor_next_byte\@
+.endif
+
+	// XOR a word at a time
+.rept 16
+	subs		r1, #4
+	blt		.Lxor_words_done\@
+	ldr		r2, [r12], #4
+	ldr		r3, [r0], #4
+	eor		r2, r2, r3
+	str		r2, [r14], #4
+.endr
+	b		.Lxor_slowpath_done\@
+.Lxor_words_done\@:
+	ands		r1, r1, #3
+	beq		.Lxor_slowpath_done\@
+
+	// XOR a byte at a time
+.Lxor_next_byte\@:
+	ldrb		r2, [r12], #1
+	ldrb		r3, [r0], #1
+	eor		r2, r2, r3
+	strb		r2, [r14], #1
+	subs		r1, #1
+	bne		.Lxor_next_byte\@
+
+.Lxor_slowpath_done\@:
+	subs		r9, #64
+	add		sp, #96
+	bgt		.Lprepare_for_next_block\@
+
+.Ldone\@:
+.endm	// _chacha
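The slow path above spills a full 64-byte keystream block to the stack and then XORs only the bytes actually needed, a word at a time when the buffers allow it and a byte at a time otherwise; afterwards it re-checks LEN and loops back to .Lprepare_for_next_block while data remains. As a rough reference, here is a minimal C sketch of the same tail handling, assuming the kernel's u8/u32 types; the function name is illustrative and not part of the patch:

#include <linux/string.h>
#include <linux/types.h>

/*
 * Illustrative-only C version of the slow-path XOR: combine at most 64
 * bytes of input with a keystream block that was spilled to a local
 * buffer, word-at-a-time when both pointers are 4-byte aligned,
 * byte-at-a-time otherwise.
 */
static void xor_keystream_tail(u8 *out, const u8 *in, const u8 ks[64],
			       size_t len)
{
	size_t n = len < 64 ? len : 64;
	size_t i = 0;

	/*
	 * The assembly only needs this alignment check on pre-ARMv6 cores
	 * (__LINUX_ARM_ARCH__ < 6), which cannot do unaligned word accesses.
	 */
	if ((((unsigned long)in | (unsigned long)out) & 3) == 0) {
		for (; i + 4 <= n; i += 4) {
			u32 w, k;

			memcpy(&w, in + i, sizeof(w));
			memcpy(&k, ks + i, sizeof(k));
			w ^= k;
			memcpy(out + i, &w, sizeof(w));
		}
	}
	/* Remaining 1-3 bytes, or the whole tail on the byte path. */
	for (; i < n; ++i)
		out[i] = in[i] ^ ks[i];
}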
+
+/*
+ * void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8],
+ *		     const u32 iv[4]);
+ */
+ENTRY(chacha20_arm)
+	cmp		r2, #0			// len == 0?
+	bxeq		lr
+
+	push		{r0-r2,r4-r11,lr}
+
+	// Push state x0-x15 onto stack.
+	// Also store an extra copy of x10-x11 just before the state.
+
+	ldr		r4, [sp, #48]		// iv
+	mov		r0, sp
+	sub		sp, #80
+
+	// iv: x12-x15
+	ldm		r4, {X12,X13,X14,X15}
+	stmdb		r0!, {X12,X13,X14,X15}
+
+	// key: x4-x11
+	__ldrd		X8_X10, X9_X11, r3, 24
+	__strd		X8_X10, X9_X11, sp, 8
+	stmdb		r0!, {X8_X10, X9_X11}
+	ldm		r3, {X4-X9_X11}
+	stmdb		r0!, {X4-X9_X11}
+
+	// constants: x0-x3
+	adrl		X3, .Lexpand_32byte_k
+	ldm		X3, {X0-X3}
+	__strd		X0, X1, sp, 16
+	__strd		X2, X3, sp, 24
+
+	_chacha		20
+
+	add		sp, #76
+	pop		{r4-r11, pc}
+ENDPROC(chacha20_arm)
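For orientation, a hedged sketch of how a caller might drive the scalar entry point above. The wrapper name is hypothetical; the iv[] layout (iv[0] = 32-bit block counter, iv[1..3] = nonce words) follows from iv being loaded directly into x12-x15, with x12 serving as the block counter in the code above.

/* Hypothetical wrapper; only chacha20_arm() itself comes from this patch. */
static void chacha20_arm_example(u8 *dst, const u8 *src, size_t len,
				 const u32 key[8], const u32 nonce[3])
{
	u32 iv[4] = { 0, nonce[0], nonce[1], nonce[2] };	/* counter = 0 */

	/* chacha20_arm() returns immediately when len == 0. */
	chacha20_arm(dst, src, len, key, iv);
}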
+
+/*
+ * void hchacha20_arm(const u32 state[16], u32 out[8]);
+ */
+ENTRY(hchacha20_arm)
+	push		{r1,r4-r11,lr}
+
+	mov		r14, r0
+	ldmia		r14!, {r0-r11}		// load x0-x11
+	push		{r10-r11}		// store x10-x11 to stack
+	ldm		r14, {r10-r12,r14}	// load x12-x15
+	sub		sp, #8
+
+	_chacha_permute	20
+
+	// Skip over (unused0-unused1, x10-x11)
+	add		sp, #16
+
+	// Fix up rotations of x12-x15
+	ror		X12, X12, #drot
+	ror		X13, X13, #drot
+	  pop		{r4}			// load 'out'
+	ror		X14, X14, #drot
+	ror		X15, X15, #drot
+
+	// Store (x0-x3,x12-x15) to 'out'
+	stm		r4, {X0,X1,X2,X3,X12,X13,X14,X15}
+
+	pop		{r4-r11,pc}
+ENDPROC(hchacha20_arm)
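Similarly, a hedged sketch of feeding hchacha20_arm: the input is the usual 16-word ChaCha state (the "expand 32-byte k" constants, eight key words, four nonce words), and the eight output words are state words 0-3 and 12-15 after the permutation, matching the stm at the end of the routine above. The wrapper below is illustrative only.

/* Hypothetical wrapper; only hchacha20_arm() itself comes from this patch. */
static void hchacha20_arm_example(u32 subkey[8], const u32 key[8],
				  const u32 nonce[4])
{
	u32 state[16] = {
		0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
		key[0], key[1], key[2], key[3],
		key[4], key[5], key[6], key[7],
		nonce[0], nonce[1], nonce[2], nonce[3],
	};

	hchacha20_arm(state, subkey);
}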
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+/*
+ * The following NEON routine was ported from Andy Polyakov's implementation
+ * from CRYPTOGAMS. It begins with parts of the CRYPTOGAMS scalar routine,
+ * since certain NEON code paths actually branch to it.
+ */
+
 .text
 #if defined(__thumb2__) || defined(__clang__)
 .syntax	unified
@@ -22,39 +484,6 @@ 
 #define ldrhsb	ldrbhs
 #endif
 
-.align	5
-.Lsigma:
-.long	0x61707865,0x3320646e,0x79622d32,0x6b206574	@ endian-neutral
-.Lone:
-.long	1,0,0,0
-.word	-1
-
-.align	5
-ENTRY(chacha20_arm)
-	ldr	r12,[sp,#0]		@ pull pointer to counter and nonce
-	stmdb	sp!,{r0-r2,r4-r11,lr}
-	cmp	r2,#0			@ len==0?
-#ifdef __thumb2__
-	itt	eq
-#endif
-	addeq	sp,sp,#4*3
-	beq	.Lno_data_arm
-	ldmia	r12,{r4-r7}		@ load counter and nonce
-	sub	sp,sp,#4*(16)		@ off-load area
-#if __LINUX_ARM_ARCH__ < 7 && !defined(__thumb2__)
-	sub	r14,pc,#100		@ .Lsigma
-#else
-	adr	r14,.Lsigma		@ .Lsigma
-#endif
-	stmdb	sp!,{r4-r7}		@ copy counter and nonce
-	ldmia	r3,{r4-r11}		@ load key
-	ldmia	r14,{r0-r3}		@ load sigma
-	stmdb	sp!,{r4-r11}		@ copy key
-	stmdb	sp!,{r0-r3}		@ copy sigma
-	str	r10,[sp,#4*(16+10)]	@ off-load "rx"
-	str	r11,[sp,#4*(16+11)]	@ off-load "rx"
-	b	.Loop_outer_enter
-
 .align	4
 .Loop_outer:
 	ldmia	sp,{r0-r9}		@ load key material
@@ -748,11 +1177,8 @@  ENTRY(chacha20_arm)
 
 .Ldone:
 	add	sp,sp,#4*(32+3)
-.Lno_data_arm:
 	ldmia	sp!,{r4-r11,pc}
-ENDPROC(chacha20_arm)
 
-#ifdef CONFIG_KERNEL_MODE_NEON
 .align	5
 .Lsigma2:
 .long	0x61707865,0x3320646e,0x79622d32,0x6b206574	@ endian-neutral
diff --git a/lib/zinc/chacha20/chacha20-arm64-cryptogams.S b/lib/zinc/chacha20/chacha20-arm64.S
similarity index 100%
rename from lib/zinc/chacha20/chacha20-arm64-cryptogams.S
rename to lib/zinc/chacha20/chacha20-arm64.S
diff --git a/lib/zinc/chacha20/chacha20.c b/lib/zinc/chacha20/chacha20.c
index 4354b874a6a5..fc4f74fca653 100644
--- a/lib/zinc/chacha20/chacha20.c
+++ b/lib/zinc/chacha20/chacha20.c
@@ -16,6 +16,8 @@ 
 
 #if defined(CONFIG_ZINC_ARCH_X86_64)
 #include "chacha20-x86_64-glue.h"
+#elif defined(CONFIG_ZINC_ARCH_ARM) || defined(CONFIG_ZINC_ARCH_ARM64)
+#include "chacha20-arm-glue.h"
 #else
 void __init chacha20_fpu_init(void)
 {
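The hunk above routes both 32-bit and 64-bit ARM builds to the new chacha20-arm-glue.h. That header is not shown in this excerpt, but the dispatch shape implied by the #if/#elif chain is roughly the following sketch; everything in it except chacha20_arm() and chacha20_fpu_init() is hypothetical, and a real glue header would key the decision off actual CPU features rather than a fixed flag.

/*
 * Sketch only -- not the patch's chacha20-arm-glue.h.  It illustrates the
 * pattern of an init-time probe plus an arch hook that returns false when
 * the generic C implementation should be used instead.
 */
static bool chacha20_use_arm_asm;

void __init chacha20_fpu_init(void)
{
	/* A real glue header would probe for NEON or similar here. */
	chacha20_use_arm_asm = true;
}

static bool chacha20_arch_example(u8 *dst, const u8 *src, size_t len,
				  const u32 key[8], const u32 counter[4])
{
	if (!chacha20_use_arm_asm)
		return false;	/* caller falls back to generic C code */

	chacha20_arm(dst, src, len, key, counter);
	return true;
}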