@@ -25,6 +25,11 @@ config AS_GFNI
help
Supported by binutils >= 2.30 and LLVM integrated assembler
+config AS_HAS_KEYLOCKER
+ def_bool $(as-instr,encodekey256 %eax$(comma)%eax)
+ help
+ Supported by binutils >= 2.36 and LLVM integrated assembler >= V12
+
config AS_WRUSS
def_bool $(as-instr,wrussq %rax$(comma)(%rbx))
help
@@ -29,6 +29,22 @@ config CRYPTO_AES_NI_INTEL
Architecture: x86 (32-bit and 64-bit) using:
- AES-NI (AES new instructions)
+config CRYPTO_AES_KL
+ bool "Ciphers: AES, modes: XTS (AES-KL)"
+ depends on X86 && 64BIT
+ depends on AS_HAS_KEYLOCKER
+ select CRYPTO_AES_NI_INTEL
+ select X86_KEYLOCKER
+ help
+ Block cipher: AES cipher algorithms
+ Length-preserving ciphers: AES with XTS
+
+ Architecture: x86 (64-bit) using:
+ - AES-KL (AES Key Locker)
+ - AES-NI for a 192-bit key
+
+ See Documentation/arch/x86/keylocker.rst for more details.
+
config CRYPTO_BLOWFISH_X86_64
tristate "Ciphers: Blowfish, modes: ECB, CBC"
depends on X86 && 64BIT
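For reference, selecting the new entry pulls in its dependencies automatically; a resulting .config fragment (illustrative, assuming the toolchain passes the AS_HAS_KEYLOCKER probe) would contain:

CONFIG_AS_HAS_KEYLOCKER=y
CONFIG_X86_KEYLOCKER=y
CONFIG_CRYPTO_AES_NI_INTEL=m
CONFIG_CRYPTO_AES_KL=m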
@@ -50,6 +50,9 @@ obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
+obj-$(CONFIG_CRYPTO_AES_KL) += aeskl-intel.o
+aeskl-intel-y := aeskl-intel_asm.o aeskl-intel_glue.o
+
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
sha1-ssse3-y := sha1_avx2_x86_64_asm.o sha1_ssse3_asm.o sha1_ssse3_glue.o
sha1-ssse3-$(CONFIG_AS_SHA1_NI) += sha1_ni_asm.o
@@ -19,16 +19,17 @@
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
+#include "aeskl-intel_glue.h"
+
#define AES_ALIGN 16
#define AES_ALIGN_ATTR __attribute__((__aligned__(AES_ALIGN)))
#define AES_ALIGN_EXTRA ((AES_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
#define XTS_AES_CTX_SIZE (sizeof(struct aes_xts_ctx) + AES_ALIGN_EXTRA)
-/*
- * Preserve data types for various AES implementations available in x86
- */
+/* Data types for the two AES implementations available in x86 */
union x86_aes_ctx {
struct crypto_aes_ctx aesni;
+ struct aeskl_ctx aeskl;
};
struct aes_xts_ctx {
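The union makes the shared XTS helpers implementation-agnostic: each backend passes a setkey callback that fills its own union member. A minimal sketch of that plumbing (hedged: the real xts_setkey_common() lives in aes-helper_glue.h, which is not part of this excerpt; the tweak_ctx/crypt_ctx split and the call order here are assumptions):

#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

typedef int (*xts_setkey_fn)(union x86_aes_ctx *ctx, const u8 *in_key,
			     unsigned int key_len);

static int xts_setkey_common_sketch(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int keylen, xts_setkey_fn setkey)
{
	struct aes_xts_ctx *ctx = aes_xts_ctx(tfm);
	int err;

	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	keylen /= 2;

	/* first half keys the data-unit cipher, second half the tweak */
	err = setkey(&ctx->crypt_ctx, key, keylen);
	if (err)
		return err;

	return setkey(&ctx->tweak_ctx, key + keylen, keylen);
}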
new file mode 100644
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Implement the AES algorithm using AES Key Locker instructions.
+ *
+ * Most of the code is based on the AES-NI implementation,
+ * aesni-intel_asm.S.
+ */
+
+#include <linux/linkage.h>
+#include <linux/cfi_types.h>
+#include <asm/errno.h>
+#include <asm/inst.h>
+#include <asm/frame.h>
+#include "aes-helper_asm.S"
+
+.text
+
+#define STATE1 %xmm0
+#define STATE2 %xmm1
+#define STATE3 %xmm2
+#define STATE4 %xmm3
+#define STATE5 %xmm4
+#define STATE6 %xmm5
+#define STATE7 %xmm6
+#define STATE8 %xmm7
+#define STATE STATE1
+
+#define IV %xmm9
+#define KEY %xmm10
+#define INC %xmm13
+
+#define IN %xmm8
+
+#define HANDLEP %rdi
+#define OUTP %rsi
+#define KLEN %r9d
+#define INP %rdx
+#define T1 %r10
+#define LEN %rcx
+#define IVP %r8
+
+#define UKEYP OUTP
+#define GF128MUL_MASK %xmm11
+
+/*
+ * void __aeskl_setkey(struct aeskl_ctx *handlep, const u8 *ukeyp,
+ * unsigned int key_len)
+ */
+SYM_FUNC_START(__aeskl_setkey)
+ FRAME_BEGIN
+ movl %edx, 480(HANDLEP)	# key_length, same offset as in crypto_aes_ctx
+ movdqu (UKEYP), STATE1
+ mov $1, %eax
+ cmp $16, %dl
+ je .Lsetkey_128
+
+ movdqu 0x10(UKEYP), STATE2
+ encodekey256 %eax, %eax
+ movdqu STATE4, 0x30(HANDLEP)
+ jmp .Lsetkey_end
+.Lsetkey_128:
+ encodekey128 %eax, %eax
+
+.Lsetkey_end:
+ movdqu STATE1, (HANDLEP)
+ movdqu STATE2, 0x10(HANDLEP)
+ movdqu STATE3, 0x20(HANDLEP)
+
+ FRAME_END
+ RET
+SYM_FUNC_END(__aeskl_setkey)
+
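What the routine leaves behind, inferred from the stores above: encodekey128 yields a 384-bit handle in XMM0-2, encodekey256 a 512-bit one in XMM0-3, and the key length lands at offset 480, i.e. offsetof(struct crypto_aes_ctx, key_length). As a hypothetical C view (the real context type, struct aeskl_ctx, appears later in the series):

/* Illustrative memory image written by __aeskl_setkey(). */
struct aeskl_setkey_image {
	u8  handle[64];		/* 48 bytes used for AES-128, 64 for AES-256 */
	u8  pad[480 - 64];	/* not touched by this routine */
	u32 key_length;		/* stored first: 16 or 32 */
};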
+/*
+ * int __aeskl_enc(const void *handlep, u8 *outp, const u8 *inp)
+ */
+SYM_FUNC_START(__aeskl_enc)
+ FRAME_BEGIN
+ movdqu (INP), STATE
+ movl 480(HANDLEP), KLEN
+
+ cmp $16, KLEN
+ je .Lenc_128
+ aesenc256kl (HANDLEP), STATE
+ jz .Lenc_err
+ xor %rax, %rax
+ jmp .Lenc_end
+.Lenc_128:
+ aesenc128kl (HANDLEP), STATE
+ jz .Lenc_err
+ xor %rax, %rax
+ jmp .Lenc_end
+
+.Lenc_err:
+ mov $(-EINVAL), %rax
+.Lenc_end:
+ movdqu STATE, (OUTP)
+ FRAME_END
+ RET
+SYM_FUNC_END(__aeskl_enc)
+
+/*
+ * XTS implementation
+ */
+
+/*
+ * _aeskl_gf128mul_x_ble: internal ABI
+ * Multiply in GF(2^128) for XTS IVs
+ * input:
+ * IV: current IV
+ * GF128MUL_MASK == mask with 0x87 and 0x01
+ * output:
+ * IV: next IV
+ * changed:
+ * KEY == temporary value
+ *
+ * While based on the AES-NI code, this macro is duplicated here because
+ * of register constraints: e.g., aesencwide256kl implicitly operates on
+ * XMM0-7.
+ */
+#define _aeskl_gf128mul_x_ble() \
+ pshufd $0x13, IV, KEY; \
+ paddq IV, IV; \
+ psrad $31, KEY; \
+ pand GF128MUL_MASK, KEY; \
+ pxor KEY, IV;
+
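In C terms, the macro is the standard XTS tweak update over a little-endian 128-bit value. A standalone sketch (not the kernel's gf128mul API):

#include <stdint.h>

/* Multiply the tweak by x in GF(2^128) with the XTS polynomial
 * x^128 + x^7 + x^2 + x + 1; t[0] is the low qword, t[1] the high. */
static void gf128mul_x_ble_sketch(uint64_t t[2])
{
	uint64_t carry = t[1] >> 63;	/* bit 127 about to shift out */

	t[1] = (t[1] << 1) | (t[0] >> 63);
	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);	/* reduce */
}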
+.macro XTS_ENC_DEC operation
+ FRAME_BEGIN
+ movdqa .Lgf128mul_x_ble_mask(%rip), GF128MUL_MASK
+ movups (IVP), IV
+
+ mov 480(HANDLEP), KLEN
+
+.ifc \operation, dec
+ test $15, LEN	# partial tail means ciphertext stealing
+ jz .Lxts_op8_\@
+ sub $16, LEN	# hold one full block back for the CTS tail
+.endif
+
+.Lxts_op8_\@:
+ sub $128, LEN
+ jl .Lxts_op1_pre_\@
+
+ movdqa IV, STATE1
+ movdqu (INP), INC
+ pxor INC, STATE1
+ movdqu IV, (OUTP)
+
+ _aeskl_gf128mul_x_ble()
+ movdqa IV, STATE2
+ movdqu 0x10(INP), INC
+ pxor INC, STATE2
+ movdqu IV, 0x10(OUTP)
+
+ _aeskl_gf128mul_x_ble()
+ movdqa IV, STATE3
+ movdqu 0x20(INP), INC
+ pxor INC, STATE3
+ movdqu IV, 0x20(OUTP)
+
+ _aeskl_gf128mul_x_ble()
+ movdqa IV, STATE4
+ movdqu 0x30(INP), INC
+ pxor INC, STATE4
+ movdqu IV, 0x30(OUTP)
+
+ _aeskl_gf128mul_x_ble()
+ movdqa IV, STATE5
+ movdqu 0x40(INP), INC
+ pxor INC, STATE5
+ movdqu IV, 0x40(OUTP)
+
+ _aeskl_gf128mul_x_ble()
+ movdqa IV, STATE6
+ movdqu 0x50(INP), INC
+ pxor INC, STATE6
+ movdqu IV, 0x50(OUTP)
+
+ _aeskl_gf128mul_x_ble()
+ movdqa IV, STATE7
+ movdqu 0x60(INP), INC
+ pxor INC, STATE7
+ movdqu IV, 0x60(OUTP)
+
+ _aeskl_gf128mul_x_ble()
+ movdqa IV, STATE8
+ movdqu 0x70(INP), INC
+ pxor INC, STATE8
+ movdqu IV, 0x70(OUTP)
+
+ cmp $16, KLEN
+ je .Lxts_op8_128_\@
+.ifc \operation, dec
+ aesdecwide256kl (HANDLEP)
+.else
+ aesencwide256kl (HANDLEP)
+.endif
+ jz .Lxts_op_err_\@
+ jmp .Lxts_op8_end_\@
+.Lxts_op8_128_\@:
+.ifc \operation, dec
+ aesdecwide128kl (HANDLEP)
+.else
+ aesencwide128kl (HANDLEP)
+.endif
+ jz .Lxts_op_err_\@
+
+.Lxts_op8_end_\@:
+ movdqu 0x00(OUTP), INC
+ pxor INC, STATE1
+ movdqu STATE1, 0x00(OUTP)
+
+ movdqu 0x10(OUTP), INC
+ pxor INC, STATE2
+ movdqu STATE2, 0x10(OUTP)
+
+ movdqu 0x20(OUTP), INC
+ pxor INC, STATE3
+ movdqu STATE3, 0x20(OUTP)
+
+ movdqu 0x30(OUTP), INC
+ pxor INC, STATE4
+ movdqu STATE4, 0x30(OUTP)
+
+ movdqu 0x40(OUTP), INC
+ pxor INC, STATE5
+ movdqu STATE5, 0x40(OUTP)
+
+ movdqu 0x50(OUTP), INC
+ pxor INC, STATE6
+ movdqu STATE6, 0x50(OUTP)
+
+ movdqu 0x60(OUTP), INC
+ pxor INC, STATE7
+ movdqu STATE7, 0x60(OUTP)
+
+ movdqu 0x70(OUTP), INC
+ pxor INC, STATE8
+ movdqu STATE8, 0x70(OUTP)
+
+ _aeskl_gf128mul_x_ble()
+
+ add $128, INP
+ add $128, OUTP
+ test LEN, LEN
+ jnz .Lxts_op8_\@
+
+.Lxts_op_ret_\@:
+ movups IV, (IVP)
+ xor %rax, %rax
+ FRAME_END
+ RET
+
+.Lxts_op1_pre_\@:
+ add $128, LEN
+ jz .Lxts_op_ret_\@
+.ifc \operation, enc
+ sub $16, LEN
+ jl .Lxts_op_cts4_\@
+.endif
+
+.Lxts_op1_\@:
+ movdqu (INP), STATE1
+
+.ifc \operation, dec
+ add $16, INP
+ sub $16, LEN
+ jl .Lxts_op_cts1_\@
+.endif
+
+ pxor IV, STATE1
+
+ cmp $16, KLEN
+ je .Lxts_op1_128_\@
+.ifc \operation, dec
+ aesdec256kl (HANDLEP), STATE1
+.else
+ aesenc256kl (HANDLEP), STATE1
+.endif
+ jz .Lxts_op_err_\@
+ jmp .Lxts_op1_end_\@
+.Lxts_op1_128_\@:
+.ifc \operation, dec
+ aesdec128kl (HANDLEP), STATE1
+.else
+ aesenc128kl (HANDLEP), STATE1
+.endif
+ jz .Lxts_op_err_\@
+
+.Lxts_op1_end_\@:
+ pxor IV, STATE1
+ _aeskl_gf128mul_x_ble()
+
+ test LEN, LEN
+ jz .Lxts_op1_out_\@
+
+.ifc \operation, enc
+ add $16, INP
+ sub $16, LEN
+ jl .Lxts_op_cts1_\@
+.endif
+
+ movdqu STATE1, (OUTP)
+ add $16, OUTP
+ jmp .Lxts_op1_\@
+
+.Lxts_op1_out_\@:
+ movdqu STATE1, (OUTP)
+ jmp .Lxts_op_ret_\@
+
+.Lxts_op_cts4_\@:
+.ifc \operation, enc
+ movdqu STATE8, STATE1
+ sub $16, OUTP
+.endif
+
+.Lxts_op_cts1_\@:
+.ifc \operation, dec
+ movdqa IV, STATE5
+ _aeskl_gf128mul_x_ble()
+
+ pxor IV, STATE1
+
+ cmp $16, KLEN
+ je .Lxts_dec1_cts_pre_128_\@
+ aesdec256kl (HANDLEP), STATE1
+ jz .Lxts_op_err_\@
+ jmp .Lxts_dec1_cts_pre_end_\@
+.Lxts_dec1_cts_pre_128_\@:
+ aesdec128kl (HANDLEP), STATE1
+ jz .Lxts_op_err_\@
+.Lxts_dec1_cts_pre_end_\@:
+ pxor IV, STATE1
+.endif
+
+ lea .Lcts_permute_table(%rip), T1
+ add LEN, INP /* rewind input pointer */
+ add $16, LEN /* # bytes in final block */
+ movups (INP), IN
+
+ mov T1, IVP
+ add $32, IVP
+ add LEN, T1
+ sub LEN, IVP
+ add OUTP, LEN
+
+ movups (T1), STATE2
+ movaps STATE1, STATE3
+ pshufb STATE2, STATE1
+ movups STATE1, (LEN)
+
+ movups (IVP), STATE1
+ pshufb STATE1, IN
+ pblendvb STATE3, IN
+ movaps IN, STATE1
+
+.ifc \operation, dec
+ pxor STATE5, STATE1
+.else
+ pxor IV, STATE1
+.endif
+
+ cmp $16, KLEN
+ je .Lxts_op1_cts_128_\@
+.ifc \operation, dec
+ aesdec256kl (HANDLEP), STATE1
+.else
+ aesenc256kl (HANDLEP), STATE1
+.endif
+ jz .Lxts_op_err_\@
+ jmp .Lxts_op1_cts_end_\@
+.Lxts_op1_cts_128_\@:
+.ifc \operation, dec
+ aesdec128kl (HANDLEP), STATE1
+.else
+ aesenc128kl (HANDLEP), STATE1
+.endif
+ jz .Lxts_op_err_\@
+
+.Lxts_op1_cts_end_\@:
+.ifc \operation, dec
+ pxor STATE5, STATE1
+.else
+ pxor IV, STATE1
+.endif
+ movups STATE1, (OUTP)
+ xor %rax, %rax
+ FRAME_END
+ RET
+
+.Lxts_op_err_\@:
+ mov $(-EINVAL), %rax
+ FRAME_END
+ RET
+.endm
+
+/*
+ * int __aeskl_xts_encrypt(const struct aeskl_ctx *handlep, u8 *outp,
+ * const u8 *inp, unsigned int len, le128 *ivp)
+ */
+SYM_FUNC_START(__aeskl_xts_encrypt)
+ XTS_ENC_DEC enc
+SYM_FUNC_END(__aeskl_xts_encrypt)
+
+/*
+ * int __aeskl_xts_decrypt(const struct aeskl_ctx *handlep, u8 *outp,
+ * const u8 *inp, unsigned int len, le128 *ivp)
+ */
+SYM_FUNC_START(__aeskl_xts_decrypt)
+ XTS_ENC_DEC dec
+SYM_FUNC_END(__aeskl_xts_decrypt)
+
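Stepping back from the asm: each pass of the 128-byte loop stages eight tweaks in the output buffer, whitens eight input blocks, runs a single wide AES instruction, then un-whitens with the staged tweaks. A C paraphrase of that structure (illustrative only; wide8() stands in for aes{enc,dec}wide{128,256}kl, gf128mul_x_ble_sketch() is from the earlier sketch, and little-endian byte order is assumed, as on x86):

#include <stdint.h>
#include <string.h>

static void xts_wide8(void (*wide8)(const void *handle, uint8_t st[8][16]),
		      const void *handle, uint8_t *out, const uint8_t *in,
		      uint64_t tweak[2])
{
	uint8_t st[8][16];
	int i, j;

	for (i = 0; i < 8; i++) {
		memcpy(out + 16 * i, tweak, 16);	/* stage tweak i */
		for (j = 0; j < 16; j++)		/* whiten the input */
			st[i][j] = in[16 * i + j] ^ out[16 * i + j];
		gf128mul_x_ble_sketch(tweak);		/* advance the tweak */
	}

	wide8(handle, st);	/* eight blocks at once, like XMM0-7 */

	for (i = 0; i < 8; i++)
		for (j = 0; j < 16; j++)		/* un-whiten */
			out[16 * i + j] ^= st[i][j];
}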
new file mode 100644
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for AES Key Locker instructions. This file contains the
+ * glue code; the actual AES implementation is in aeskl-intel_asm.S.
+ *
+ * Most of the code is based on the AES-NI glue code, aesni-intel_glue.c.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/xts.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/simd.h>
+#include <asm/simd.h>
+#include <asm/cpu_device_id.h>
+#include <asm/fpu/api.h>
+#include <asm/keylocker.h>
+
+#include "aes-helper_glue.h"
+#include "aesni-intel_glue.h"
+
+asmlinkage void __aeskl_setkey(struct aeskl_ctx *ctx, const u8 *in_key, unsigned int keylen);
+
+asmlinkage int __aeskl_enc(const void *ctx, u8 *out, const u8 *in);
+
+asmlinkage int __aeskl_xts_encrypt(const struct aeskl_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv);
+asmlinkage int __aeskl_xts_decrypt(const struct aeskl_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv);
+
+/*
+ * The wrapping key may be lost across sleep states if a hardware
+ * failure occurs; the feature is then soft-disabled. Whether the key
+ * is still valid can be retrieved via valid_keylocker().
+ *
+ * Since this disabling can happen at any time, check availability on
+ * every use, together with kernel_fpu_begin().
+ */
+
+static int aeskl_setkey(union x86_aes_ctx *ctx, const u8 *in_key, unsigned int keylen)
+{
+ int err;
+
+ if (!crypto_simd_usable())
+ return -EBUSY;
+
+ err = aes_check_keylen(keylen);
+ if (err)
+ return err;
+
+ if (unlikely(keylen == AES_KEYSIZE_192)) {
+ pr_warn_once("AES-KL does not support 192-bit key. Use AES-NI.\n");
+ kernel_fpu_begin();
+ aesni_set_key(&ctx->aesni, in_key, keylen);
+ kernel_fpu_end();
+ return 0;
+ }
+
+ if (!valid_keylocker())
+ return -ENODEV;
+
+ kernel_fpu_begin();
+ __aeskl_setkey(&ctx->aeskl, in_key, keylen);
+ kernel_fpu_end();
+ return 0;
+}
+
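One consequence of the fallback is that a single transform accepts all three AES key sizes. A hedged usage sketch that pins the AES-KL driver by name (whether plain "xts(aes)" resolves here depends on priority; this entry registers at 200):

#include <linux/err.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>

static int try_aeskl_192(void)
{
	u8 key[2 * AES_KEYSIZE_192] = {};	/* XTS takes two key halves */
	struct crypto_skcipher *tfm;
	int err;

	tfm = crypto_alloc_skcipher("xts-aes-aeskl", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 192-bit halves take the AES-NI path inside aeskl_setkey(). */
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	crypto_free_skcipher(tfm);
	return err;
}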
+static inline int aeskl_enc(const void *ctx, u8 *out, const u8 *in)
+{
+ if (!valid_keylocker())
+ return -ENODEV;
+
+ return __aeskl_enc(ctx, out, in);
+}
+
+static inline int aeskl_xts_encrypt(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv)
+{
+ if (!valid_keylocker())
+ return -ENODEV;
+
+ return __aeskl_xts_encrypt(&ctx->aeskl, out, in, len, iv);
+}
+
+static inline int aeskl_xts_decrypt(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv)
+{
+ if (!valid_keylocker())
+ return -ENODEV;
+
+ return __aeskl_xts_decrypt(&ctx->aeskl, out, in, len, iv);
+}
+
+static int xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ return xts_setkey_common(tfm, key, keylen, aeskl_setkey);
+}
+
+static inline u32 xts_keylen(struct skcipher_request *req)
+{
+ struct aes_xts_ctx *ctx = aes_xts_ctx(crypto_skcipher_reqtfm(req));
+
+ return ctx->crypt_ctx.aeskl.key_length;
+}
+
+static int xts_encrypt(struct skcipher_request *req)
+{
+ u32 keylen = xts_keylen(req);
+
+ if (likely(keylen != AES_KEYSIZE_192))
+ return xts_crypt_common(req, aeskl_xts_encrypt, aeskl_enc);
+ else
+ return xts_crypt_common(req, aesni_xts_encrypt, aesni_enc);
+}
+
+static int xts_decrypt(struct skcipher_request *req)
+{
+ u32 keylen = xts_keylen(req);
+ /* The tweak is always encrypted, so pass aeskl_enc() here too. */
+ if (likely(keylen != AES_KEYSIZE_192))
+ return xts_crypt_common(req, aeskl_xts_decrypt, aeskl_enc);
+ else
+ return xts_crypt_common(req, aesni_xts_decrypt, aesni_enc);
+}
+
+static struct skcipher_alg aeskl_skciphers[] = {
+ {
+ .base = {
+ .cra_name = "__xts(aes)",
+ .cra_driver_name = "__xts-aes-aeskl",
+ .cra_priority = 200,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = XTS_AES_CTX_SIZE,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .walksize = 2 * AES_BLOCK_SIZE,
+ .setkey = xts_setkey,
+ .encrypt = xts_encrypt,
+ .decrypt = xts_decrypt,
+ }
+};
+
+static struct simd_skcipher_alg *aeskl_simd_skciphers[ARRAY_SIZE(aeskl_skciphers)];
+
+static int __init aeskl_init(void)
+{
+ u32 eax, ebx, ecx, edx;
+
+ if (!valid_keylocker())
+ return -ENODEV;
+
+ cpuid_count(KEYLOCKER_CPUID, 0, &eax, &ebx, &ecx, &edx);
+ if (!(ebx & KEYLOCKER_CPUID_EBX_WIDE))
+ return -ENODEV;
+
+ /*
+ * AES-KL itself does not rely on AES-NI, but it does not support
+ * 192-bit keys. To keep full AES key-size coverage, the driver
+ * falls back to AES-NI for those keys, so require it here.
+ */
+ if (!boot_cpu_has(X86_FEATURE_AES))
+ return -ENODEV;
+
+ return simd_register_skciphers_compat(aeskl_skciphers, ARRAY_SIZE(aeskl_skciphers),
+ aeskl_simd_skciphers);
+}
+
+static void __exit aeskl_exit(void)
+{
+ simd_unregister_skciphers(aeskl_skciphers, ARRAY_SIZE(aeskl_skciphers),
+ aeskl_simd_skciphers);
+}
+
+late_initcall(aeskl_init);
+module_exit(aeskl_exit);
+
+MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, AES Key Locker implementation");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("aes");
new file mode 100644
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _AESKL_INTEL_GLUE_H
+#define _AESKL_INTEL_GLUE_H
+
+#include <crypto/aes.h>
+#include <linux/types.h>
+
+#define AESKL_AAD_SIZE 16
+#define AESKL_TAG_SIZE 16
+#define AESKL_CIPHERTEXT_MAX AES_KEYSIZE_256
+
+/* The Key Locker handle is an encoded form of the AES key. */
+struct aeskl_handle {
+ u8 additional_authdata[AESKL_AAD_SIZE];
+ u8 integrity_tag[AESKL_TAG_SIZE];
+ u8 cipher_text[AESKL_CIPHERTEXT_MAX];
+};
+
+/*
+ * Key Locker does not support a 192-bit key size, so the driver has to
+ * read the key size before choosing an implementation. Keep the offset
+ * of the 'key_length' field compatible with struct crypto_aes_ctx so
+ * that the probe works regardless of which context type was populated.
+ */
+#define AESKL_CTX_RESERVED (sizeof(struct crypto_aes_ctx) - sizeof(struct aeskl_handle) \
+ - sizeof(u32))
+
+struct aeskl_ctx {
+ struct aeskl_handle handle;
+ u8 reserved[AESKL_CTX_RESERVED];
+ u32 key_length;
+};
+
+#endif /* _AESKL_INTEL_GLUE_H */
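The layout contract described above can be enforced at build time. A hedged sketch (not part of the patch) that could sit beside these definitions:

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void aeskl_ctx_layout_check(void)
{
	/* union x86_aes_ctx overlays both types, so the sizes must agree
	 * and the key-size probe must find key_length at one offset. */
	BUILD_BUG_ON(sizeof(struct aeskl_ctx) != sizeof(struct crypto_aes_ctx));
	BUILD_BUG_ON(offsetof(struct aeskl_ctx, key_length) !=
		     offsetof(struct crypto_aes_ctx, key_length));
}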
@@ -37,6 +37,7 @@
#include <linux/static_call.h>
#include "aes-helper_glue.h"
+#include "aesni-intel_glue.h"
#define RFC4106_HASH_SUBKEY_SIZE 16
#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
@@ -72,9 +73,6 @@ struct gcm_context_data {
u8 hash_keys[GCM_BLOCK_LEN * 16];
};
-asmlinkage void aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
- unsigned int key_len);
-asmlinkage void __aesni_enc(const void *ctx, u8 *out, const u8 *in);
asmlinkage void __aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len);
@@ -89,21 +87,9 @@ asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
-static inline int aesni_enc(const void *ctx, u8 *out, const u8 *in)
-{
- __aesni_enc(ctx, out, in);
- return 0;
-}
-
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
-asmlinkage void __aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, unsigned int len, u8 *iv);
-
-asmlinkage void __aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, unsigned int len, u8 *iv);
-
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
@@ -271,20 +257,6 @@ static inline int aesni_xts_setkey(union x86_aes_ctx *ctx,
return aes_set_key_common(&ctx->aesni, in_key, key_len);
}
-static inline int aesni_xts_encrypt(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
- unsigned int len, u8 *iv)
-{
- __aesni_xts_encrypt(&ctx->aesni, out, in, len, iv);
- return 0;
-}
-
-static inline int aesni_xts_decrypt(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
- unsigned int len, u8 *iv)
-{
- __aesni_xts_decrypt(&ctx->aesni, out, in, len, iv);
- return 0;
-}
-
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
new file mode 100644
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * These are AES-NI functions that are used by the AES-KL code as a
+ * fallback when it is given a 192-bit key. Key Locker does not support
+ * 192-bit keys.
+ */
+
+#ifndef _AESNI_INTEL_GLUE_H
+#define _AESNI_INTEL_GLUE_H
+
+asmlinkage void aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
+asmlinkage void __aesni_enc(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void __aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void __aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
+
+static inline int aesni_enc(const void *ctx, u8 *out, const u8 *in)
+{
+ __aesni_enc(ctx, out, in);
+ return 0;
+}
+
+static inline int aesni_xts_encrypt(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv)
+{
+ __aesni_xts_encrypt(&ctx->aesni, out, in, len, iv);
+ return 0;
+}
+
+static inline int aesni_xts_decrypt(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv)
+{
+ __aesni_xts_decrypt(&ctx->aesni, out, in, len, iv);
+ return 0;
+}
+
+#endif /* _AESNI_INTEL_GLUE_H */
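Why the wrappers exist at all: the shared XTS path needs int-returning callbacks, since AES-KL can fail at runtime (a revoked handle sets ZF and the asm returns -EINVAL), while the AES-NI routines cannot fail. The callback shapes the helpers presumably expect (an assumption; aes-helper_glue.h is not in this excerpt):

typedef int (*xts_crypt_fn)(const union x86_aes_ctx *ctx, u8 *out,
			    const u8 *in, unsigned int len, u8 *iv);
typedef int (*xts_encrypt_iv_fn)(const void *ctx, u8 *out, const u8 *in);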