new file mode 100644
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/*
+ * Constant values shared between the x86 AES implementations.
+ */
+
+.pushsection .rodata
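+/*
+ * pshufb mask table for ciphertext stealing (CTS): a source byte with its
+ * top bit set (0x80) zeroes the destination byte, so a 16-byte load taken
+ * at an offset into this table yields a mask that shifts and zero-pads
+ * the final partial block.
+ */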
+.align 16
+.Lcts_permute_table:
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
+ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+ .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
+.popsection
+
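+/*
+ * Multiplying an XTS tweak by x in GF(2^128): shift the 128-bit value
+ * left by one bit and, if the bit shifted out was set, XOR in 0x87
+ * (x^7 + x^2 + x + 1, the reduction term of the XTS polynomial). The
+ * constant below encodes that reduction for the little-endian ("ble")
+ * block convention.
+ */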
+.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16
+.align 16
+.Lgf128mul_x_ble_mask:
+ .octa 0x00000000000000010000000000000087
+.previous
new file mode 100644
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Shared glue code between AES implementations, refactored out of the
+ * AES-NI implementation.
+ *
+ * The helper code is inlined for performance: with mitigations for
+ * speculative execution such as retpoline in place, indirect calls
+ * incur measurable overhead.
+ */
+
+#ifndef _AES_HELPER_GLUE_H
+#define _AES_HELPER_GLUE_H
+
+#include <linux/err.h>
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/xts.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
+
+#define AES_ALIGN 16
+#define AES_ALIGN_ATTR __attribute__((__aligned__(AES_ALIGN)))
+#define AES_ALIGN_EXTRA ((AES_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+#define XTS_AES_CTX_SIZE (sizeof(struct aes_xts_ctx) + AES_ALIGN_EXTRA)
+
+/*
+ * Context types of the various AES implementations available on x86.
+ * Implementations that share this glue code add their expanded-key
+ * context types as members of this union.
+ */
+union x86_aes_ctx {
+ struct crypto_aes_ctx aesni;
+};
+
+struct aes_xts_ctx {
+ union x86_aes_ctx tweak_ctx AES_ALIGN_ATTR;
+ union x86_aes_ctx crypt_ctx AES_ALIGN_ATTR;
+};
+
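+/*
+ * The tfm context may only be CRYPTO_MINALIGN-aligned. AES_ALIGN_EXTRA
+ * reserves enough slack in the context size so that the pointer can be
+ * realigned to a 16-byte boundary for aligned SIMD accesses.
+ */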
+static inline void *aes_align_addr(void *addr)
+{
+ return (crypto_tfm_ctx_alignment() >= AES_ALIGN) ? addr : PTR_ALIGN(addr, AES_ALIGN);
+}
+
+static inline struct aes_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
+{
+ return aes_align_addr(crypto_skcipher_ctx(tfm));
+}
+
+static inline int
+xts_setkey_common(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen,
+ int (*fn)(union x86_aes_ctx *ctx, const u8 *in_key, unsigned int key_len))
+{
+ struct aes_xts_ctx *ctx = aes_xts_ctx(tfm);
+ int err;
+
+ err = xts_verify_key(tfm, key, keylen);
+ if (err)
+ return err;
+
+ keylen /= 2;
+
+ /* first half of xts-key is for crypt */
+ err = fn(&ctx->crypt_ctx, key, keylen);
+ if (err)
+ return err;
+
+ /* second half of xts-key is for tweak */
+ return fn(&ctx->tweak_ctx, key + keylen, keylen);
+}
+
+static inline int
+xts_crypt_common(struct skcipher_request *req,
+ int (*crypt_fn)(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv),
+ int (*crypt1_fn)(const void *ctx, u8 *out, const u8 *in))
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct aes_xts_ctx *ctx = aes_xts_ctx(tfm);
+ int tail = req->cryptlen % AES_BLOCK_SIZE;
+ struct skcipher_request subreq;
+ struct skcipher_walk walk;
+ int err;
+
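+	/* XTS requires at least one full block of input. */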
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ if (!walk.nbytes)
+ return err;
+
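+	/*
+	 * A partial trailing block needs ciphertext stealing: process all
+	 * but the last two blocks through a subrequest here, then handle
+	 * the final full block plus the partial tail separately below.
+	 */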
+ if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+ int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+
+ skcipher_walk_abort(&walk);
+
+ skcipher_request_set_tfm(&subreq, tfm);
+ skcipher_request_set_callback(&subreq,
+ skcipher_request_flags(req),
+ NULL, NULL);
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ blocks * AES_BLOCK_SIZE, req->iv);
+ req = &subreq;
+
+ err = skcipher_walk_virt(&walk, req, false);
+ if (!walk.nbytes)
+ return err;
+ } else {
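+		/*
+		 * Either the length is block-aligned, or the whole message
+		 * was mapped in a single walk step and crypt_fn can handle
+		 * the partial final block itself.
+		 */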
+ tail = 0;
+ }
+
+ kernel_fpu_begin();
+
+	/* Encrypt the IV with the tweak key to compute the initial tweak T. */
+ err = crypt1_fn(&ctx->tweak_ctx, walk.iv, walk.iv);
+ if (err) {
+ kernel_fpu_end();
+ return err;
+ }
+
+ while (walk.nbytes > 0) {
+ int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes &= ~(AES_BLOCK_SIZE - 1);
+
+ err = crypt_fn(&ctx->crypt_ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, walk.iv);
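+		/* Drop the FPU context; skcipher_walk_done() may fault or sleep. */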
+ kernel_fpu_end();
+ if (err)
+ return err;
+
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+
+ if (walk.nbytes > 0)
+ kernel_fpu_begin();
+ }
+
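+	/*
+	 * Ciphertext stealing: process the last full block together with
+	 * the partial tail.
+	 */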
+ if (unlikely(tail > 0 && !err)) {
+ struct scatterlist sg_src[2], sg_dst[2];
+ struct scatterlist *src, *dst;
+
+ src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
+ else
+ dst = src;
+
+ skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &subreq, false);
+ if (err)
+ return err;
+
+ kernel_fpu_begin();
+ err = crypt_fn(&ctx->crypt_ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ walk.nbytes, walk.iv);
+ kernel_fpu_end();
+ if (err)
+ return err;
+
+ err = skcipher_walk_done(&walk, 0);
+ }
+ return err;
+}
+
+#endif /* _AES_HELPER_GLUE_H */
@@ -28,6 +28,7 @@
#include <linux/linkage.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
+#include "aes-helper_asm.S"
/*
* The following macros are used to move an (un)aligned 16 byte value to/from
@@ -1934,9 +1935,9 @@ SYM_FUNC_START(aesni_set_key)
SYM_FUNC_END(aesni_set_key)
/*
- * void aesni_enc(const void *ctx, u8 *dst, const u8 *src)
+ * void __aesni_enc(const void *ctx, u8 *dst, const u8 *src)
*/
-SYM_FUNC_START(aesni_enc)
+SYM_FUNC_START(__aesni_enc)
FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
@@ -1955,7 +1956,7 @@ SYM_FUNC_START(aesni_enc)
#endif
FRAME_END
RET
-SYM_FUNC_END(aesni_enc)
+SYM_FUNC_END(__aesni_enc)
/*
* _aesni_enc1: internal ABI
@@ -2123,9 +2124,9 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
SYM_FUNC_END(_aesni_enc4)
/*
- * void aesni_dec (const void *ctx, u8 *dst, const u8 *src)
+ * void __aesni_dec (const void *ctx, u8 *dst, const u8 *src)
*/
-SYM_FUNC_START(aesni_dec)
+SYM_FUNC_START(__aesni_dec)
FRAME_BEGIN
#ifndef __x86_64__
pushl KEYP
@@ -2145,7 +2146,7 @@ SYM_FUNC_START(aesni_dec)
#endif
FRAME_END
RET
-SYM_FUNC_END(aesni_dec)
+SYM_FUNC_END(__aesni_dec)
/*
* _aesni_dec1: internal ABI
@@ -2688,22 +2689,14 @@ SYM_FUNC_START(aesni_cts_cbc_dec)
RET
SYM_FUNC_END(aesni_cts_cbc_dec)
+#ifdef __x86_64__
+
.pushsection .rodata
.align 16
-.Lcts_permute_table:
- .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
- .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
- .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07
- .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f
- .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
- .byte 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80
-#ifdef __x86_64__
.Lbswap_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
-#endif
.popsection
-#ifdef __x86_64__
/*
* _aesni_inc_init: internal ABI
* setup registers used by _aesni_inc
@@ -2818,12 +2811,6 @@ SYM_FUNC_END(aesni_ctr_enc)
#endif
-.section .rodata.cst16.gf128mul_x_ble_mask, "aM", @progbits, 16
-.align 16
-.Lgf128mul_x_ble_mask:
- .octa 0x00000000000000010000000000000087
-.previous
-
/*
* _aesni_gf128mul_x_ble: internal ABI
* Multiply in GF(2^128) for XTS IVs
@@ -2843,10 +2830,10 @@ SYM_FUNC_END(aesni_ctr_enc)
pxor KEY, IV;
/*
- * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
- * const u8 *src, unsigned int len, le128 *iv)
+ * void __aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
+ * const u8 *src, unsigned int len, le128 *iv)
*/
-SYM_FUNC_START(aesni_xts_encrypt)
+SYM_FUNC_START(__aesni_xts_encrypt)
FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
@@ -2995,13 +2982,13 @@ SYM_FUNC_START(aesni_xts_encrypt)
movups STATE, (OUTP)
jmp .Lxts_enc_ret
-SYM_FUNC_END(aesni_xts_encrypt)
+SYM_FUNC_END(__aesni_xts_encrypt)
/*
- * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
- * const u8 *src, unsigned int len, le128 *iv)
+ * void __aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst,
+ * const u8 *src, unsigned int len, le128 *iv)
*/
-SYM_FUNC_START(aesni_xts_decrypt)
+SYM_FUNC_START(__aesni_xts_decrypt)
FRAME_BEGIN
#ifndef __x86_64__
pushl IVP
@@ -3157,4 +3144,4 @@ SYM_FUNC_START(aesni_xts_decrypt)
movups STATE, (OUTP)
jmp .Lxts_dec_ret
-SYM_FUNC_END(aesni_xts_decrypt)
+SYM_FUNC_END(__aesni_xts_decrypt)
@@ -36,33 +36,25 @@
#include <linux/spinlock.h>
#include <linux/static_call.h>
+#include "aes-helper_glue.h"
-#define AESNI_ALIGN 16
-#define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
-#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
#define RFC4106_HASH_SUBKEY_SIZE 16
-#define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
-#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
-#define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
+#define AES_BLOCK_MASK (~(AES_BLOCK_SIZE - 1))
+#define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AES_ALIGN_EXTRA)
/* This data is stored at the end of the crypto_tfm struct.
* It's a type of per "session" data storage location.
* This needs to be 16 byte aligned.
*/
struct aesni_rfc4106_gcm_ctx {
- u8 hash_subkey[16] AESNI_ALIGN_ATTR;
- struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
+ u8 hash_subkey[16] AES_ALIGN_ATTR;
+ struct crypto_aes_ctx aes_key_expanded AES_ALIGN_ATTR;
u8 nonce[4];
};
struct generic_gcmaes_ctx {
- u8 hash_subkey[16] AESNI_ALIGN_ATTR;
- struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
-};
-
-struct aesni_xts_ctx {
- struct crypto_aes_ctx tweak_ctx AESNI_ALIGN_ATTR;
- struct crypto_aes_ctx crypt_ctx AESNI_ALIGN_ATTR;
+ u8 hash_subkey[16] AES_ALIGN_ATTR;
+ struct crypto_aes_ctx aes_key_expanded AES_ALIGN_ATTR;
};
#define GCM_BLOCK_LEN 16
@@ -80,17 +72,10 @@ struct gcm_context_data {
u8 hash_keys[GCM_BLOCK_LEN * 16];
};
-static inline void *aes_align_addr(void *addr)
-{
- if (crypto_tfm_ctx_alignment() >= AESNI_ALIGN)
- return addr;
- return PTR_ALIGN(addr, AESNI_ALIGN);
-}
-
asmlinkage void aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
unsigned int key_len);
-asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
-asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void __aesni_enc(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void __aesni_dec(const void *ctx, u8 *out, const u8 *in);
asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len);
asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
@@ -104,14 +89,20 @@ asmlinkage void aesni_cts_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_cts_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
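+/*
+ * Wrapper matching the crypt1_fn signature used by the shared glue code;
+ * the AES-NI single-block encrypt cannot fail, so it always returns 0.
+ */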
+static inline int aesni_enc(const void *ctx, u8 *out, const u8 *in)
+{
+ __aesni_enc(ctx, out, in);
+ return 0;
+}
+
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
-asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void __aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
-asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
- const u8 *in, unsigned int len, u8 *iv);
+asmlinkage void __aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out,
+ const u8 *in, unsigned int len, u8 *iv);
#ifdef CONFIG_X86_64
@@ -223,11 +214,6 @@ static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
return aes_align_addr(raw_ctx);
}
-static inline struct aesni_xts_ctx *aes_xts_ctx(struct crypto_skcipher *tfm)
-{
- return aes_align_addr(crypto_skcipher_ctx(tfm));
-}
-
static int aes_set_key_common(struct crypto_aes_ctx *ctx,
const u8 *in_key, unsigned int key_len)
{
@@ -261,7 +247,7 @@ static void aesni_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
aes_encrypt(ctx, dst, src);
} else {
kernel_fpu_begin();
- aesni_enc(ctx, dst, src);
+ __aesni_enc(ctx, dst, src);
kernel_fpu_end();
}
}
@@ -274,11 +260,31 @@ static void aesni_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
aes_decrypt(ctx, dst, src);
} else {
kernel_fpu_begin();
- aesni_dec(ctx, dst, src);
+ __aesni_dec(ctx, dst, src);
kernel_fpu_end();
}
}
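+/*
+ * Adapt the AES-NI routines to the function-pointer signatures expected
+ * by the shared glue code; the underlying asm helpers cannot fail.
+ */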
+static inline int aesni_xts_setkey(union x86_aes_ctx *ctx,
+ const u8 *in_key, unsigned int key_len)
+{
+ return aes_set_key_common(&ctx->aesni, in_key, key_len);
+}
+
+static inline int aesni_xts_encrypt(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv)
+{
+ __aesni_xts_encrypt(&ctx->aesni, out, in, len, iv);
+ return 0;
+}
+
+static inline int aesni_xts_decrypt(const union x86_aes_ctx *ctx, u8 *out, const u8 *in,
+ unsigned int len, u8 *iv)
+{
+ __aesni_xts_decrypt(&ctx->aesni, out, in, len, iv);
+ return 0;
+}
+
static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
@@ -524,7 +530,7 @@ static int ctr_crypt(struct skcipher_request *req)
nbytes &= ~AES_BLOCK_MASK;
if (walk.nbytes == walk.total && nbytes > 0) {
- aesni_enc(ctx, keystream, walk.iv);
+ __aesni_enc(ctx, keystream, walk.iv);
crypto_xor_cpy(walk.dst.virt.addr + walk.nbytes - nbytes,
walk.src.virt.addr + walk.nbytes - nbytes,
keystream, nbytes);
@@ -668,8 +674,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
u8 *iv, void *aes_ctx, u8 *auth_tag,
unsigned long auth_tag_len)
{
- u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
- struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
+ u8 databuf[sizeof(struct gcm_context_data) + (AES_ALIGN - 8)] __aligned(8);
+ struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AES_ALIGN);
unsigned long left = req->cryptlen;
struct scatter_walk assoc_sg_walk;
struct skcipher_walk walk;
@@ -824,8 +830,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
- u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
- u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ u8 ivbuf[16 + (AES_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AES_ALIGN);
unsigned int i;
__be32 counter = cpu_to_be32(1);
@@ -852,8 +858,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
- u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
- u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ u8 ivbuf[16 + (AES_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AES_ALIGN);
unsigned int i;
if (unlikely(req->assoclen != 16 && req->assoclen != 20))
@@ -878,126 +884,17 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
- struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- /* first half of xts-key is for crypt */
- err = aes_set_key_common(&ctx->crypt_ctx, key, keylen);
- if (err)
- return err;
-
- /* second half of xts-key is for tweak */
- return aes_set_key_common(&ctx->tweak_ctx, key + keylen, keylen);
-}
-
-static int xts_crypt(struct skcipher_request *req, bool encrypt)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct aesni_xts_ctx *ctx = aes_xts_ctx(tfm);
- int tail = req->cryptlen % AES_BLOCK_SIZE;
- struct skcipher_request subreq;
- struct skcipher_walk walk;
- int err;
-
- if (req->cryptlen < AES_BLOCK_SIZE)
- return -EINVAL;
-
- err = skcipher_walk_virt(&walk, req, false);
- if (!walk.nbytes)
- return err;
-
- if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
- int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
-
- skcipher_walk_abort(&walk);
-
- skcipher_request_set_tfm(&subreq, tfm);
- skcipher_request_set_callback(&subreq,
- skcipher_request_flags(req),
- NULL, NULL);
- skcipher_request_set_crypt(&subreq, req->src, req->dst,
- blocks * AES_BLOCK_SIZE, req->iv);
- req = &subreq;
-
- err = skcipher_walk_virt(&walk, req, false);
- if (!walk.nbytes)
- return err;
- } else {
- tail = 0;
- }
-
- kernel_fpu_begin();
-
- /* calculate first value of T */
- aesni_enc(&ctx->tweak_ctx, walk.iv, walk.iv);
-
- while (walk.nbytes > 0) {
- int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes &= ~(AES_BLOCK_SIZE - 1);
-
- if (encrypt)
- aesni_xts_encrypt(&ctx->crypt_ctx,
- walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, walk.iv);
- else
- aesni_xts_decrypt(&ctx->crypt_ctx,
- walk.dst.virt.addr, walk.src.virt.addr,
- nbytes, walk.iv);
- kernel_fpu_end();
-
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-
- if (walk.nbytes > 0)
- kernel_fpu_begin();
- }
-
- if (unlikely(tail > 0 && !err)) {
- struct scatterlist sg_src[2], sg_dst[2];
- struct scatterlist *src, *dst;
-
- dst = src = scatterwalk_ffwd(sg_src, req->src, req->cryptlen);
- if (req->dst != req->src)
- dst = scatterwalk_ffwd(sg_dst, req->dst, req->cryptlen);
-
- skcipher_request_set_crypt(req, src, dst, AES_BLOCK_SIZE + tail,
- req->iv);
-
- err = skcipher_walk_virt(&walk, &subreq, false);
- if (err)
- return err;
-
- kernel_fpu_begin();
- if (encrypt)
- aesni_xts_encrypt(&ctx->crypt_ctx,
- walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes, walk.iv);
- else
- aesni_xts_decrypt(&ctx->crypt_ctx,
- walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes, walk.iv);
- kernel_fpu_end();
-
- err = skcipher_walk_done(&walk, 0);
- }
- return err;
+ return xts_setkey_common(tfm, key, keylen, aesni_xts_setkey);
}
static int xts_encrypt(struct skcipher_request *req)
{
- return xts_crypt(req, true);
+ return xts_crypt_common(req, aesni_xts_encrypt, aesni_enc);
}
static int xts_decrypt(struct skcipher_request *req)
{
- return xts_crypt(req, false);
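+	/*
+	 * The XTS tweak is always computed with the encryption key
+	 * schedule, so aesni_enc is passed here as well.
+	 */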
+ return xts_crypt_common(req, aesni_xts_decrypt, aesni_enc);
}
static struct crypto_alg aesni_cipher_alg = {
@@ -1152,8 +1049,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
- u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
- u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ u8 ivbuf[16 + (AES_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AES_ALIGN);
__be32 counter = cpu_to_be32(1);
memcpy(iv, req->iv, 12);
@@ -1169,8 +1066,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
- u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
- u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ u8 ivbuf[16 + (AES_ALIGN - 8)] __aligned(8);
+ u8 *iv = PTR_ALIGN(&ivbuf[0], AES_ALIGN);
memcpy(iv, req->iv, 12);
*((__be32 *)(iv+12)) = counter;