@@ -191,7 +191,7 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io);
 static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 
 /*
- * Use this to access cipher attributes that are the same for each CPU.
+ * Use this to access cipher attributes that are independent of the key.
  */
 static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
 {
@@ -310,10 +310,11 @@ static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 	return err;
 }
 
-/* Set up per cpu cipher state */
-static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
-					     struct dm_target *ti,
-					     u8 *salt, unsigned saltsize)
+/* Allocate the cipher for ESSIV */
+static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
+						struct dm_target *ti,
+						const u8 *salt,
+						unsigned int saltsize)
 {
 	struct crypto_cipher *essiv_tfm;
 	int err;
@@ -393,8 +394,8 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 	cc->iv_gen_private.essiv.salt = salt;
 	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
-	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-				    crypto_ahash_digestsize(hash_tfm));
+	essiv_tfm = alloc_essiv_cipher(cc, ti, salt,
+				       crypto_ahash_digestsize(hash_tfm));
 	if (IS_ERR(essiv_tfm)) {
 		crypt_iv_essiv_dtr(cc);
 		return PTR_ERR(essiv_tfm);