@@ -364,20 +364,32 @@ static int crypto_ahash_op(struct ahash_request *req,
int crypto_ahash_final(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
- crypto_stat_ahash_final(req, ret);
+ crypto_stat_ahash_final(nbytes, ret, alg);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
- crypto_stat_ahash_final(req, ret);
+ crypto_stat_ahash_final(nbytes, ret, alg);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
@@ -385,13 +397,18 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_ahash_op(req, tfm->digest);
- crypto_stat_ahash_final(req, ret);
+ crypto_stat_ahash_final(nbytes, ret, alg);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -35,9 +35,13 @@ static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
+ struct crypto_alg *alg = tfm->base.__crt_alg;
u8 *buf = NULL;
int err;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
if (!buf)
@@ -50,7 +54,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
}
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
- crypto_stat_rng_seed(tfm, err);
+ crypto_stat_rng_seed(alg, err);
out:
kzfree(buf);
return err;
@@ -234,31 +234,31 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
-static inline void crypto_stat_compress(struct acomp_req *req, int ret)
+static inline void crypto_stat_compress(unsigned int slen, int ret,
+ struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&tfm->base.__crt_alg->compress_err_cnt);
+ atomic64_inc(&alg->compress_err_cnt);
} else {
- atomic64_inc(&tfm->base.__crt_alg->compress_cnt);
- atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen);
+ atomic64_inc(&alg->compress_cnt);
+ atomic64_add(slen, &alg->compress_tlen);
}
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
+static inline void crypto_stat_decompress(unsigned int slen, int ret,
+ struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&tfm->base.__crt_alg->compress_err_cnt);
+ atomic64_inc(&alg->compress_err_cnt);
} else {
- atomic64_inc(&tfm->base.__crt_alg->decompress_cnt);
- atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen);
+ atomic64_inc(&alg->decompress_cnt);
+ atomic64_add(slen, &alg->decompress_tlen);
}
+ crypto_alg_put(alg);
#endif
}
@@ -274,10 +274,15 @@ static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int slen = req->slen;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
ret = tfm->compress(req);
- crypto_stat_compress(req, ret);
+ crypto_stat_compress(slen, ret, alg);
return ret;
}
@@ -293,10 +298,15 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int slen = req->slen;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
ret = tfm->decompress(req);
- crypto_stat_decompress(req, ret);
+ crypto_stat_decompress(slen, ret, alg);
return ret;
}
@@ -306,31 +306,31 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
return __crypto_aead_cast(req->base.tfm);
}
-static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret)
+static inline void crypto_stat_aead_encrypt(unsigned int cryptlen,
+ struct crypto_alg *alg, int ret)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&tfm->base.__crt_alg->aead_err_cnt);
+ atomic64_inc(&alg->aead_err_cnt);
} else {
- atomic64_inc(&tfm->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen);
+ atomic64_inc(&alg->encrypt_cnt);
+ atomic64_add(cryptlen, &alg->encrypt_tlen);
}
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
+static inline void crypto_stat_aead_decrypt(unsigned int cryptlen,
+ struct crypto_alg *alg, int ret)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&tfm->base.__crt_alg->aead_err_cnt);
+ atomic64_inc(&alg->aead_err_cnt);
} else {
- atomic64_inc(&tfm->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen);
+ atomic64_inc(&alg->decrypt_cnt);
+ atomic64_add(cryptlen, &alg->decrypt_tlen);
}
+ crypto_alg_put(alg);
#endif
}
@@ -356,13 +356,18 @@ static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
static inline int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_aead_alg(aead)->encrypt(req);
- crypto_stat_aead_encrypt(req, ret);
+ crypto_stat_aead_encrypt(cryptlen, alg, ret);
return ret;
}
@@ -391,15 +396,20 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else if (req->cryptlen < crypto_aead_authsize(aead))
ret = -EINVAL;
else
ret = crypto_aead_alg(aead)->decrypt(req);
- crypto_stat_aead_decrypt(req, ret);
+ crypto_stat_aead_decrypt(cryptlen, alg, ret);
return ret;
}
@@ -271,59 +271,53 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
-static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req,
- int ret)
+static inline void crypto_stat_akcipher_encrypt(unsigned int src_len, int ret,
+ struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ atomic64_inc(&alg->akcipher_err_cnt);
} else {
- atomic64_inc(&tfm->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen);
+ atomic64_inc(&alg->encrypt_cnt);
+ atomic64_add(src_len, &alg->encrypt_tlen);
}
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req,
- int ret)
+static inline void crypto_stat_akcipher_decrypt(unsigned int src_len, int ret,
+ struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ atomic64_inc(&alg->akcipher_err_cnt);
} else {
- atomic64_inc(&tfm->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen);
+ atomic64_inc(&alg->decrypt_cnt);
+ atomic64_add(src_len, &alg->decrypt_tlen);
}
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_akcipher_sign(struct akcipher_request *req,
- int ret)
+static inline void crypto_stat_akcipher_sign(int ret, struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ atomic64_inc(&alg->akcipher_err_cnt);
else
- atomic64_inc(&tfm->base.__crt_alg->sign_cnt);
+ atomic64_inc(&alg->sign_cnt);
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_akcipher_verify(struct akcipher_request *req,
- int ret)
+static inline void crypto_stat_akcipher_verify(int ret, struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ atomic64_inc(&alg->akcipher_err_cnt);
else
- atomic64_inc(&tfm->base.__crt_alg->verify_cnt);
+ atomic64_inc(&alg->verify_cnt);
+ crypto_alg_put(alg);
#endif
}
@@ -341,10 +335,15 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ unsigned int src_len = req->src_len;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(calg);
+#endif
ret = alg->encrypt(req);
- crypto_stat_akcipher_encrypt(req, ret);
+ crypto_stat_akcipher_encrypt(src_len, ret, calg);
return ret;
}
@@ -362,10 +361,15 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ unsigned int src_len = req->src_len;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(calg);
+#endif
ret = alg->decrypt(req);
- crypto_stat_akcipher_decrypt(req, ret);
+ crypto_stat_akcipher_decrypt(src_len, ret, calg);
return ret;
}
@@ -383,10 +387,14 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(calg);
+#endif
ret = alg->sign(req);
- crypto_stat_akcipher_sign(req, ret);
+ crypto_stat_akcipher_sign(ret, calg);
return ret;
}
@@ -404,10 +412,14 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(calg);
+#endif
ret = alg->verify(req);
- crypto_stat_akcipher_verify(req, ret);
+ crypto_stat_akcipher_verify(ret, calg);
return ret;
}
@@ -412,29 +412,29 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
-static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret)
+static inline void crypto_stat_ahash_update(unsigned int nbytes, int ret,
+ struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&tfm->base.__crt_alg->hash_err_cnt);
+ atomic64_inc(&alg->hash_err_cnt);
else
- atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
+ atomic64_add(nbytes, &alg->hash_tlen);
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret)
+static inline void crypto_stat_ahash_final(unsigned int nbytes, int ret,
+ struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&tfm->base.__crt_alg->hash_err_cnt);
+ atomic64_inc(&alg->hash_err_cnt);
} else {
- atomic64_inc(&tfm->base.__crt_alg->hash_cnt);
- atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
+ atomic64_inc(&alg->hash_cnt);
+ atomic64_add(nbytes, &alg->hash_tlen);
}
+ crypto_alg_put(alg);
#endif
}
@@ -552,10 +552,16 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
ret = crypto_ahash_reqtfm(req)->update(req);
- crypto_stat_ahash_update(req, ret);
+ crypto_stat_ahash_update(nbytes, ret, alg);
return ret;
}
@@ -268,39 +268,38 @@ struct kpp_secret {
unsigned short len;
};
-static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret)
+static inline void crypto_stat_kpp_set_secret(struct crypto_alg *alg, int ret)
{
#ifdef CONFIG_CRYPTO_STATS
if (ret)
- atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ atomic64_inc(&alg->kpp_err_cnt);
else
- atomic64_inc(&tfm->base.__crt_alg->setsecret_cnt);
+ atomic64_inc(&alg->setsecret_cnt);
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req,
+static inline void crypto_stat_kpp_generate_public_key(struct crypto_alg *alg,
int ret)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-
if (ret)
- atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ atomic64_inc(&alg->kpp_err_cnt);
else
- atomic64_inc(&tfm->base.__crt_alg->generate_public_key_cnt);
+ atomic64_inc(&alg->generate_public_key_cnt);
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req,
+static inline void crypto_stat_kpp_compute_shared_secret(struct crypto_alg *alg,
int ret)
{
#ifdef CONFIG_CRYPTO_STATS
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-
if (ret)
- atomic64_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ atomic64_inc(&alg->kpp_err_cnt);
else
- atomic64_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt);
+ atomic64_inc(&alg->compute_shared_secret_cnt);
+ crypto_alg_put(alg);
#endif
}
@@ -323,10 +322,14 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(calg);
+#endif
ret = alg->set_secret(tfm, buffer, len);
- crypto_stat_kpp_set_secret(tfm, ret);
+ crypto_stat_kpp_set_secret(calg, ret);
return ret;
}
@@ -347,10 +350,14 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(calg);
+#endif
ret = alg->generate_public_key(req);
- crypto_stat_kpp_generate_public_key(req, ret);
+ crypto_stat_kpp_generate_public_key(calg, ret);
return ret;
}
@@ -368,10 +375,14 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(calg);
+#endif
ret = alg->compute_shared_secret(req);
- crypto_stat_kpp_compute_shared_secret(req, ret);
+ crypto_stat_kpp_compute_shared_secret(calg, ret);
return ret;
}
@@ -122,26 +122,28 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
-static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret)
+static inline void crypto_stat_rng_seed(struct crypto_alg *alg, int ret)
{
#ifdef CONFIG_CRYPTO_STATS
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&tfm->base.__crt_alg->rng_err_cnt);
+ atomic64_inc(&alg->rng_err_cnt);
else
- atomic64_inc(&tfm->base.__crt_alg->seed_cnt);
+ atomic64_inc(&alg->seed_cnt);
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_rng_generate(struct crypto_rng *tfm,
+static inline void crypto_stat_rng_generate(struct crypto_alg *alg,
unsigned int dlen, int ret)
{
#ifdef CONFIG_CRYPTO_STATS
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&tfm->base.__crt_alg->rng_err_cnt);
+ atomic64_inc(&alg->rng_err_cnt);
} else {
- atomic64_inc(&tfm->base.__crt_alg->generate_cnt);
- atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen);
+ atomic64_inc(&alg->generate_cnt);
+ atomic64_add(dlen, &alg->generate_tlen);
}
+ crypto_alg_put(alg);
#endif
}
@@ -163,10 +165,14 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
+ struct crypto_alg *alg = tfm->base.__crt_alg;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
- crypto_stat_rng_generate(tfm, dlen, ret);
+ crypto_stat_rng_generate(alg, dlen, ret);
return ret;
}
@@ -486,7 +486,7 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
return container_of(tfm, struct crypto_sync_skcipher, base);
}
-static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
+static inline void crypto_stat_skcipher_encrypt(unsigned int cryptlen,
int ret, struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
@@ -494,12 +494,13 @@ static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
atomic64_inc(&alg->cipher_err_cnt);
} else {
atomic64_inc(&alg->encrypt_cnt);
- atomic64_add(req->cryptlen, &alg->encrypt_tlen);
+ atomic64_add(cryptlen, &alg->encrypt_tlen);
}
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
+static inline void crypto_stat_skcipher_decrypt(unsigned int cryptlen,
int ret, struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
@@ -507,8 +508,9 @@ static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
atomic64_inc(&alg->cipher_err_cnt);
} else {
atomic64_inc(&alg->decrypt_cnt);
- atomic64_add(req->cryptlen, &alg->decrypt_tlen);
+ atomic64_add(cryptlen, &alg->decrypt_tlen);
}
+ crypto_alg_put(alg);
#endif
}
@@ -526,13 +528,18 @@ static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = tfm->encrypt(req);
- crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg);
+ crypto_stat_skcipher_encrypt(cryptlen, ret, alg);
return ret;
}
@@ -550,13 +557,18 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = tfm->decrypt(req);
- crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg);
+ crypto_stat_skcipher_decrypt(cryptlen, ret, alg);
return ret;
}
@@ -987,35 +987,31 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
return __crypto_ablkcipher_cast(req->base.tfm);
}
-static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
- int ret)
+static inline void crypto_stat_ablkcipher_encrypt(unsigned int nbytes, int ret,
+ struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
+ atomic64_inc(&alg->cipher_err_cnt);
} else {
- atomic64_inc(&crt->base->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
+ atomic64_inc(&alg->encrypt_cnt);
+ atomic64_add(nbytes, &alg->encrypt_tlen);
}
+ crypto_alg_put(alg);
#endif
}
-static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
- int ret)
+static inline void crypto_stat_ablkcipher_decrypt(unsigned int nbytes, int ret,
+ struct crypto_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
-
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
+ atomic64_inc(&alg->cipher_err_cnt);
} else {
- atomic64_inc(&crt->base->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
+ atomic64_inc(&alg->decrypt_cnt);
+ atomic64_add(nbytes, &alg->decrypt_tlen);
}
+ crypto_alg_put(alg);
#endif
}
@@ -1034,10 +1030,15 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+ struct crypto_alg *alg = crt->base->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
ret = crt->encrypt(req);
- crypto_stat_ablkcipher_encrypt(req, ret);
+ crypto_stat_ablkcipher_encrypt(nbytes, ret, alg);
return ret;
}
@@ -1056,10 +1057,15 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+ struct crypto_alg *alg = crt->base->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+#ifdef CONFIG_CRYPTO_STATS
+ crypto_alg_get(alg);
+#endif
ret = crt->decrypt(req);
- crypto_stat_ablkcipher_decrypt(req, ret);
+ crypto_stat_ablkcipher_decrypt(nbytes, ret, alg);
return ret;
}
All crypto_stats functions use the struct xxx_request for feeding stats,
but in some cases this structure could already be freed. To fix this,
the needed parameters (len and alg) are stored before the request is
executed.

Fixes: cac5818c25d0 ("crypto: user - Implement a generic crypto statistics")
Reported-by: syzbot <syzbot+6939a606a5305e9e9799@syzkaller.appspotmail.com>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
---
 crypto/ahash.c             | 23 ++++++++++--
 crypto/rng.c               |  6 +++-
 include/crypto/acompress.h | 38 ++++++++++++--------
 include/crypto/aead.h      | 38 ++++++++++++--------
 include/crypto/akcipher.h  | 72 ++++++++++++++++++++++----------
 include/crypto/hash.h      | 30 +++++++++-------
 include/crypto/kpp.h       | 43 ++++++++++++++---------
 include/crypto/rng.h       | 22 +++++++-----
 include/crypto/skcipher.h  | 24 +++++++++----
 include/linux/crypto.h     | 42 ++++++++++++----------
 10 files changed, 216 insertions(+), 122 deletions(-)