
[2/10] crypto: aead - Count error stats differently

Message ID: E1pSE2H-00BVkZ-8X@formenos.hmeau.com
State: Superseded
Delegated to: Herbert Xu
Series: crypto: api - Restructure stats code

Commit Message

Herbert Xu Feb. 15, 2023, 9:25 a.m. UTC
Move all stat code specific to aead into the aead code.

While we're at it, change the stats so that bytes and counts
are always incremented even in case of error.  This allows the
reference counting to be removed as we can now increment the
counters prior to the operation.

After the operation we simply increase the error count if necessary.
This is safe as errors can only occur synchronously (or rather,
the existing code already ignored asynchronous errors which are
only visible to the callback function).
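
Schematically, the per-operation pattern becomes (encrypt shown, key
checks omitted; in the patch the error test lives in a small
crypto_aead_errstat() helper):

	istat = aead_get_stat(alg);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}

	ret = alg->encrypt(req);

	if (IS_ENABLED(CONFIG_CRYPTO_STATS) &&
	    ret && ret != -EINPROGRESS && ret != -EBUSY)
		atomic64_inc(&istat->err_cnt);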

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---

 crypto/aead.c             |   85 ++++++++++++++++++++++++++++++++++++++++------
 crypto/algapi.c           |   26 --------------
 crypto/crypto_user_stat.c |   21 -----------
 include/crypto/aead.h     |   22 +++++++++++
 include/linux/crypto.h    |   24 ------------
 5 files changed, 96 insertions(+), 82 deletions(-)

Comments

Eric Biggers Feb. 16, 2023, 5:35 a.m. UTC | #1
On Wed, Feb 15, 2023 at 05:25:09PM +0800, Herbert Xu wrote:
>  int crypto_aead_encrypt(struct aead_request *req)
>  {
>  	struct crypto_aead *aead = crypto_aead_reqtfm(req);
> -	struct crypto_alg *alg = aead->base.__crt_alg;
> +	struct aead_alg *alg = crypto_aead_alg(aead);
>  	unsigned int cryptlen = req->cryptlen;

The cryptlen local variable is no longer needed.  Just use req->cryptlen below.

> +	struct crypto_istat_aead *istat;
>  	int ret;
>  
> -	crypto_stats_get(alg);
> +	istat = aead_get_stat(alg);
> +
> +	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
> +		atomic64_inc(&istat->encrypt_cnt);
> +		atomic64_add(cryptlen, &istat->encrypt_tlen);
> +	}
> +

This could just check whether istat is NULL:

	istat = aead_get_stat(alg);
	if (istat) {
		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}

That's simpler, and it makes it clearer that the pointer is not dereferenced
when it is NULL.

Note that aead_get_stat() is an inline function, so the stats code will still be
optimized out when !CONFIG_CRYPTO_STATS.

> +static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
> +{
> +	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
> +		return err;
> +
> +	if (err && err != -EINPROGRESS && err != -EBUSY)
> +		atomic64_inc(&istat->err_cnt);
> +
> +	return err;
> +}
> +
[...]
>  	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
>  		ret = -ENOKEY;
>  	else
> -		ret = crypto_aead_alg(aead)->encrypt(req);
> -	crypto_stats_aead_encrypt(cryptlen, alg, ret);
> -	return ret;
> +		ret = alg->encrypt(req);
> +
> +	return crypto_aead_errstat(istat, ret);

Similarly, istat != NULL could be used instead of CONFIG_CRYPTO_STATS.

IMO, this would also be easier to read if the stats increment was just coded
directly, like it is above, without the crypto_aead_errstat() function:

	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = alg->encrypt(req);

	if (istat && ret && ret != -EINPROGRESS && ret != -EBUSY)
		atomic64_inc(&istat->err_cnt);

	return ret;

Similarly for all the other algorithm types.

- Eric
Eric Biggers Feb. 16, 2023, 5:45 a.m. UTC | #2
A couple more comments:

On Wed, Feb 15, 2023 at 05:25:09PM +0800, Herbert Xu wrote:
> +static int crypto_aead_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
> +	__maybe_unused;
> +static int crypto_aead_report_stat(struct sk_buff *skb, struct crypto_alg *alg)

This could be just:

static int __maybe_unused
crypto_aead_report_stat(struct sk_buff *skb, struct crypto_alg *alg)

> +{
> +	struct aead_alg *aead = container_of(alg, struct aead_alg, base);
> +	struct crypto_istat_aead *istat = aead_get_stat(aead);
> +	struct crypto_stat_aead raead;
> +
> +	memset(&raead, 0, sizeof(raead));
> +
> +	strscpy(raead.type, "aead", sizeof(raead.type));
> +
> +	raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
> +	raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
> +	raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
> +	raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
> +	raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
> +
> +	return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
> +}

But actually it might be better to keep #ifdef-ing this whole function out when
!CONFIG_CRYPTO_STATS, since in that case it contains an unconditional null
pointer dereference.  Yes, it's not executed, but it might be confusing.

>  static int aead_prepare_alg(struct aead_alg *alg)
>  {
> +	struct crypto_istat_aead *istat = aead_get_stat(alg);
>  	struct crypto_alg *base = &alg->base;
>  
>  	if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
> @@ -232,6 +292,9 @@ static int aead_prepare_alg(struct aead_alg *alg)
>  	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
>  	base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
>  
> +	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
> +		memset(istat, 0, sizeof(*istat));
> +

Above is another place that can just do 'if (istat)'.

- Eric
Herbert Xu Feb. 16, 2023, 5:58 a.m. UTC | #3
On Wed, Feb 15, 2023 at 09:35:21PM -0800, Eric Biggers wrote:
> On Wed, Feb 15, 2023 at 05:25:09PM +0800, Herbert Xu wrote:
> >  int crypto_aead_encrypt(struct aead_request *req)
> >  {
> >  	struct crypto_aead *aead = crypto_aead_reqtfm(req);
> > -	struct crypto_alg *alg = aead->base.__crt_alg;
> > +	struct aead_alg *alg = crypto_aead_alg(aead);
> >  	unsigned int cryptlen = req->cryptlen;
> 
> The cryptlen local variable is no longer needed.  Just use req->cryptlen below.

Thanks, I'll remove it.

> This could just check whether istat is NULL:

Yes but that would introduce an unnecessary branch when STATS
are enabled.  I agree that it makes no difference if you don't
enable STATS though.

Cheers,
Herbert Xu Feb. 16, 2023, 6 a.m. UTC | #4
On Wed, Feb 15, 2023 at 09:45:13PM -0800, Eric Biggers wrote:
>
> This could be just:
> 
> static int __maybe_unused
> crypto_aead_report_stat(struct sk_buff *skb, struct crypto_alg *alg)

Good point.  I'll convert them over.

> But actually it might be better to keep #ifdef-ing this whole function out when
> !CONFIG_CRYPTO_STATS, since in that case it contains an unconditional null
> pointer dereference.  Yes, it's not executed, but it might be confusing.

But then I'd need to compile this twice for testing so I'd rather
keep it this way.

> > +	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
> > +		memset(istat, 0, sizeof(*istat));
> > +
> 
> Above is another place that can just do 'if (istat)'.

As I mentioned before, this would create an unnecessary branch
when STATS are enabled (which would presumably be the common case).

Thanks,
Eric Biggers Feb. 16, 2023, 6:31 a.m. UTC | #5
On Thu, Feb 16, 2023 at 02:00:20PM +0800, Herbert Xu wrote:
> > > +	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
> > > +		memset(istat, 0, sizeof(*istat));
> > > +
> > 
> > Above is another place that can just do 'if (istat)'.
> 
> As I mentioned before, this would create an unnecessary branch
> when STATS are enabled (which would presumably be the common case).
> 

I was hoping the compiler would know the pointer is non-NULL, since it's created
through an expression like &foo->bar where bar is at nonzero offset, and foo is
also dereferenced.  Unfortunately it does seem that's not the case, though,
probably because of some of the compiler flags the kernel is compiled with
(-fno-strict-aliasing and -fno-delete-null-pointer-checks, maybe?).

Anyway, if CONFIG_CRYPTO_STATS=y is the common case, that's unfortunate.  Surely
hardly anyone actually uses the feature, and all this stats collection for every
crypto operation is for nothing?

Here's a thread where someone claimed that disabling CONFIG_CRYPTO_STATS
significantly improves performance:
https://lists.ceph.io/hyperkitty/list/ceph-users@ceph.io/thread/44GMO5UGOXDZKFSOQMCPPHYTREUEA3ZI/

IMO this feature should never have been accepted.  But could we at least put the
stats collection behind a static branch that defaults to off?  If someone really
wants to collect stats, they can set a sysctl that turns on the static branch.
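
Roughly something like this -- just a sketch, with made-up names:

	#include <linux/atomic.h>
	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(crypto_stats_enabled);

	static inline void crypto_stat_inc(atomic64_t *ctr)
	{
		/* Patched to a NOP until someone actually asks for stats. */
		if (static_branch_unlikely(&crypto_stats_enabled))
			atomic64_inc(ctr);
	}

	/* A sysctl handler would call static_branch_enable(&crypto_stats_enabled)
	 * when the user turns stats on, so the fast path costs nothing otherwise.
	 */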

- Eric
Herbert Xu Feb. 16, 2023, 8:34 a.m. UTC | #6
On Wed, Feb 15, 2023 at 10:31:27PM -0800, Eric Biggers wrote:
 
> I was hoping the compiler would know the pointer is non-NULL, since it's created
> through an expression like &foo->bar where bar is at nonzero offset, and foo is
> also dereferenced.  Unfortunately it does seem that's not the case, though,
> probably because of some of the compiler flags the kernel is compiled with
> (-fno-strict-aliasing and -fno-delete-null-pointer-checks, maybe?).

I'd be worried if the compiler optimised it away :)

Just because p is not NULL, it does not follow that (p + X) where
X is a small integer is also not NULL.  Sure it happens to be
true in kernel space but that's something that we'd have to explicitly
tell the compiler, and I don't think there is any way for us to
communicate that to it.
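
To make that concrete with the encrypt counter (illustration only,
both variants built with CONFIG_CRYPTO_STATS=y):

	istat = aead_get_stat(alg);		/* == &alg->stat here */

	/* The condition is a compile-time constant, so this is straight-line code: */
	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		atomic64_inc(&istat->encrypt_cnt);

	/* Here the NULL test survives as a real branch, because the compiler
	 * does not get to assume that &alg->stat is non-NULL:
	 */
	if (istat)
		atomic64_inc(&istat->encrypt_cnt);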

> Anyway, if CONFIG_CRYPTO_STATS=y is the common case, that's unfortunate.  Surely

BTW that was just a completely wild guess on my part based on how
distros operate.  I just had a quick look and it seems that Debian
at least disables this option but Fedora leaves it on.

> hardly anyone actually uses the feature, and all this stats collection for every
> crypto operation is for nothing?

I know.

> Here's a thread where someone claimed that disabling CONFIG_CRYPTO_STATS
> significantly improves performance:
> https://lists.ceph.io/hyperkitty/list/ceph-users@ceph.io/thread/44GMO5UGOXDZKFSOQMCPPHYTREUEA3ZI/

Not surprising as it's doing atomic ops on contended memory.  At
least this patch series kills two of those atomic ops.

> IMO this feature should never have been accepted.  But could we at least put the

You're more than welcome to nack such patches in future :)

> stats collection behind a static branch that defaults to off?  If someone really
> wants to collect stats, they can set a sysctl that turns on the static branch.

Yes I would certainly be open to such a patch.  Another avenue to
explore is turning the atomic ops into percpu/local ones similar
to what networking does to its counters.
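
For the record, a rough sketch of what that might look like (purely
hypothetical, not part of this series):

	/* The counters would switch from shared atomic64_t to a per-CPU copy
	 * of the stats struct with local64_t members.
	 */
	struct crypto_istat_aead __percpu *stat;	/* in struct aead_alg */

	/* Update path: purely CPU-local, no contended cache line
	 * (preemption details glossed over).
	 */
	struct crypto_istat_aead *istat = this_cpu_ptr(alg->stat);

	local64_inc(&istat->encrypt_cnt);
	local64_add(req->cryptlen, &istat->encrypt_tlen);

	/* The report path would then sum the per-CPU copies with
	 * for_each_possible_cpu().
	 */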

Thanks,

Patch

diff --git a/crypto/aead.c b/crypto/aead.c
index 16991095270d..a36c3417ff6c 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -8,17 +8,27 @@ 
  */
 
 #include <crypto/internal/aead.h>
+#include <linux/cryptouser.h>
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
-#include <linux/cryptouser.h>
+#include <linux/string.h>
 #include <net/netlink.h>
 
 #include "internal.h"
 
+static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+	return &alg->stat;
+#else
+	return NULL;
+#endif
+}
+
 static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 			    unsigned int keylen)
 {
@@ -80,39 +90,64 @@  int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
 }
 EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
 
+static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
+{
+	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
+		return err;
+
+	if (err && err != -EINPROGRESS && err != -EBUSY)
+		atomic64_inc(&istat->err_cnt);
+
+	return err;
+}
+
 int crypto_aead_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct crypto_alg *alg = aead->base.__crt_alg;
+	struct aead_alg *alg = crypto_aead_alg(aead);
 	unsigned int cryptlen = req->cryptlen;
+	struct crypto_istat_aead *istat;
 	int ret;
 
-	crypto_stats_get(alg);
+	istat = aead_get_stat(alg);
+
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+		atomic64_inc(&istat->encrypt_cnt);
+		atomic64_add(cryptlen, &istat->encrypt_tlen);
+	}
+
 	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
 		ret = -ENOKEY;
 	else
-		ret = crypto_aead_alg(aead)->encrypt(req);
-	crypto_stats_aead_encrypt(cryptlen, alg, ret);
-	return ret;
+		ret = alg->encrypt(req);
+
+	return crypto_aead_errstat(istat, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
 
 int crypto_aead_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct crypto_alg *alg = aead->base.__crt_alg;
+	struct aead_alg *alg = crypto_aead_alg(aead);
 	unsigned int cryptlen = req->cryptlen;
+	struct crypto_istat_aead *istat;
 	int ret;
 
-	crypto_stats_get(alg);
+	istat = aead_get_stat(alg);
+
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
+		atomic64_inc(&istat->decrypt_cnt);
+		atomic64_add(cryptlen, &istat->decrypt_tlen);
+	}
+
 	if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
 		ret = -ENOKEY;
 	else if (req->cryptlen < crypto_aead_authsize(aead))
 		ret = -EINVAL;
 	else
-		ret = crypto_aead_alg(aead)->decrypt(req);
-	crypto_stats_aead_decrypt(cryptlen, alg, ret);
-	return ret;
+		ret = alg->decrypt(req);
+
+	return crypto_aead_errstat(istat, ret);
 }
 EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
 
@@ -188,6 +223,27 @@  static void crypto_aead_free_instance(struct crypto_instance *inst)
 	aead->free(aead);
 }
 
+static int crypto_aead_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
+	__maybe_unused;
+static int crypto_aead_report_stat(struct sk_buff *skb, struct crypto_alg *alg)
+{
+	struct aead_alg *aead = container_of(alg, struct aead_alg, base);
+	struct crypto_istat_aead *istat = aead_get_stat(aead);
+	struct crypto_stat_aead raead;
+
+	memset(&raead, 0, sizeof(raead));
+
+	strscpy(raead.type, "aead", sizeof(raead.type));
+
+	raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
+	raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
+	raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
+	raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
+	raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
+
+	return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
+}
+
 static const struct crypto_type crypto_aead_type = {
 	.extsize = crypto_alg_extsize,
 	.init_tfm = crypto_aead_init_tfm,
@@ -196,6 +252,9 @@  static const struct crypto_type crypto_aead_type = {
 	.show = crypto_aead_show,
 #endif
 	.report = crypto_aead_report,
+#ifdef CONFIG_CRYPTO_STATS
+	.report_stat = crypto_aead_report_stat,
+#endif
 	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
 	.maskset = CRYPTO_ALG_TYPE_MASK,
 	.type = CRYPTO_ALG_TYPE_AEAD,
@@ -219,6 +278,7 @@  EXPORT_SYMBOL_GPL(crypto_alloc_aead);
 
 static int aead_prepare_alg(struct aead_alg *alg)
 {
+	struct crypto_istat_aead *istat = aead_get_stat(alg);
 	struct crypto_alg *base = &alg->base;
 
 	if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
@@ -232,6 +292,9 @@  static int aead_prepare_alg(struct aead_alg *alg)
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
 
+	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
+		memset(istat, 0, sizeof(*istat));
+
 	return 0;
 }
 
diff --git a/crypto/algapi.c b/crypto/algapi.c
index d08f864f08be..f7f7c61d456a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -1051,32 +1051,6 @@  void crypto_stats_get(struct crypto_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_stats_get);
 
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
-			       int ret)
-{
-	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-		atomic64_inc(&alg->stats.aead.err_cnt);
-	} else {
-		atomic64_inc(&alg->stats.aead.encrypt_cnt);
-		atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
-	}
-	crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
-
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
-			       int ret)
-{
-	if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
-		atomic64_inc(&alg->stats.aead.err_cnt);
-	} else {
-		atomic64_inc(&alg->stats.aead.decrypt_cnt);
-		atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
-	}
-	crypto_alg_put(alg);
-}
-EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
-
 void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
 				   struct crypto_alg *alg)
 {
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index 2369814029fa..50ec076507a1 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -28,23 +28,6 @@  struct crypto_dump_info {
 	u16 nlmsg_flags;
 };
 
-static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
-{
-	struct crypto_stat_aead raead;
-
-	memset(&raead, 0, sizeof(raead));
-
-	strscpy(raead.type, "aead", sizeof(raead.type));
-
-	raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt);
-	raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
-	raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
-	raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
-	raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
-
-	return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
-}
-
 static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_stat_cipher rcipher;
@@ -211,10 +194,6 @@  static int crypto_reportstat_one(struct crypto_alg *alg,
 	}
 
 	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
-	case CRYPTO_ALG_TYPE_AEAD:
-		if (crypto_report_aead(skb, alg))
-			goto nla_put_failure;
-		break;
 	case CRYPTO_ALG_TYPE_SKCIPHER:
 		if (crypto_report_cipher(skb, alg))
 			goto nla_put_failure;
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 4a2b7e6e0c1f..35e45b854a6f 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -8,6 +8,7 @@ 
 #ifndef _CRYPTO_AEAD_H
 #define _CRYPTO_AEAD_H
 
+#include <linux/atomic.h>
 #include <linux/container_of.h>
 #include <linux/crypto.h>
 #include <linux/slab.h>
@@ -100,6 +101,22 @@  struct aead_request {
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt:	number of encrypt requests
+ * @encrypt_tlen:	total data size handled by encrypt requests
+ * @decrypt_cnt:	number of decrypt requests
+ * @decrypt_tlen:	total data size handled by decrypt requests
+ * @err_cnt:		number of error for AEAD requests
+ */
+struct crypto_istat_aead {
+	atomic64_t encrypt_cnt;
+	atomic64_t encrypt_tlen;
+	atomic64_t decrypt_cnt;
+	atomic64_t decrypt_tlen;
+	atomic64_t err_cnt;
+};
+
 /**
  * struct aead_alg - AEAD cipher definition
  * @maxauthsize: Set the maximum authentication tag size supported by the
@@ -118,6 +135,7 @@  struct aead_request {
  * @setkey: see struct skcipher_alg
  * @encrypt: see struct skcipher_alg
  * @decrypt: see struct skcipher_alg
+ * @stat: statistics for AEAD algorithm
  * @ivsize: see struct skcipher_alg
  * @chunksize: see struct skcipher_alg
  * @init: Initialize the cryptographic transformation object. This function
@@ -144,6 +162,10 @@  struct aead_alg {
 	int (*init)(struct crypto_aead *tfm);
 	void (*exit)(struct crypto_aead *tfm);
 
+#ifdef CONFIG_CRYPTO_STATS
+	struct crypto_istat_aead stat;
+#endif
+
 	unsigned int ivsize;
 	unsigned int maxauthsize;
 	unsigned int chunksize;
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index bb1d9b0e1647..9eb6fc8ab69c 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -276,22 +276,6 @@  struct compress_alg {
 };
 
 #ifdef CONFIG_CRYPTO_STATS
-/*
- * struct crypto_istat_aead - statistics for AEAD algorithm
- * @encrypt_cnt:	number of encrypt requests
- * @encrypt_tlen:	total data size handled by encrypt requests
- * @decrypt_cnt:	number of decrypt requests
- * @decrypt_tlen:	total data size handled by decrypt requests
- * @err_cnt:		number of error for AEAD requests
- */
-struct crypto_istat_aead {
-	atomic64_t encrypt_cnt;
-	atomic64_t encrypt_tlen;
-	atomic64_t decrypt_cnt;
-	atomic64_t decrypt_tlen;
-	atomic64_t err_cnt;
-};
-
 /*
  * struct crypto_istat_akcipher - statistics for akcipher algorithm
  * @encrypt_cnt:	number of encrypt requests
@@ -463,7 +447,6 @@  struct crypto_istat_rng {
  * @cra_destroy: internally used
  *
  * @stats: union of all possible crypto_istat_xxx structures
- * @stats.aead:		statistics for AEAD algorithm
  * @stats.akcipher:	statistics for akcipher algorithm
  * @stats.cipher:	statistics for cipher algorithm
  * @stats.compress:	statistics for compress algorithm
@@ -505,7 +488,6 @@  struct crypto_alg {
 
 #ifdef CONFIG_CRYPTO_STATS
 	union {
-		struct crypto_istat_aead aead;
 		struct crypto_istat_akcipher akcipher;
 		struct crypto_istat_cipher cipher;
 		struct crypto_istat_compress compress;
@@ -520,8 +502,6 @@  struct crypto_alg {
 #ifdef CONFIG_CRYPTO_STATS
 void crypto_stats_init(struct crypto_alg *alg);
 void crypto_stats_get(struct crypto_alg *alg);
-void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
-void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
 void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
 void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
 void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
@@ -542,10 +522,6 @@  static inline void crypto_stats_init(struct crypto_alg *alg)
 {}
 static inline void crypto_stats_get(struct crypto_alg *alg)
 {}
-static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
-static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
-{}
 static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
 {}
 static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)