
[v2] evm: Fix a small race in init_desc()

Message ID 20200512174706.GA298379@mwanda (mailing list archive)
State New, archived
Series [v2] evm: Fix a small race in init_desc()

Commit Message

Dan Carpenter May 12, 2020, 5:47 p.m. UTC
This patch avoids a kernel panic caused by dereferencing an error pointer
returned by crypto_alloc_shash(). It occurs especially when many files
require an unsupported algorithm, since that increases the likelihood of
hitting the following race condition.

Imagine we have two threads and in the first thread crypto_alloc_shash()
fails and returns an error pointer.

		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
		if (IS_ERR(*tfm)) {
			rc = PTR_ERR(*tfm); <--- FIRST THREAD HERE!
			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
			*tfm = NULL;

And the second thread is here:

	if (*tfm == NULL) {  <--- SECOND THREAD HERE!
		mutex_lock(&mutex);
		if (*tfm)
			goto out;

Since "*tfm" is non-NULL, we assume that it is valid and that leads to
a crash when it dereferences "*tfm".

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
                                                             ^^^^
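
For illustration only (this sketch is not part of the patch): ERR_PTR()
encodes a negative errno in the pointer value itself, so an error pointer
is non-NULL and only IS_ERR() catches it.  A minimal sketch, using a
hypothetical "example_tfm" pointer and an assumed -ENOENT failure:

	#include <crypto/hash.h>
	#include <linux/err.h>

	static struct crypto_shash *example_tfm;	/* hypothetical */

	static void example(void)
	{
		/* what the failing allocation briefly leaves behind */
		example_tfm = ERR_PTR(-ENOENT);

		if (example_tfm != NULL) {
			/* reached: an error pointer is not NULL ... */
		}
		if (IS_ERR(example_tfm)) {
			/* ... and only this check recognizes it as an error */
		}
	}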

This patch fixes the problem by introducing a temporary "tmp_tfm" and
only setting "*tfm" at the very end, after everything has succeeded.  The
other change is that I reversed the initial "if (*tfm == NULL) {" condition
and pulled the code out by one indent level.
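
In generic form the fix is the usual "build privately, publish last"
variant of double-checked locking.  A self-contained sketch of the pattern
(hypothetical "struct foo", "shared" and "alloc_and_init_foo()" names; the
actual patch follows below):

	#include <linux/err.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct foo { int initialized; };	/* hypothetical payload */

	static struct foo *shared;		/* hypothetical shared slot */
	static DEFINE_MUTEX(lock);

	/* hypothetical constructor that can fail */
	static struct foo *alloc_and_init_foo(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);
		f->initialized = 1;
		return f;
	}

	static struct foo *get_shared(void)
	{
		struct foo *tmp;

		if (shared)			/* fast path: already published */
			return shared;

		mutex_lock(&lock);
		if (shared)			/* somebody beat us to it */
			goto unlock;

		tmp = alloc_and_init_foo();
		if (IS_ERR(tmp)) {
			mutex_unlock(&lock);
			return tmp;		/* "shared" stays NULL, never an error pointer */
		}
		shared = tmp;			/* publish only on full success */
	unlock:
		mutex_unlock(&lock);
		return shared;
	}

The key property is that "shared" is only ever NULL or a fully initialized
object, never an error pointer, so the unlocked fast-path check cannot hand
out garbage.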

Cc: stable@vger.kernel.org
Fixes: d46eb3699502b ("evm: crypto hash replaced by shash")
Reported-by: Roberto Sassu <roberto.sassu@huawei.com>
Reported-by: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
---
v2: I folded my patch together with Roberto's

 security/integrity/evm/evm_crypto.c | 44 ++++++++++++++---------------
 1 file changed, 22 insertions(+), 22 deletions(-)

Comments

Roberto Sassu May 14, 2020, 6:47 a.m. UTC | #1
> From: Dan Carpenter [mailto:dan.carpenter@oracle.com]
> Sent: Tuesday, May 12, 2020 7:47 PM
> This patch avoids a kernel panic due to accessing an error pointer set by
> crypto_alloc_shash(). It occurs especially when there are many files that
> require an unsupported algorithm, as it would increase the likelihood of
> the following race condition.
> 
> Imagine we have two threads and in the first thread crypto_alloc_shash()
> fails and returns an error pointer.
> 
> 		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
> 		if (IS_ERR(*tfm)) {
> 			rc = PTR_ERR(*tfm); <--- FIRST THREAD HERE!
> 			pr_err("Can not allocate %s (reason: %ld)\n", algo,
> rc);
> 			*tfm = NULL;
> 
> And the second thread is here:
> 
> 	if (*tfm == NULL) {  <--- SECOND THREAD HERE!
> 		mutex_lock(&mutex);
> 		if (*tfm)
> 			goto out;
> 
> Since "*tfm" is non-NULL, we assume that it is valid and that leads to
> a crash when it dereferences "*tfm".
> 
> 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
>                                                              ^^^^
> 
> This patch fixes the problem by introducing a temporary "tmp_tfm" and
> only setting "*tfm" at the very end after everything has succeeded.  The
> other change is that I reversed the initial "if (!*tfm) {" condition and
> pull the code in one indent level.
> 
> Cc: stable@vger.kernel.org
> Fixes: d46eb3699502b ("evm: crypto hash replaced by shash")
> Reported-by: Roberto Sassu <roberto.sassu@huawei.com>
> Reported-by: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>
> Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>

Acked-by: Roberto Sassu <roberto.sassu@huawei.com>

Roberto

HUAWEI TECHNOLOGIES Duesseldorf GmbH, HRB 56063
Managing Director: Li Peng, Li Jian, Shi Yanli


> ---
> v2: I folded mine patch together with Roberto's
> 
>  security/integrity/evm/evm_crypto.c | 44 ++++++++++++++---------------
>  1 file changed, 22 insertions(+), 22 deletions(-)
> 
> diff --git a/security/integrity/evm/evm_crypto.c
> b/security/integrity/evm/evm_crypto.c
> index 35682852ddea9..c9f7206591b30 100644
> --- a/security/integrity/evm/evm_crypto.c
> +++ b/security/integrity/evm/evm_crypto.c
> @@ -73,7 +73,7 @@ static struct shash_desc *init_desc(char type, uint8_t
> hash_algo)
>  {
>  	long rc;
>  	const char *algo;
> -	struct crypto_shash **tfm;
> +	struct crypto_shash **tfm, *tmp_tfm;
>  	struct shash_desc *desc;
> 
>  	if (type == EVM_XATTR_HMAC) {
> @@ -91,31 +91,31 @@ static struct shash_desc *init_desc(char type, uint8_t
> hash_algo)
>  		algo = hash_algo_name[hash_algo];
>  	}
> 
> -	if (*tfm == NULL) {
> -		mutex_lock(&mutex);
> -		if (*tfm)
> -			goto out;
> -		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
> -		if (IS_ERR(*tfm)) {
> -			rc = PTR_ERR(*tfm);
> -			pr_err("Can not allocate %s (reason: %ld)\n", algo,
> rc);
> -			*tfm = NULL;
> +	if (*tfm)
> +		goto alloc;
> +	mutex_lock(&mutex);
> +	if (*tfm)
> +		goto unlock;
> +
> +	tmp_tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
> +	if (IS_ERR(tmp_tfm)) {
> +		pr_err("Can not allocate %s (reason: %ld)\n", algo,
> +		       PTR_ERR(tmp_tfm));
> +		mutex_unlock(&mutex);
> +		return ERR_CAST(tmp_tfm);
> +	}
> +	if (type == EVM_XATTR_HMAC) {
> +		rc = crypto_shash_setkey(tmp_tfm, evmkey, evmkey_len);
> +		if (rc) {
> +			crypto_free_shash(tmp_tfm);
>  			mutex_unlock(&mutex);
>  			return ERR_PTR(rc);
>  		}
> -		if (type == EVM_XATTR_HMAC) {
> -			rc = crypto_shash_setkey(*tfm, evmkey,
> evmkey_len);
> -			if (rc) {
> -				crypto_free_shash(*tfm);
> -				*tfm = NULL;
> -				mutex_unlock(&mutex);
> -				return ERR_PTR(rc);
> -			}
> -		}
> -out:
> -		mutex_unlock(&mutex);
>  	}
> -
> +	*tfm = tmp_tfm;
> +unlock:
> +	mutex_unlock(&mutex);
> +alloc:
>  	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
>  			GFP_KERNEL);
>  	if (!desc)
> --
> 2.26.2
Krzysztof Struczynski May 14, 2020, 7:11 a.m. UTC | #2
> -----Original Message-----
> From: Roberto Sassu
> Sent: Thursday, May 14, 2020 8:47 AM
> To: Dan Carpenter <dan.carpenter@oracle.com>; Mimi Zohar
> <zohar@linux.ibm.com>; Krzysztof Struczynski
> <krzysztof.struczynski@huawei.com>
> Cc: James Morris <jmorris@namei.org>; Serge E. Hallyn <serge@hallyn.com>;
> Dmitry Kasatkin <dmitry.kasatkin@nokia.com>; linux-
> integrity@vger.kernel.org; linux-security-module@vger.kernel.org; kernel-
> janitors@vger.kernel.org
> Subject: RE: [PATCH v2] evm: Fix a small race in init_desc()
> 
> > From: Dan Carpenter [mailto:dan.carpenter@oracle.com]
> > Sent: Tuesday, May 12, 2020 7:47 PM
> > This patch avoids a kernel panic due to accessing an error pointer set
> > by crypto_alloc_shash(). It occurs especially when there are many
> > files that require an unsupported algorithm, as it would increase the
> > likelihood of the following race condition.
> >
> > Imagine we have two threads and in the first thread
> > crypto_alloc_shash() fails and returns an error pointer.
> >
> > 		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
> > 		if (IS_ERR(*tfm)) {
> > 			rc = PTR_ERR(*tfm); <--- FIRST THREAD HERE!
> > 			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
> > 			*tfm = NULL;
> >
> > And the second thread is here:
> >
> > 	if (*tfm == NULL) {  <--- SECOND THREAD HERE!
> > 		mutex_lock(&mutex);
> > 		if (*tfm)
> > 			goto out;
> >
> > Since "*tfm" is non-NULL, we assume that it is valid and that leads to
> > a crash when it dereferences "*tfm".
> >
> > 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
> >                                                              ^^^^
> >
> > This patch fixes the problem by introducing a temporary "tmp_tfm" and
> > only setting "*tfm" at the very end after everything has succeeded.
> > The other change is that I reversed the initial "if (!*tfm) {"
> > condition and pull the code in one indent level.
> >
> > Cc: stable@vger.kernel.org
> > Fixes: d46eb3699502b ("evm: crypto hash replaced by shash")
> > Reported-by: Roberto Sassu <roberto.sassu@huawei.com>
> > Reported-by: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>
> > Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
> 
> Acked-by: Roberto Sassu <roberto.sassu@huawei.com>

Acked-by: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>

Krzysztof
> 
> Roberto
> 
> HUAWEI TECHNOLOGIES Duesseldorf GmbH, HRB 56063 Managing Director: Li
> Peng, Li Jian, Shi Yanli
> 
> 
> > ---
> > v2: I folded mine patch together with Roberto's
> >
> >  security/integrity/evm/evm_crypto.c | 44
> > ++++++++++++++---------------
> >  1 file changed, 22 insertions(+), 22 deletions(-)
> >
> > diff --git a/security/integrity/evm/evm_crypto.c
> > b/security/integrity/evm/evm_crypto.c
> > index 35682852ddea9..c9f7206591b30 100644
> > --- a/security/integrity/evm/evm_crypto.c
> > +++ b/security/integrity/evm/evm_crypto.c
> > @@ -73,7 +73,7 @@ static struct shash_desc *init_desc(char type,
> > uint8_t
> > hash_algo)
> >  {
> >  	long rc;
> >  	const char *algo;
> > -	struct crypto_shash **tfm;
> > +	struct crypto_shash **tfm, *tmp_tfm;
> >  	struct shash_desc *desc;
> >
> >  	if (type == EVM_XATTR_HMAC) {
> > @@ -91,31 +91,31 @@ static struct shash_desc *init_desc(char type,
> > uint8_t
> > hash_algo)
> >  		algo = hash_algo_name[hash_algo];
> >  	}
> >
> > -	if (*tfm == NULL) {
> > -		mutex_lock(&mutex);
> > -		if (*tfm)
> > -			goto out;
> > -		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
> > -		if (IS_ERR(*tfm)) {
> > -			rc = PTR_ERR(*tfm);
> > -			pr_err("Can not allocate %s (reason: %ld)\n", algo,
> > rc);
> > -			*tfm = NULL;
> > +	if (*tfm)
> > +		goto alloc;
> > +	mutex_lock(&mutex);
> > +	if (*tfm)
> > +		goto unlock;
> > +
> > +	tmp_tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
> > +	if (IS_ERR(tmp_tfm)) {
> > +		pr_err("Can not allocate %s (reason: %ld)\n", algo,
> > +		       PTR_ERR(tmp_tfm));
> > +		mutex_unlock(&mutex);
> > +		return ERR_CAST(tmp_tfm);
> > +	}
> > +	if (type == EVM_XATTR_HMAC) {
> > +		rc = crypto_shash_setkey(tmp_tfm, evmkey, evmkey_len);
> > +		if (rc) {
> > +			crypto_free_shash(tmp_tfm);
> >  			mutex_unlock(&mutex);
> >  			return ERR_PTR(rc);
> >  		}
> > -		if (type == EVM_XATTR_HMAC) {
> > -			rc = crypto_shash_setkey(*tfm, evmkey,
> > evmkey_len);
> > -			if (rc) {
> > -				crypto_free_shash(*tfm);
> > -				*tfm = NULL;
> > -				mutex_unlock(&mutex);
> > -				return ERR_PTR(rc);
> > -			}
> > -		}
> > -out:
> > -		mutex_unlock(&mutex);
> >  	}
> > -
> > +	*tfm = tmp_tfm;
> > +unlock:
> > +	mutex_unlock(&mutex);
> > +alloc:
> >  	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
> >  			GFP_KERNEL);
> >  	if (!desc)
> > --
> > 2.26.2
Mimi Zohar May 14, 2020, 6:21 p.m. UTC | #3
On Thu, 2020-05-14 at 07:11 +0000, Krzysztof Struczynski wrote:
> > > From: Dan Carpenter [mailto:dan.carpenter@oracle.com]
> > > This patch avoids a kernel panic due to accessing an error pointer set
> > > by crypto_alloc_shash(). It occurs especially when there are many
> > > files that require an unsupported algorithm, as it would increase the
> > > likelihood of the following race condition.
> > >
> > > Imagine we have two threads and in the first thread
> > > crypto_alloc_shash() fails and returns an error pointer.
> > >
> > > 		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
> > > 		if (IS_ERR(*tfm)) {
> > > 			rc = PTR_ERR(*tfm); <--- FIRST THREAD HERE!
> > > 			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
> > > 			*tfm = NULL;
> > >
> > > And the second thread is here:
> > >
> > > 	if (*tfm == NULL) {  <--- SECOND THREAD HERE!
> > > 		mutex_lock(&mutex);
> > > 		if (*tfm)
> > > 			goto out;
> > >
> > > Since "*tfm" is non-NULL, we assume that it is valid and that leads to
> > > a crash when it dereferences "*tfm".
> > >
> > > 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
> > >                                                              ^^^^
> > >
> > > This patch fixes the problem by introducing a temporary "tmp_tfm" and
> > > only setting "*tfm" at the very end after everything has succeeded.
> > > The other change is that I reversed the initial "if (!*tfm) {"
> > > condition and pull the code in one indent level.
> > >
> > > Cc: stable@vger.kernel.org
> > > Fixes: d46eb3699502b ("evm: crypto hash replaced by shash")
> > > Reported-by: Roberto Sassu <roberto.sassu@huawei.com>
> > > Reported-by: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>
> > > Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
> > 
> > Acked-by: Roberto Sassu <roberto.sassu@huawei.com>
> 
> Acked-by: Krzysztof Struczynski <krzysztof.struczynski@huawei.com>

Thanks, Roberto and Krzysztof.

This patch is now queued in the "fixes" branch.

Mimi

Patch

diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 35682852ddea9..c9f7206591b30 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -73,7 +73,7 @@  static struct shash_desc *init_desc(char type, uint8_t hash_algo)
 {
 	long rc;
 	const char *algo;
-	struct crypto_shash **tfm;
+	struct crypto_shash **tfm, *tmp_tfm;
 	struct shash_desc *desc;
 
 	if (type == EVM_XATTR_HMAC) {
@@ -91,31 +91,31 @@  static struct shash_desc *init_desc(char type, uint8_t hash_algo)
 		algo = hash_algo_name[hash_algo];
 	}
 
-	if (*tfm == NULL) {
-		mutex_lock(&mutex);
-		if (*tfm)
-			goto out;
-		*tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
-		if (IS_ERR(*tfm)) {
-			rc = PTR_ERR(*tfm);
-			pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
-			*tfm = NULL;
+	if (*tfm)
+		goto alloc;
+	mutex_lock(&mutex);
+	if (*tfm)
+		goto unlock;
+
+	tmp_tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
+	if (IS_ERR(tmp_tfm)) {
+		pr_err("Can not allocate %s (reason: %ld)\n", algo,
+		       PTR_ERR(tmp_tfm));
+		mutex_unlock(&mutex);
+		return ERR_CAST(tmp_tfm);
+	}
+	if (type == EVM_XATTR_HMAC) {
+		rc = crypto_shash_setkey(tmp_tfm, evmkey, evmkey_len);
+		if (rc) {
+			crypto_free_shash(tmp_tfm);
 			mutex_unlock(&mutex);
 			return ERR_PTR(rc);
 		}
-		if (type == EVM_XATTR_HMAC) {
-			rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
-			if (rc) {
-				crypto_free_shash(*tfm);
-				*tfm = NULL;
-				mutex_unlock(&mutex);
-				return ERR_PTR(rc);
-			}
-		}
-out:
-		mutex_unlock(&mutex);
 	}
-
+	*tfm = tmp_tfm;
+unlock:
+	mutex_unlock(&mutex);
+alloc:
 	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
 			GFP_KERNEL);
 	if (!desc)
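
For context, callers of init_desc() are expected to check the ERR_PTR
return before using the descriptor.  A simplified caller sketch, modelled
loosely on evm_calc_hmac_or_hash() in the same file (names and details of
the real call sites may differ):

	static int example_hash(char type, uint8_t hash_algo)
	{
		struct shash_desc *desc;
		int rc;

		desc = init_desc(type, hash_algo);
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		rc = crypto_shash_update(desc, (const u8 *)"x", 1);
		/* ... further updates, then crypto_shash_final() ... */

		kfree(desc);
		return rc;
	}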