Patchwork [v3] crypto: algif_aead - skip SGL entries with NULL page

login
register
mail settings
Submitter Stephan Mueller
Date Nov. 10, 2017, 10:04 a.m.
Message ID <9325029.81qbAra0rM@positron.chronox.de>
Download mbox | patch
Permalink /patch/10052785/
State Accepted
Delegated to: Herbert Xu
Headers show

Comments

Stephan Mueller - Nov. 10, 2017, 10:04 a.m.
Hi Herbert,

I missed the termination of the outer loop of list_for_each_entry_safe.

The patch was tested in x86 64-bit and 32-bit environments.

---8<---

The TX SGL may contain SGL entries that are assigned a NULL page. This
may happen if a multi-stage AIO operation is performed where the data
for each stage is pointed to by one SGL entry. Upon completion of that
stage, af_alg_pull_tsgl will assign NULL to the SGL entry.

The NULL cipher used to copy the AAD from TX SGL to the destination
buffer, however, cannot handle the case where the SGL starts with an SGL
entry having a NULL page. Thus, the code needs to advance the start
pointer into the SGL to the first non-NULL entry.

This fixes a crash visible on Intel x86 32-bit systems when using the
libkcapi test suite.

Fixes: 72548b093ee38 ("crypto: algif_aead - copy AAD from src to dst")
Signed-off-by: Stephan Mueller <smueller@chronox.de>
---
 crypto/algif_aead.c | 33 ++++++++++++++++++++++++---------
 1 file changed, 24 insertions(+), 9 deletions(-)
Herbert Xu - Nov. 24, 2017, 7:37 a.m.
On Fri, Nov 10, 2017 at 11:04:52AM +0100, Stephan Müller wrote:
> Hi Herbert,
> 
> I missed the termination of the outer loop of list_for_each_entry_safe.
> 
> The patch was tested on x86 64 and 32 bit environments.
> 
> ---8<---
> 
> The TX SGL may contain SGL entries that are assigned a NULL page. This
> may happen if a multi-stage AIO operation is performed where the data
> for each stage is pointed to by one SGL entry. Upon completion of that
> stage, af_alg_pull_tsgl will assign NULL to the SGL entry.
> 
> The NULL cipher used to copy the AAD from TX SGL to the destination
> buffer, however, cannot handle the case where the SGL starts with an SGL
> entry having a NULL page. Thus, the code needs to advance the start
> pointer into the SGL to the first non-NULL entry.
> 
> This fixes a crash visible on Intel x86 32 bit using the libkcapi test
> suite.
> 
> Fixes: 72548b093ee38 ("crypto: algif_aead - copy AAD from src to dst")
> Signed-off-by: Stephan Mueller <smueller@chronox.de>

Patch applied.  Thanks.
Stephan Mueller - Nov. 24, 2017, 4:09 p.m.
Am Freitag, 24. November 2017, 08:37:28 CET schrieb Herbert Xu:

Hi Herbert,

> On Fri, Nov 10, 2017 at 11:04:52AM +0100, Stephan Müller wrote:
> > Hi Herbert,
> > 
> > I missed the termination of the outer loop of list_for_each_entry_safe.
> > 
> > The patch was tested on x86 64 and 32 bit environments.
> > 
> > ---8<---
> > 
> > The TX SGL may contain SGL entries that are assigned a NULL page. This
> > may happen if a multi-stage AIO operation is performed where the data
> > for each stage is pointed to by one SGL entry. Upon completion of that
> > stage, af_alg_pull_tsgl will assign NULL to the SGL entry.
> > 
> > The NULL cipher used to copy the AAD from TX SGL to the destination
> > buffer, however, cannot handle the case where the SGL starts with an SGL
> > entry having a NULL page. Thus, the code needs to advance the start
> > pointer into the SGL to the first non-NULL entry.
> > 
> > This fixes a crash visible on Intel x86 32 bit using the libkcapi test
> > suite.
> > 
> > Fixes: 72548b093ee38 ("crypto: algif_aead - copy AAD from src to dst")
> > Signed-off-by: Stephan Mueller <smueller@chronox.de>
> 
> Patch applied.  Thanks.

Would it make sense to feed it to stable?

Ciao
Stephan

Patch

diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 6cdd4fb08335..7822e2fecb0b 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -101,10 +101,10 @@  static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	struct aead_tfm *aeadc = pask->private;
 	struct crypto_aead *tfm = aeadc->aead;
 	struct crypto_skcipher *null_tfm = aeadc->null_tfm;
-	unsigned int as = crypto_aead_authsize(tfm);
+	unsigned int i, as = crypto_aead_authsize(tfm);
 	struct af_alg_async_req *areq;
-	struct af_alg_tsgl *tsgl;
-	struct scatterlist *src;
+	struct af_alg_tsgl *tsgl, *tmp;
+	struct scatterlist *rsgl_src, *tsgl_src = NULL;
 	int err = 0;
 	size_t used = 0;		/* [in]  TX bufs to be en/decrypted */
 	size_t outlen = 0;		/* [out] RX bufs produced by kernel */
@@ -178,7 +178,22 @@  static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	}
 
 	processed = used + ctx->aead_assoclen;
-	tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
+	list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+		for (i = 0; i < tsgl->cur; i++) {
+			struct scatterlist *process_sg = tsgl->sg + i;
+
+			if (!(process_sg->length) || !sg_page(process_sg))
+				continue;
+			tsgl_src = process_sg;
+			break;
+		}
+		if (tsgl_src)
+			break;
+	}
+	if (processed && !tsgl_src) {
+		err = -EFAULT;
+		goto free;
+	}
 
 	/*
 	 * Copy of AAD from source to destination
@@ -194,7 +209,7 @@  static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 	 */
 
 	/* Use the RX SGL as source (and destination) for crypto op. */
-	src = areq->first_rsgl.sgl.sg;
+	rsgl_src = areq->first_rsgl.sgl.sg;
 
 	if (ctx->enc) {
 		/*
@@ -207,7 +222,7 @@  static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		 *	    v	   v
 		 * RX SGL: AAD || PT || Tag
 		 */
-		err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
 					   areq->first_rsgl.sgl.sg, processed);
 		if (err)
 			goto free;
@@ -225,7 +240,7 @@  static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 		 */
 
 		 /* Copy AAD || CT to RX SGL buffer for in-place operation. */
-		err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+		err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
 					   areq->first_rsgl.sgl.sg, outlen);
 		if (err)
 			goto free;
@@ -257,11 +272,11 @@  static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
 				 areq->tsgl);
 		} else
 			/* no RX SGL present (e.g. authentication only) */
-			src = areq->tsgl;
+			rsgl_src = areq->tsgl;
 	}
 
 	/* Initialize the crypto operation */
-	aead_request_set_crypt(&areq->cra_u.aead_req, src,
+	aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
 			       areq->first_rsgl.sgl.sg, used, ctx->iv);
 	aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
 	aead_request_set_tfm(&areq->cra_u.aead_req, tfm);