
[v2,10/29] crypto: powerpc/p10-aes-gcm - simplify handling of linear associated data

Message ID 20241230001418.74739-11-ebiggers@kernel.org (mailing list archive)
State Under Review
Delegated to: Herbert Xu
Series crypto: scatterlist handling improvements

Commit Message

Eric Biggers Dec. 30, 2024, 12:13 a.m. UTC
From: Eric Biggers <ebiggers@google.com>

p10_aes_gcm_crypt() is abusing the scatter_walk API to get the virtual
address for the first source scatterlist element.  But this code is only
built for PPC64, which is a !HIGHMEM platform, and it can read past a
page boundary from the address returned by scatterwalk_map(), which means
it already assumes the address is from the kernel's direct map.  Thus,
just use sg_virt() instead to get the same result in a simpler way.
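
For context, on a !HIGHMEM kernel the two approaches resolve to the same
direct-map address.  A minimal sketch of the equivalence, simplified from
include/linux/scatterlist.h and the scatterwalk helpers (illustrative,
not the exact kernel source):

	/* sg_virt(): virtual address of an sg element's data */
	static inline void *sg_virt(struct scatterlist *sg)
	{
		return page_address(sg_page(sg)) + sg->offset;
	}

	/*
	 * scatterwalk_map() returns kmap_atomic(page) + offset, and on a
	 * !HIGHMEM kernel kmap_atomic(page) reduces to page_address(page),
	 * so both calls yield the same pointer for the first element.
	 */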

Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Danny Tsen <dtsen@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Naveen N Rao <naveen@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: linuxppc-dev@lists.ozlabs.org
Signed-off-by: Eric Biggers <ebiggers@google.com>
---

This patch is part of a long series touching many files, so I have
limited the Cc list on the full series.  If you want the full series and
did not receive it, please retrieve it from lore.kernel.org.

 arch/powerpc/crypto/aes-gcm-p10-glue.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

Comments

Christophe Leroy Jan. 2, 2025, 11:50 a.m. UTC | #1
On 30/12/2024 at 01:13, Eric Biggers wrote:
> From: Eric Biggers <ebiggers@google.com>
> 
> p10_aes_gcm_crypt() is abusing the scatter_walk API to get the virtual
> address for the first source scatterlist element.  But this code is only
> built for PPC64, which is a !HIGHMEM platform, and it can read past a
> page boundary from the address returned by scatterwalk_map(), which means
> it already assumes the address is from the kernel's direct map.  Thus,
> just use sg_virt() instead to get the same result in a simpler way.
> 
> Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
> Cc: Danny Tsen <dtsen@linux.ibm.com>
> Cc: Michael Ellerman <mpe@ellerman.id.au>
> Cc: Naveen N Rao <naveen@kernel.org>
> Cc: Nicholas Piggin <npiggin@gmail.com>
> Cc: linuxppc-dev@lists.ozlabs.org
> Signed-off-by: Eric Biggers <ebiggers@google.com>
> ---
> 
> This patch is part of a long series touching many files, so I have
> limited the Cc list on the full series.  If you want the full series and
> did not receive it, please retrieve it from lore.kernel.org.
> 
>   arch/powerpc/crypto/aes-gcm-p10-glue.c | 8 ++------
>   1 file changed, 2 insertions(+), 6 deletions(-)
> 
> diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
> index f37b3d13fc53..2862c3cf8e41 100644
> --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c
> +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
> @@ -212,11 +212,10 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
>   	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
>   	u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
>   	struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
>   	u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
>   	struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
> -	struct scatter_walk assoc_sg_walk;
>   	struct skcipher_walk walk;
>   	u8 *assocmem = NULL;
>   	u8 *assoc;
>   	unsigned int cryptlen = req->cryptlen;
>   	unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
> @@ -232,12 +231,11 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
>   	memset(ivbuf, 0, sizeof(ivbuf));
>   	memcpy(iv, riv, GCM_IV_SIZE);
>   
>   	/* Linearize assoc, if not already linear */
>   	if (req->src->length >= assoclen && req->src->length) {
> -		scatterwalk_start(&assoc_sg_walk, req->src);
> -		assoc = scatterwalk_map(&assoc_sg_walk);
> +		assoc = sg_virt(req->src); /* ppc64 is !HIGHMEM */
>   	} else {
>   		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
>   			      GFP_KERNEL : GFP_ATOMIC;
>   
>   		/* assoc can be any length, so must be on heap */
> @@ -251,13 +249,11 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
>   
>   	vsx_begin();
>   	gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
>   	vsx_end();
>   
> -	if (!assocmem)
> -		scatterwalk_unmap(assoc);
> -	else
> +	if (assocmem)
>   		kfree(assocmem);

kfree() accepts a NULL pointer, so you can call kfree(assocmem) without
the 'if (assocmem)' guard.
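
As a minimal sketch of the simplified cleanup being suggested (assuming
no other changes to the surrounding code):

	/* kfree(NULL) is a no-op, so no conditional is needed */
	kfree(assocmem);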


>   
>   	if (enc)
>   		ret = skcipher_walk_aead_encrypt(&walk, req, false);
>   	else
Eric Biggers Jan. 2, 2025, 5:24 p.m. UTC | #2
On Thu, Jan 02, 2025 at 12:50:50PM +0100, Christophe Leroy wrote:
> 
> 
> On 30/12/2024 at 01:13, Eric Biggers wrote:
> > From: Eric Biggers <ebiggers@google.com>
> > 
> > p10_aes_gcm_crypt() is abusing the scatter_walk API to get the virtual
> > address for the first source scatterlist element.  But this code is only
> > built for PPC64, which is a !HIGHMEM platform, and it can read past a
> > page boundary from the address returned by scatterwalk_map(), which means
> > it already assumes the address is from the kernel's direct map.  Thus,
> > just use sg_virt() instead to get the same result in a simpler way.
> > 
> > Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
> > Cc: Danny Tsen <dtsen@linux.ibm.com>
> > Cc: Michael Ellerman <mpe@ellerman.id.au>
> > Cc: Naveen N Rao <naveen@kernel.org>
> > Cc: Nicholas Piggin <npiggin@gmail.com>
> > Cc: linuxppc-dev@lists.ozlabs.org
> > Signed-off-by: Eric Biggers <ebiggers@google.com>
> > ---
> > 
> > This patch is part of a long series touching many files, so I have
> > limited the Cc list on the full series.  If you want the full series and
> > did not receive it, please retrieve it from lore.kernel.org.
> > 
> >   arch/powerpc/crypto/aes-gcm-p10-glue.c | 8 ++------
> >   1 file changed, 2 insertions(+), 6 deletions(-)
> > 
> > diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
> > index f37b3d13fc53..2862c3cf8e41 100644
> > --- a/arch/powerpc/crypto/aes-gcm-p10-glue.c
> > +++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
> > @@ -212,11 +212,10 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
> >   	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
> >   	u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
> >   	struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
> >   	u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
> >   	struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
> > -	struct scatter_walk assoc_sg_walk;
> >   	struct skcipher_walk walk;
> >   	u8 *assocmem = NULL;
> >   	u8 *assoc;
> >   	unsigned int cryptlen = req->cryptlen;
> >   	unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
> > @@ -232,12 +231,11 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
> >   	memset(ivbuf, 0, sizeof(ivbuf));
> >   	memcpy(iv, riv, GCM_IV_SIZE);
> >   	/* Linearize assoc, if not already linear */
> >   	if (req->src->length >= assoclen && req->src->length) {
> > -		scatterwalk_start(&assoc_sg_walk, req->src);
> > -		assoc = scatterwalk_map(&assoc_sg_walk);
> > +		assoc = sg_virt(req->src); /* ppc64 is !HIGHMEM */
> >   	} else {
> >   		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
> >   			      GFP_KERNEL : GFP_ATOMIC;
> >   		/* assoc can be any length, so must be on heap */
> > @@ -251,13 +249,11 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
> >   	vsx_begin();
> >   	gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
> >   	vsx_end();
> > -	if (!assocmem)
> > -		scatterwalk_unmap(assoc);
> > -	else
> > +	if (assocmem)
> >   		kfree(assocmem);
> 
> kfree() accepts a NULL pointer, so you can call kfree(assocmem) without
> the 'if (assocmem)' guard.

The existing code did that too, but sure, I'll change that in v3.

- Eric

Patch

diff --git a/arch/powerpc/crypto/aes-gcm-p10-glue.c b/arch/powerpc/crypto/aes-gcm-p10-glue.c
index f37b3d13fc53..2862c3cf8e41 100644
--- a/arch/powerpc/crypto/aes-gcm-p10-glue.c
+++ b/arch/powerpc/crypto/aes-gcm-p10-glue.c
@@ -212,11 +212,10 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
 	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
 	u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
 	struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
 	u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
 	struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
-	struct scatter_walk assoc_sg_walk;
 	struct skcipher_walk walk;
 	u8 *assocmem = NULL;
 	u8 *assoc;
 	unsigned int cryptlen = req->cryptlen;
 	unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
@@ -232,12 +231,11 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
 	memset(ivbuf, 0, sizeof(ivbuf));
 	memcpy(iv, riv, GCM_IV_SIZE);
 
 	/* Linearize assoc, if not already linear */
 	if (req->src->length >= assoclen && req->src->length) {
-		scatterwalk_start(&assoc_sg_walk, req->src);
-		assoc = scatterwalk_map(&assoc_sg_walk);
+		assoc = sg_virt(req->src); /* ppc64 is !HIGHMEM */
 	} else {
 		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 			      GFP_KERNEL : GFP_ATOMIC;
 
 		/* assoc can be any length, so must be on heap */
@@ -251,13 +249,11 @@ static int p10_aes_gcm_crypt(struct aead_request *req, u8 *riv,
 
 	vsx_begin();
 	gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
 	vsx_end();
 
-	if (!assocmem)
-		scatterwalk_unmap(assoc);
-	else
+	if (assocmem)
 		kfree(assocmem);
 
 	if (enc)
 		ret = skcipher_walk_aead_encrypt(&walk, req, false);
 	else