@@ -275,8 +275,10 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
rctx->datbuf.size = rctx->len;
rctx->datbuf.buf = dma_alloc_coherent(se->dev, rctx->datbuf.size,
&rctx->datbuf.addr, GFP_KERNEL);
- if (!rctx->datbuf.buf)
- return -ENOMEM;
+ if (!rctx->datbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
scatterwalk_map_and_copy(rctx->datbuf.buf, req->src, 0, req->cryptlen, 0);
@@ -292,6 +294,7 @@ static int tegra_aes_do_one_req(struct crypto_engine *engine, void *areq)
dma_free_coherent(ctx->se->dev, rctx->datbuf.size,
rctx->datbuf.buf, rctx->datbuf.addr);
+out_finalize:
crypto_finalize_skcipher_request(se->engine, req, ret);
return 0;
@@ -1152,21 +1155,23 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
ret = tegra_ccm_crypt_init(req, se, rctx);
if (ret)
- return ret;
+ goto out_finalize;
/* Allocate buffers required */
rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
- if (!rctx->inbuf.buf)
- return -ENOMEM;
+ if (!rctx->inbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen + 100;
rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
- goto outbuf_err;
+ goto out_free_inbuf;
}
if (rctx->encrypt) {
@@ -1195,10 +1200,11 @@ static int tegra_ccm_do_one_req(struct crypto_engine *engine, void *areq)
dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err:
+out_free_inbuf:
dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
+out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
@@ -1229,15 +1235,17 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
rctx->inbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
rctx->inbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->inbuf.size,
&rctx->inbuf.addr, GFP_KERNEL);
- if (!rctx->inbuf.buf)
- return -ENOMEM;
+ if (!rctx->inbuf.buf) {
+ ret = -ENOMEM;
+ goto out_finalize;
+ }
rctx->outbuf.size = rctx->assoclen + rctx->authsize + rctx->cryptlen;
rctx->outbuf.buf = dma_alloc_coherent(ctx->se->dev, rctx->outbuf.size,
&rctx->outbuf.addr, GFP_KERNEL);
if (!rctx->outbuf.buf) {
ret = -ENOMEM;
- goto outbuf_err;
+ goto out_free_inbuf;
}
/* If there is associated data perform GMAC operation */
@@ -1266,11 +1274,11 @@ static int tegra_gcm_do_one_req(struct crypto_engine *engine, void *areq)
dma_free_coherent(ctx->se->dev, rctx->outbuf.size,
rctx->outbuf.buf, rctx->outbuf.addr);
-outbuf_err:
+out_free_inbuf:
dma_free_coherent(ctx->se->dev, rctx->inbuf.size,
rctx->inbuf.buf, rctx->inbuf.addr);
- /* Finalize the request if there are no errors */
+out_finalize:
crypto_finalize_aead_request(ctx->se->engine, req, ret);
return 0;
Call the crypto finalize function before exiting the *do_one_req()
functions. This allows the driver to take up further requests even if
the previous one fails.

Fixes: 0880bb3b00c8 ("crypto: tegra - Add Tegra Security Engine driver")
Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
---
 drivers/crypto/tegra/tegra-se-aes.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)
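For illustration, below is a minimal, self-contained C sketch of the control
flow the patch establishes: every exit path funnels through a single
out_finalize label so the finalize callback runs exactly once, on success and
on error alike. This is not driver code; struct request, finalize_request()
and do_one_req() are hypothetical stand-ins for the crypto_engine request and
callbacks, and malloc()/free() stand in for dma_alloc_coherent() and
dma_free_coherent().

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Hypothetical stand-in for the engine request object. */
struct request {
	int status;
};

/*
 * Models crypto_finalize_skcipher_request()/crypto_finalize_aead_request():
 * it must run exactly once per request, whether the operation succeeded or
 * failed, so the engine can move on to the next queued request.
 */
static void finalize_request(struct request *req, int err)
{
	req->status = err;
	printf("request finalized with status %d\n", err);
}

/* Models a *do_one_req() handler using the single-exit pattern. */
static int do_one_req(struct request *req, size_t len)
{
	void *inbuf, *outbuf;
	int ret = 0;

	inbuf = malloc(len);		/* stands in for dma_alloc_coherent() */
	if (!inbuf) {
		ret = -ENOMEM;
		goto out_finalize;	/* no early return: still finalize */
	}

	outbuf = malloc(len);
	if (!outbuf) {
		ret = -ENOMEM;
		goto out_free_inbuf;
	}

	/* ... the actual crypto operation would run here and set ret ... */

	free(outbuf);			/* stands in for dma_free_coherent() */
out_free_inbuf:
	free(inbuf);
out_finalize:
	finalize_request(req, ret);	/* reached from every path */
	return 0;			/* the handler itself reports success */
}

int main(void)
{
	struct request req = { 0 };

	do_one_req(&req, 64);
	return req.status ? EXIT_FAILURE : EXIT_SUCCESS;
}

The key property is that an allocation failure no longer returns directly
from the handler: it records the error in ret and still reaches
finalize_request(), mirroring what the patch does with
crypto_finalize_*_request().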