[3/3] crypto: caam/qi - fix IV DMA mapping and updating

Message ID 20180328123919.24120-4-horia.geanta@nxp.com (mailing list archive)
State Accepted
Delegated to: Herbert Xu

Commit Message

Horia Geanta March 28, 2018, 12:39 p.m. UTC
There are two IV-related issues:
(1) the crypto API does not guarantee that the IV buffer it provides is
DMAable, thus it is incorrect to DMA map it
(2) for in-place decryption, since the ciphertext is overwritten with
plaintext, the updated IV (req->info) will contain the last block of
plaintext instead of the last block of ciphertext

While these two issues could be fixed separately, it is straightforward
to fix both at the same time - by using the {ablkcipher,aead}_edesc
extended descriptor to store the IV that will be fed to the crypto engine;
this allows fixing (2) by saving req->src[last_block] in req->info
directly, i.e. without allocating yet another temporary buffer.

A side effect of the fix is that the IV can no longer be contiguous with
req->src or req->dst, so the code checking for this case is removed.
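
[Editor's note] A minimal user-space sketch of the descriptor layout this
patch introduces: the h/w S/G table sits at the end of the edesc
allocation and the IV copy is placed immediately after the last S/G
entry, so the IV always lives in driver-owned, DMAable memory. The
struct fields, the qm_sg_entry layout and the CAAM_QI_MEMCACHE_SIZE
value below are simplified/assumed for illustration only; just the
bounds check and the IV pointer arithmetic mirror the patch.

/* Sketch only - simplified types, illustrative sizes. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CAAM_QI_MEMCACHE_SIZE	768	/* assumed value */

struct qm_sg_entry {			/* simplified: 16 bytes per entry */
	uint64_t addr;
	uint32_t len;
	uint32_t bpid_offset;
};

struct ablkcipher_edesc_sketch {
	int src_nents;
	int dst_nents;
	uint64_t iv_dma;
	int qm_sg_bytes;
	uint64_t qm_sg_dma;
	struct qm_sg_entry sgt[];	/* h/w link table, followed by IV */
};

int main(void)
{
	int qm_sg_ents = 4;		/* e.g. 1 IV entry + 3 data entries */
	int ivsize = 16;		/* AES block size */
	size_t qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);

	/* Same bounds check the patch performs before building the table */
	if (offsetof(struct ablkcipher_edesc_sketch, sgt) + qm_sg_bytes +
	    ivsize > CAAM_QI_MEMCACHE_SIZE) {
		fprintf(stderr, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		return 1;
	}

	struct ablkcipher_edesc_sketch *edesc =
		calloc(1, CAAM_QI_MEMCACHE_SIZE);
	if (!edesc)
		return 1;

	/*
	 * The IV copy sits right after the last S/G entry, in memory owned
	 * by the driver rather than in the caller-provided req->info.
	 */
	uint8_t *iv = (uint8_t *)(&edesc->sgt[0] + qm_sg_ents);
	uint8_t req_info[16] = { 0 };	/* stand-in for req->info */
	memcpy(iv, req_info, ivsize);

	printf("IV stored at offset %zu inside the %d-byte edesc allocation\n",
	       (size_t)(iv - (uint8_t *)edesc), CAAM_QI_MEMCACHE_SIZE);

	free(edesc);
	return 0;
}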

Cc: <stable@vger.kernel.org> # 4.14+
Fixes: a68a19380522 ("crypto: caam/qi - properly set IV after {en,de}crypt")
Link: http://lkml.kernel.org/r/20170113084620.GF22022@gondor.apana.org.au
Reported-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
---
 drivers/crypto/caam/caamalg_qi.c | 227 ++++++++++++++++++++-------------------
 1 file changed, 116 insertions(+), 111 deletions(-)

Patch

diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c2b5762d56a0..a6b76b3c8abe 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -726,7 +726,7 @@  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  * @assoclen: associated data length, in CAAM endianness
  * @assoclen_dma: bus physical mapped address of req->assoclen
  * @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
  */
 struct aead_edesc {
 	int src_nents;
@@ -737,9 +737,6 @@  struct aead_edesc {
 	unsigned int assoclen;
 	dma_addr_t assoclen_dma;
 	struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_AEAD_SG						\
-	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct aead_edesc, sgt)) /	\
-	 sizeof(struct qm_sg_entry))
 	struct qm_sg_entry sgt[0];
 };
 
@@ -751,7 +748,7 @@  struct aead_edesc {
  * @qm_sg_bytes: length of dma mapped h/w link table
  * @qm_sg_dma: bus physical mapped address of h/w link table
  * @drv_req: driver-specific request structure
- * @sgt: the h/w link table
+ * @sgt: the h/w link table, followed by IV
  */
 struct ablkcipher_edesc {
 	int src_nents;
@@ -760,9 +757,6 @@  struct ablkcipher_edesc {
 	int qm_sg_bytes;
 	dma_addr_t qm_sg_dma;
 	struct caam_drv_req drv_req;
-#define CAAM_QI_MAX_ABLKCIPHER_SG					    \
-	((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
-	 sizeof(struct qm_sg_entry))
 	struct qm_sg_entry sgt[0];
 };
 
@@ -984,17 +978,8 @@  static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		}
 	}
 
-	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) {
+	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
 		ivsize = crypto_aead_ivsize(aead);
-		iv_dma = dma_map_single(qidev, req->iv, ivsize, DMA_TO_DEVICE);
-		if (dma_mapping_error(qidev, iv_dma)) {
-			dev_err(qidev, "unable to map IV\n");
-			caam_unmap(qidev, req->src, req->dst, src_nents,
-				   dst_nents, 0, 0, op_type, 0, 0);
-			qi_cache_free(edesc);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
 
 	/*
 	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
@@ -1002,16 +987,33 @@  static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	 */
 	qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
 		     (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_AEAD_SG)) {
-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-			qm_sg_ents, CAAM_QI_MAX_AEAD_SG);
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, op_type, 0, 0);
+	sg_table = &edesc->sgt[0];
+	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+		     CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
 		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
-	sg_table = &edesc->sgt[0];
-	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+
+	if (ivsize) {
+		u8 *iv = (u8 *)(sg_table + qm_sg_ents);
+
+		/* Make sure IV is located in a DMAable area */
+		memcpy(iv, req->iv, ivsize);
+
+		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(qidev, iv_dma)) {
+			dev_err(qidev, "unable to map IV\n");
+			caam_unmap(qidev, req->src, req->dst, src_nents,
+				   dst_nents, 0, 0, 0, 0, 0);
+			qi_cache_free(edesc);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
@@ -1164,15 +1166,27 @@  static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
 #endif
 
 	ablkcipher_unmap(qidev, edesc, req);
-	qi_cache_free(edesc);
+
+	/* In case initial IV was generated, copy it in GIVCIPHER request */
+	if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
+		u8 *iv;
+		struct skcipher_givcrypt_request *greq;
+
+		greq = container_of(req, struct skcipher_givcrypt_request,
+				    creq);
+		iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
+		memcpy(greq->giv, iv, ivsize);
+	}
 
 	/*
 	 * The crypto API expects us to set the IV (req->info) to the last
 	 * ciphertext block. This is used e.g. by the CTS mode.
 	 */
-	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
-				 ivsize, 0);
+	if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
+		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+					 ivsize, ivsize, 0);
 
+	qi_cache_free(edesc);
 	ablkcipher_request_complete(req, status);
 }
 
@@ -1187,9 +1201,9 @@  static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
 	struct ablkcipher_edesc *edesc;
 	dma_addr_t iv_dma;
-	bool in_contig;
+	u8 *iv;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-	int dst_sg_idx, qm_sg_ents;
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
 	struct qm_sg_entry *sg_table, *fd_sgt;
 	struct caam_drv_ctx *drv_ctx;
 	enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
@@ -1236,55 +1250,53 @@  static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 		}
 	}
 
-	iv_dma = dma_map_single(qidev, req->info, ivsize, DMA_TO_DEVICE);
-	if (dma_mapping_error(qidev, iv_dma)) {
-		dev_err(qidev, "unable to map IV\n");
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0, 0);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (mapped_src_nents == 1 &&
-	    iv_dma + ivsize == sg_dma_address(req->src)) {
-		in_contig = true;
-		qm_sg_ents = 0;
-	} else {
-		in_contig = false;
-		qm_sg_ents = 1 + mapped_src_nents;
-	}
+	qm_sg_ents = 1 + mapped_src_nents;
 	dst_sg_idx = qm_sg_ents;
 
 	qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, op_type, 0, 0);
+	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* allocate space for base edesc and link tables */
+	/* allocate space for base edesc, link tables and IV */
 	edesc = qi_cache_alloc(GFP_DMA | flags);
 	if (unlikely(!edesc)) {
 		dev_err(qidev, "could not allocate extended descriptor\n");
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, op_type, 0, 0);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	memcpy(iv, req->info, ivsize);
+
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->iv_dma = iv_dma;
-	sg_table = &edesc->sgt[0];
-	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+	edesc->qm_sg_bytes = qm_sg_bytes;
 	edesc->drv_req.app_ctx = req;
 	edesc->drv_req.cbk = ablkcipher_done;
 	edesc->drv_req.drv_ctx = drv_ctx;
 
-	if (!in_contig) {
-		dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
-	}
+	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
 
 	if (mapped_dst_nents > 1)
 		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
@@ -1302,20 +1314,12 @@  static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 
 	fd_sgt = &edesc->drv_req.fd_sgt[0];
 
-	if (!in_contig)
-		dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
-					  ivsize + req->nbytes, 0);
-	else
-		dma_to_qm_sg_one_last(&fd_sgt[1], iv_dma, ivsize + req->nbytes,
-				      0);
+	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
+				  ivsize + req->nbytes, 0);
 
 	if (req->src == req->dst) {
-		if (!in_contig)
-			dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
-					     sizeof(*sg_table), req->nbytes, 0);
-		else
-			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-					 req->nbytes, 0);
+		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
+				     sizeof(*sg_table), req->nbytes, 0);
 	} else if (mapped_dst_nents > 1) {
 		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
 				     sizeof(*sg_table), req->nbytes, 0);
@@ -1339,10 +1343,10 @@  static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
 	struct ablkcipher_edesc *edesc;
 	dma_addr_t iv_dma;
-	bool out_contig;
+	u8 *iv;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	struct qm_sg_entry *sg_table, *fd_sgt;
-	int dst_sg_idx, qm_sg_ents;
+	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
 	struct caam_drv_ctx *drv_ctx;
 
 	drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
@@ -1390,46 +1394,45 @@  static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 		mapped_dst_nents = src_nents;
 	}
 
-	iv_dma = dma_map_single(qidev, creq->giv, ivsize, DMA_FROM_DEVICE);
-	if (dma_mapping_error(qidev, iv_dma)) {
-		dev_err(qidev, "unable to map IV\n");
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-			   0, 0, 0, 0);
-		return ERR_PTR(-ENOMEM);
-	}
-
 	qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
 	dst_sg_idx = qm_sg_ents;
-	if (mapped_dst_nents == 1 &&
-	    iv_dma + ivsize == sg_dma_address(req->dst)) {
-		out_contig = true;
-	} else {
-		out_contig = false;
-		qm_sg_ents += 1 + mapped_dst_nents;
-	}
 
-	if (unlikely(qm_sg_ents > CAAM_QI_MAX_ABLKCIPHER_SG)) {
-		dev_err(qidev, "Insufficient S/G entries: %d > %zu\n",
-			qm_sg_ents, CAAM_QI_MAX_ABLKCIPHER_SG);
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
+	qm_sg_ents += 1 + mapped_dst_nents;
+	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
+	if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+		     ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
+			qm_sg_ents, ivsize);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* allocate space for base edesc and link tables */
+	/* allocate space for base edesc, link tables and IV */
 	edesc = qi_cache_alloc(GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(qidev, "could not allocate extended descriptor\n");
-		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-			   iv_dma, ivsize, GIVENCRYPT, 0, 0);
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Make sure IV is located in a DMAable area */
+	sg_table = &edesc->sgt[0];
+	iv = (u8 *)(sg_table + qm_sg_ents);
+	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
+	if (dma_mapping_error(qidev, iv_dma)) {
+		dev_err(qidev, "unable to map IV\n");
+		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
+			   0, 0, 0, 0);
+		qi_cache_free(edesc);
 		return ERR_PTR(-ENOMEM);
 	}
 
 	edesc->src_nents = src_nents;
 	edesc->dst_nents = dst_nents;
 	edesc->iv_dma = iv_dma;
-	sg_table = &edesc->sgt[0];
-	edesc->qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+	edesc->qm_sg_bytes = qm_sg_bytes;
 	edesc->drv_req.app_ctx = req;
 	edesc->drv_req.cbk = ablkcipher_done;
 	edesc->drv_req.drv_ctx = drv_ctx;
@@ -1437,11 +1440,9 @@  static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 	if (mapped_src_nents > 1)
 		sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
 
-	if (!out_contig) {
-		dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 dst_sg_idx + 1, 0);
-	}
+	dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
+	sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
+			 0);
 
 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -1462,13 +1463,8 @@  static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
 		dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
 				 req->nbytes, 0);
 
-	if (!out_contig)
-		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
-				     sizeof(*sg_table), ivsize + req->nbytes,
-				     0);
-	else
-		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
-				 ivsize + req->nbytes, 0);
+	dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
+			     sizeof(*sg_table), ivsize + req->nbytes, 0);
 
 	return edesc;
 }
@@ -1478,6 +1474,7 @@  static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
 	struct ablkcipher_edesc *edesc;
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 	int ret;
 
 	if (unlikely(caam_congested))
@@ -1488,6 +1485,14 @@  static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
+	/*
+	 * The crypto API expects us to set the IV (req->info) to the last
+	 * ciphertext block.
+	 */
+	if (!encrypt)
+		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+					 ivsize, ivsize, 0);
+
 	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
 	if (!ret) {
 		ret = -EINPROGRESS;