[02/10] crypto: caam - group algorithm related params

Message ID 1479822252-23833-3-git-send-email-horia.geanta@nxp.com (mailing list archive)
State Accepted
Delegated to: Herbert Xu

Commit Message

Horia Geanta Nov. 22, 2016, 1:44 p.m. UTC
In preparation for factoring out the shared descriptors,
struct alginfo is introduced to group the algorithm-related
parameters.

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
---
 drivers/crypto/caam/caamalg.c     | 394 +++++++++++++++++++++-----------------
 drivers/crypto/caam/caamhash.c    |  48 +++--
 drivers/crypto/caam/desc_constr.h |  19 ++
 drivers/crypto/caam/key_gen.c     |  12 +-
 drivers/crypto/caam/key_gen.h     |   6 +-
 5 files changed, 274 insertions(+), 205 deletions(-)
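
To make the regrouping concrete, here is a minimal sketch of how the
two parameter groups end up being populated on an authenc setkey path
once the patch is applied; it is condensed from the aead_set_sh_desc()
changes below, and keys_fit_inline, split_key_len and keys stand in
for local state at the call site:

	/*
	 * Authentication (class 2) and cipher (class 1) parameters now
	 * travel together, one struct alginfo each.  The key field holds
	 * a virtual address when the key is inlined in the descriptor
	 * and a DMA (bus) address when it is only referenced.
	 */
	ctx->adata.keylen     = split_key_len;
	ctx->adata.keylen_pad = ALIGN(ctx->adata.keylen, 16);
	ctx->cdata.keylen     = keys.enckeylen;

	if (keys_fit_inline) {
		ctx->adata.key = (uintptr_t)ctx->key;
		ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad);
	} else {
		ctx->adata.key = ctx->key_dma;
		ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad;
	}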

Patch

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 567e234fb49b..4141143cce7d 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -217,14 +217,11 @@  struct caam_ctx {
 	dma_addr_t sh_desc_enc_dma;
 	dma_addr_t sh_desc_dec_dma;
 	dma_addr_t sh_desc_givenc_dma;
-	u32 class1_alg_type;
-	u32 class2_alg_type;
 	u32 alg_op;
 	u8 key[CAAM_MAX_KEY_SIZE];
 	dma_addr_t key_dma;
-	unsigned int enckeylen;
-	unsigned int split_key_len;
-	unsigned int split_key_pad_len;
+	struct alginfo adata;
+	struct alginfo cdata;
 	unsigned int authsize;
 };
 
@@ -232,7 +229,7 @@  static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
 				  int keys_fit_inline, bool is_rfc3686)
 {
 	u32 *key_jump_cmd;
-	unsigned int enckeylen = ctx->enckeylen;
+	unsigned int enckeylen = ctx->cdata.keylen;
 
 	/* Note: Context registers are saved. */
 	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
@@ -250,24 +247,23 @@  static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
 		enckeylen -= CTR_RFC3686_NONCE_SIZE;
 
 	if (keys_fit_inline) {
-		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-				  ctx->split_key_len, CLASS_2 |
-				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		append_key_as_imm(desc, (void *)ctx->key +
-				  ctx->split_key_pad_len, enckeylen,
+		append_key_as_imm(desc, (void *)ctx->adata.key,
+				  ctx->adata.keylen_pad, ctx->adata.keylen,
+				  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
+		append_key_as_imm(desc, (void *)ctx->cdata.key, enckeylen,
 				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
 	} else {
-		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+		append_key(desc, ctx->adata.key, ctx->adata.keylen, CLASS_2 |
 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
-		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
-			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+		append_key(desc, ctx->cdata.key, enckeylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
 	}
 
 	/* Load Counter into CONTEXT1 reg */
 	if (is_rfc3686) {
 		u32 *nonce;
 
-		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
+		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
 			       enckeylen);
 		append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
 				   LDST_CLASS_IND_CCB |
@@ -286,7 +282,6 @@  static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
 	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
 	u32 *desc;
 
@@ -295,8 +290,13 @@  static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
 	if (DESC_AEAD_NULL_ENC_LEN + AEAD_DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	    ctx->adata.keylen_pad <= CAAM_DESC_BYTES_MAX) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key = (uintptr_t)ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key = ctx->key_dma;
+	}
 
 	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
@@ -306,12 +306,12 @@  static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	/* Skip if already shared */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-				  ctx->split_key_len, CLASS_2 |
-				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	if (ctx->adata.key_inline)
+		append_key_as_imm(desc, (void *)ctx->adata.key,
+				  ctx->adata.keylen_pad, ctx->adata.keylen,
+				  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
 	else
-		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+		append_key(desc, ctx->adata.key, ctx->adata.keylen, CLASS_2 |
 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
@@ -336,8 +336,8 @@  static int aead_null_set_sh_desc(struct crypto_aead *aead)
 				     (0x8 << MOVE_LEN_SHIFT));
 
 	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* Read and write cryptlen bytes */
 	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
@@ -370,10 +370,14 @@  static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
 	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	    ctx->adata.keylen_pad <= CAAM_DESC_BYTES_MAX) {
+		ctx->adata.key_inline = true;
+		ctx->adata.key = (uintptr_t)ctx->key;
+	} else {
+		ctx->adata.key_inline = false;
+		ctx->adata.key = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_dec;
 
@@ -383,18 +387,18 @@  static int aead_null_set_sh_desc(struct crypto_aead *aead)
 	/* Skip if already shared */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-				  ctx->split_key_len, CLASS_2 |
-				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	if (ctx->adata.key_inline)
+		append_key_as_imm(desc, (void *)ctx->adata.key,
+				  ctx->adata.keylen_pad, ctx->adata.keylen,
+				  CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
 	else
-		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+		append_key(desc, ctx->adata.key, ctx->adata.keylen, CLASS_2 |
 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+	append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
 	/* assoclen + cryptlen = seqoutlen */
 	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
@@ -465,7 +469,7 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 	u32 geniv, moveiv;
 	u32 ctx1_iv_off = 0;
 	u32 *desc;
-	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 
@@ -473,7 +477,7 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 		return 0;
 
 	/* NULL encryption / decryption */
-	if (!ctx->enckeylen)
+	if (!ctx->cdata.keylen)
 		return aead_null_set_sh_desc(aead);
 
 	/*
@@ -498,12 +502,18 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
 	if (DESC_AEAD_ENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen +
+	    ctx->adata.keylen_pad + ctx->cdata.keylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
-	    CAAM_DESC_BYTES_MAX)
+	    CAAM_DESC_BYTES_MAX) {
 		keys_fit_inline = true;
+		ctx->adata.key = (uintptr_t)ctx->key;
+		ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad);
+	} else {
+		keys_fit_inline = false;
+		ctx->adata.key = ctx->key_dma;
+		ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad;
+	}
 
 	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
@@ -512,8 +522,8 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -534,8 +544,8 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 				      LDST_OFFSET_SHIFT));
 
 	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* Read and write cryptlen bytes */
 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -564,12 +574,18 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
 	if (DESC_AEAD_DEC_LEN + AUTHENC_DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen +
+	    ctx->adata.keylen_pad + ctx->cdata.keylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
-	    CAAM_DESC_BYTES_MAX)
+	    CAAM_DESC_BYTES_MAX) {
 		keys_fit_inline = true;
+		ctx->adata.key = (uintptr_t)ctx->key;
+		ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad);
+	} else {
+		keys_fit_inline = false;
+		ctx->adata.key = ctx->key_dma;
+		ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad;
+	}
 
 	/* aead_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
@@ -578,8 +594,8 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	/* Class 2 operation */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+	append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -612,10 +628,10 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 
 	/* Choose operation */
 	if (ctr_mode)
-		append_operation(desc, ctx->class1_alg_type |
+		append_operation(desc, ctx->cdata.algtype |
 				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
 	else
-		append_dec_op1(desc, ctx->class1_alg_type);
+		append_dec_op1(desc, ctx->cdata.algtype);
 
 	/* Read and write cryptlen bytes */
 	append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
@@ -646,12 +662,18 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
 	if (DESC_AEAD_GIVENC_LEN + AUTHENC_DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen +
+	    ctx->adata.keylen_pad + ctx->cdata.keylen +
 	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
-	    CAAM_DESC_BYTES_MAX)
+	    CAAM_DESC_BYTES_MAX) {
 		keys_fit_inline = true;
+		ctx->adata.key = (uintptr_t)ctx->key;
+		ctx->cdata.key = (uintptr_t)(ctx->key + ctx->adata.keylen_pad);
+	} else {
+		keys_fit_inline = false;
+		ctx->adata.key = ctx->key_dma;
+		ctx->cdata.key = ctx->key_dma + ctx->adata.keylen_pad;
+	}
 
 	/* aead_givencrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
@@ -682,8 +704,8 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 		    (ivsize << MOVE_LEN_SHIFT));
 
 	/* Return to encryption */
-	append_operation(desc, ctx->class2_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->adata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* Read and write assoclen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -712,8 +734,8 @@  static int aead_set_sh_desc(struct crypto_aead *aead)
 				      LDST_OFFSET_SHIFT));
 
 	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* Will write ivsize + cryptlen */
 	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -764,12 +786,11 @@  static int gcm_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
 	u32 *key_jump_cmd, *zero_payload_jump_cmd,
 	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
 	u32 *desc;
 
-	if (!ctx->enckeylen || !ctx->authsize)
+	if (!ctx->cdata.keylen || !ctx->authsize)
 		return 0;
 
 	/*
@@ -778,8 +799,13 @@  static int gcm_set_sh_desc(struct crypto_aead *aead)
 	 * must fit into the 64-word Descriptor h/w Buffer
 	 */
 	if (DESC_GCM_ENC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	    ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key = (uintptr_t)ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_enc;
 
@@ -788,17 +814,18 @@  static int gcm_set_sh_desc(struct crypto_aead *aead)
 	/* skip key loading if they are loaded due to sharing */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD | JUMP_COND_SELF);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	if (ctx->cdata.key_inline)
+		append_key_as_imm(desc, (void *)ctx->cdata.key,
+				  ctx->cdata.keylen, ctx->cdata.keylen,
+				  CLASS_1 | KEY_DEST_CLASS_REG);
 	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
+		append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* if assoclen + cryptlen is ZERO, skip to ICV write */
 	append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -870,10 +897,14 @@  static int gcm_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
 	if (DESC_GCM_DEC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	    ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key = (uintptr_t)ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_dec;
 
@@ -883,17 +914,18 @@  static int gcm_set_sh_desc(struct crypto_aead *aead)
 	key_jump_cmd = append_jump(desc, JUMP_JSL |
 				   JUMP_TEST_ALL | JUMP_COND_SHRD |
 				   JUMP_COND_SELF);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	if (ctx->cdata.key_inline)
+		append_key_as_imm(desc, (void *)ctx->cdata.key,
+				  ctx->cdata.keylen, ctx->cdata.keylen,
+				  CLASS_1 | KEY_DEST_CLASS_REG);
 	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
+		append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
 	/* if assoclen is ZERO, skip reading the assoc data */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -964,11 +996,10 @@  static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
 	u32 *key_jump_cmd;
 	u32 *desc;
 
-	if (!ctx->enckeylen || !ctx->authsize)
+	if (!ctx->cdata.keylen || !ctx->authsize)
 		return 0;
 
 	/*
@@ -977,8 +1008,13 @@  static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 	 * must fit into the 64-word Descriptor h/w Buffer
 	 */
 	if (DESC_RFC4106_ENC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	    ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key = (uintptr_t)ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_enc;
 
@@ -987,17 +1023,18 @@  static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 	/* Skip key loading if it is loaded due to sharing */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	if (ctx->cdata.key_inline)
+		append_key_as_imm(desc, (void *)ctx->cdata.key,
+				  ctx->cdata.keylen, ctx->cdata.keylen,
+				  CLASS_1 | KEY_DEST_CLASS_REG);
 	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
+		append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -1049,10 +1086,14 @@  static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
 	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	    ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key = (uintptr_t)ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_dec;
 
@@ -1061,17 +1102,18 @@  static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 	/* Skip key loading if it is loaded due to sharing */
 	key_jump_cmd = append_jump(desc, JUMP_JSL |
 				   JUMP_TEST_ALL | JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	if (ctx->cdata.key_inline)
+		append_key_as_imm(desc, (void *)ctx->cdata.key,
+				  ctx->cdata.keylen, ctx->cdata.keylen,
+				  CLASS_1 | KEY_DEST_CLASS_REG);
 	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
+		append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
 	append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8);
 	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
@@ -1137,12 +1179,11 @@  static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 {
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
 	u32 *key_jump_cmd;
 	u32 *read_move_cmd, *write_move_cmd;
 	u32 *desc;
 
-	if (!ctx->enckeylen || !ctx->authsize)
+	if (!ctx->cdata.keylen || !ctx->authsize)
 		return 0;
 
 	/*
@@ -1151,8 +1192,13 @@  static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 	 * must fit into the 64-word Descriptor h/w Buffer
 	 */
 	if (DESC_RFC4543_ENC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	    ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key = (uintptr_t)ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_enc;
 
@@ -1161,17 +1207,18 @@  static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 	/* Skip key loading if it is loaded due to sharing */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	if (ctx->cdata.key_inline)
+		append_key_as_imm(desc, (void *)ctx->cdata.key,
+				  ctx->cdata.keylen, ctx->cdata.keylen,
+				  CLASS_1 | KEY_DEST_CLASS_REG);
 	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
+		append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* assoclen + cryptlen = seqinlen */
 	append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1222,10 +1269,14 @@  static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
-	keys_fit_inline = false;
 	if (DESC_RFC4543_DEC_LEN + GCM_DESC_JOB_IO_LEN +
-	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
-		keys_fit_inline = true;
+	    ctx->cdata.keylen <= CAAM_DESC_BYTES_MAX) {
+		ctx->cdata.key_inline = true;
+		ctx->cdata.key = (uintptr_t)ctx->key;
+	} else {
+		ctx->cdata.key_inline = false;
+		ctx->cdata.key = ctx->key_dma;
+	}
 
 	desc = ctx->sh_desc_dec;
 
@@ -1234,17 +1285,18 @@  static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 	/* Skip key loading if it is loaded due to sharing */
 	key_jump_cmd = append_jump(desc, JUMP_JSL |
 				   JUMP_TEST_ALL | JUMP_COND_SHRD);
-	if (keys_fit_inline)
-		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	if (ctx->cdata.key_inline)
+		append_key_as_imm(desc, (void *)ctx->cdata.key,
+				  ctx->cdata.keylen, ctx->cdata.keylen,
+				  CLASS_1 | KEY_DEST_CLASS_REG);
 	else
-		append_key(desc, ctx->key_dma, ctx->enckeylen,
-			   CLASS_1 | KEY_DEST_CLASS_REG);
+		append_key(desc, ctx->cdata.key, ctx->cdata.keylen, CLASS_1 |
+			   KEY_DEST_CLASS_REG);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
 	/* Class 1 operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
 	/* assoclen + cryptlen = seqoutlen */
 	append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ);
@@ -1313,9 +1365,8 @@  static int rfc4543_setauthsize(struct crypto_aead *authenc,
 static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
 			      u32 authkeylen)
 {
-	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
-			       ctx->split_key_pad_len, key_in, authkeylen,
-			       ctx->alg_op);
+	return gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key_in,
+			     authkeylen, ctx->alg_op);
 }
 
 static int aead_setkey(struct crypto_aead *aead,
@@ -1332,11 +1383,11 @@  static int aead_setkey(struct crypto_aead *aead,
 		goto badkey;
 
 	/* Pick class 2 key length from algorithm submask */
-	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
-				      OP_ALG_ALGSEL_SHIFT] * 2;
-	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+	ctx->adata.keylen = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+				     OP_ALG_ALGSEL_SHIFT] * 2;
+	ctx->adata.keylen_pad = ALIGN(ctx->adata.keylen, 16);
 
-	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
 		goto badkey;
 
 #ifdef DEBUG
@@ -1344,7 +1395,7 @@  static int aead_setkey(struct crypto_aead *aead,
 	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
 	       keys.authkeylen);
 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
-	       ctx->split_key_len, ctx->split_key_pad_len);
+	       ctx->adata.keylen, ctx->adata.keylen_pad);
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
@@ -1355,9 +1406,9 @@  static int aead_setkey(struct crypto_aead *aead,
 	}
 
 	/* postpend encryption key to auth split key */
-	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
 				      keys.enckeylen, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1366,14 +1417,14 @@  static int aead_setkey(struct crypto_aead *aead,
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-		       ctx->split_key_pad_len + keys.enckeylen, 1);
+		       ctx->adata.keylen_pad + keys.enckeylen, 1);
 #endif
 
-	ctx->enckeylen = keys.enckeylen;
+	ctx->cdata.keylen = keys.enckeylen;
 
 	ret = aead_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
 				 keys.enckeylen, DMA_TO_DEVICE);
 	}
 
@@ -1402,11 +1453,11 @@  static int gcm_setkey(struct crypto_aead *aead,
 		dev_err(jrdev, "unable to map key i/o memory\n");
 		return -ENOMEM;
 	}
-	ctx->enckeylen = keylen;
+	ctx->cdata.keylen = keylen;
 
 	ret = gcm_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
 				 DMA_TO_DEVICE);
 	}
 
@@ -1434,9 +1485,9 @@  static int rfc4106_setkey(struct crypto_aead *aead,
 	 * The last four bytes of the key material are used as the salt value
 	 * in the nonce. Update the AES key length.
 	 */
-	ctx->enckeylen = keylen - 4;
+	ctx->cdata.keylen = keylen - 4;
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1445,7 +1496,7 @@  static int rfc4106_setkey(struct crypto_aead *aead,
 
 	ret = rfc4106_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
 				 DMA_TO_DEVICE);
 	}
 
@@ -1473,9 +1524,9 @@  static int rfc4543_setkey(struct crypto_aead *aead,
 	 * The last four bytes of the key material are used as the salt value
 	 * in the nonce. Update the AES key length.
 	 */
-	ctx->enckeylen = keylen - 4;
+	ctx->cdata.keylen = keylen - 4;
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -1484,7 +1535,7 @@  static int rfc4543_setkey(struct crypto_aead *aead,
 
 	ret = rfc4543_set_sh_desc(aead);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
 				 DMA_TO_DEVICE);
 	}
 
@@ -1505,7 +1556,7 @@  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	u8 *nonce;
 	u32 geniv;
 	u32 ctx1_iv_off = 0;
-	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = (ctr_mode &&
 				 (strstr(alg_name, "rfc3686") != NULL));
@@ -1539,7 +1590,9 @@  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		dev_err(jrdev, "unable to map key i/o memory\n");
 		return -ENOMEM;
 	}
-	ctx->enckeylen = keylen;
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key = (uintptr_t)ctx->key;
+	ctx->cdata.key_inline = true;
 
 	/* ablkcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
@@ -1549,9 +1602,8 @@  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 				   JUMP_COND_SHRD);
 
 	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
+	append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
+			  ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
 
 	/* Load nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
@@ -1580,8 +1632,8 @@  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 				      LDST_OFFSET_SHIFT));
 
 	/* Load operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* Perform operation */
 	ablkcipher_append_src_dst(desc);
@@ -1608,9 +1660,8 @@  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 				   JUMP_COND_SHRD);
 
 	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
+	append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
+			  ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
 
 	/* Load nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
@@ -1640,10 +1691,10 @@  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 
 	/* Choose operation */
 	if (ctr_mode)
-		append_operation(desc, ctx->class1_alg_type |
+		append_operation(desc, ctx->cdata.algtype |
 				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
 	else
-		append_dec_op1(desc, ctx->class1_alg_type);
+		append_dec_op1(desc, ctx->cdata.algtype);
 
 	/* Perform operation */
 	ablkcipher_append_src_dst(desc);
@@ -1671,9 +1722,8 @@  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 				   JUMP_COND_SHRD);
 
 	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
+	append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
+			  ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
 
 	/* Load Nonce into CONTEXT1 reg */
 	if (is_rfc3686) {
@@ -1720,8 +1770,8 @@  static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 			    (1 << JUMP_OFFSET_SHIFT));
 
 	/* Load operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
+			 OP_ALG_ENCRYPT);
 
 	/* Perform operation */
 	ablkcipher_append_src_dst(desc);
@@ -1764,7 +1814,9 @@  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		dev_err(jrdev, "unable to map key i/o memory\n");
 		return -ENOMEM;
 	}
-	ctx->enckeylen = keylen;
+	ctx->cdata.keylen = keylen;
+	ctx->cdata.key = (uintptr_t)ctx->key;
+	ctx->cdata.key_inline = true;
 
 	/* xts_ablkcipher_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
@@ -1774,8 +1826,8 @@  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 				   JUMP_COND_SHRD);
 
 	/* Load class1 keys only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
+			  ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
 
 	/* Load sector size with index 40 bytes (0x28) */
 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
@@ -1794,7 +1846,7 @@  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
 
 	/* Load operation */
-	append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
+	append_operation(desc, ctx->cdata.algtype | OP_ALG_AS_INITFINAL |
 			 OP_ALG_ENCRYPT);
 
 	/* Perform operation */
@@ -1821,8 +1873,8 @@  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 				   JUMP_COND_SHRD);
 
 	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	append_key_as_imm(desc, (void *)ctx->cdata.key, ctx->cdata.keylen,
+			  ctx->cdata.keylen, CLASS_1 | KEY_DEST_CLASS_REG);
 
 	/* Load sector size with index 40 bytes (0x28) */
 	append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT |
@@ -1841,7 +1893,7 @@  static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP);
 
 	/* Load operation */
-	append_dec_op1(desc, ctx->class1_alg_type);
+	append_dec_op1(desc, ctx->cdata.algtype);
 
 	/* Perform operation */
 	ablkcipher_append_src_dst(desc);
@@ -2141,7 +2193,7 @@  static void init_gcm_job(struct aead_request *req,
 			 FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
 	/* Append Salt */
 	if (!generic_gcm)
-		append_data(desc, ctx->key + ctx->enckeylen, 4);
+		append_data(desc, ctx->key + ctx->cdata.keylen, 4);
 	/* Append IV */
 	append_data(desc, req->iv, ivsize);
 	/* End of blank commands */
@@ -2156,7 +2208,7 @@  static void init_authenc_job(struct aead_request *req,
 						 struct caam_aead_alg, aead);
 	unsigned int ivsize = crypto_aead_ivsize(aead);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
-	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
 	const bool is_rfc3686 = alg->caam.rfc3686;
 	u32 *desc = edesc->hw_desc;
@@ -4395,8 +4447,8 @@  static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
 	}
 
 	/* copy descriptor header template value */
-	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
-	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam->alg_op;
 
 	return 0;
@@ -4440,7 +4492,7 @@  static void caam_exit_common(struct caam_ctx *ctx)
 	if (ctx->key_dma &&
 	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
 		dma_unmap_single(ctx->jrdev, ctx->key_dma,
-				 ctx->enckeylen + ctx->split_key_pad_len,
+				 ctx->cdata.keylen + ctx->adata.keylen_pad,
 				 DMA_TO_DEVICE);
 
 	caam_jr_free(ctx->jrdev);
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 86f360853502..5e569ff06b4b 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -108,13 +108,11 @@  struct caam_hash_ctx {
 	dma_addr_t sh_desc_fin_dma;
 	dma_addr_t sh_desc_digest_dma;
 	struct device *jrdev;
-	u32 alg_type;
 	u32 alg_op;
 	u8 key[CAAM_MAX_HASH_KEY_SIZE];
 	dma_addr_t key_dma;
 	int ctx_len;
-	unsigned int split_key_len;
-	unsigned int split_key_pad_len;
+	struct alginfo adata;
 };
 
 /* ahash state */
@@ -223,9 +221,9 @@  static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
 /* Common shared descriptor commands */
 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
 {
-	append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
-			  ctx->split_key_len, CLASS_2 |
-			  KEY_DEST_MDHA_SPLIT | KEY_ENC);
+	append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
+			  ctx->adata.keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
+			  KEY_ENC);
 }
 
 /* Append key if it has been set */
@@ -235,7 +233,7 @@  static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx)
 
 	init_sh_desc(desc, HDR_SHARE_SERIAL);
 
-	if (ctx->split_key_len) {
+	if (ctx->adata.keylen) {
 		/* Skip if already shared */
 		key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 					   JUMP_COND_SHRD);
@@ -310,7 +308,7 @@  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	u32 have_key = 0;
 	u32 *desc;
 
-	if (ctx->split_key_len)
+	if (ctx->adata.keylen)
 		have_key = OP_ALG_AAI_HMAC_PRECOMP;
 
 	/* ahash_update shared descriptor */
@@ -323,7 +321,7 @@  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 		   LDST_CLASS_2_CCB | ctx->ctx_len);
 
 	/* Class 2 operation */
-	append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
+	append_operation(desc, ctx->adata.algtype | OP_ALG_AS_UPDATE |
 			 OP_ALG_ENCRYPT);
 
 	/* Load data and write to result or context */
@@ -344,7 +342,7 @@  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	/* ahash_update_first shared descriptor */
 	desc = ctx->sh_desc_update_first;
 
-	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT,
+	ahash_data_to_out(desc, have_key | ctx->adata.algtype, OP_ALG_AS_INIT,
 			  ctx->ctx_len, ctx);
 
 	ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
@@ -363,7 +361,7 @@  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	/* ahash_final shared descriptor */
 	desc = ctx->sh_desc_fin;
 
-	ahash_ctx_data_to_out(desc, have_key | ctx->alg_type,
+	ahash_ctx_data_to_out(desc, have_key | ctx->adata.algtype,
 			      OP_ALG_AS_FINALIZE, digestsize, ctx);
 
 	ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
@@ -381,8 +379,8 @@  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	/* ahash_digest shared descriptor */
 	desc = ctx->sh_desc_digest;
 
-	ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL,
-			  digestsize, ctx);
+	ahash_data_to_out(desc, have_key | ctx->adata.algtype,
+			  OP_ALG_AS_INITFINAL, digestsize, ctx);
 
 	ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
 						 desc_bytes(desc),
@@ -404,9 +402,8 @@  static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 			      u32 keylen)
 {
-	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
-			       ctx->split_key_pad_len, key_in, keylen,
-			       ctx->alg_op);
+	return gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key_in, keylen,
+			     ctx->alg_op);
 }
 
 /* Digest hash size if it is too large */
@@ -444,7 +441,7 @@  static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 	}
 
 	/* Job descriptor to perform unkeyed hash on key_in */
-	append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT |
+	append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
 			 OP_ALG_AS_INITFINAL);
 	append_seq_in_ptr(desc, src_dma, *keylen, 0);
 	append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
@@ -515,13 +512,13 @@  static int ahash_setkey(struct crypto_ahash *ahash,
 	}
 
 	/* Pick class 2 key length from algorithm submask */
-	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
-				      OP_ALG_ALGSEL_SHIFT] * 2;
-	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+	ctx->adata.keylen = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+				     OP_ALG_ALGSEL_SHIFT] * 2;
+	ctx->adata.keylen_pad = ALIGN(ctx->adata.keylen, 16);
 
 #ifdef DEBUG
 	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
-	       ctx->split_key_len, ctx->split_key_pad_len);
+	       ctx->adata.keylen, ctx->adata.keylen_pad);
 	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
 #endif
@@ -530,7 +527,7 @@  static int ahash_setkey(struct crypto_ahash *ahash,
 	if (ret)
 		goto bad_free_key;
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad,
 				      DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, ctx->key_dma)) {
 		dev_err(jrdev, "unable to map key i/o memory\n");
@@ -540,14 +537,15 @@  static int ahash_setkey(struct crypto_ahash *ahash,
 #ifdef DEBUG
 	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-		       ctx->split_key_pad_len, 1);
+		       ctx->adata.keylen_pad, 1);
 #endif
 
 	ret = ahash_set_sh_desc(ahash);
 	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len,
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad,
 				 DMA_TO_DEVICE);
 	}
+
  error_free_key:
 	kfree(hashed_key);
 	return ret;
@@ -1832,7 +1830,7 @@  static int caam_hash_cra_init(struct crypto_tfm *tfm)
 		return PTR_ERR(ctx->jrdev);
 	}
 	/* copy descriptor header template value */
-	ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
 	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op;
 
 	ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 354da735af62..bfef7952dfb7 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -430,3 +430,22 @@  do { \
 	APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
 	APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ *           functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @keylen_pad: padded length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_inline
+ *       is true, dma (bus) address if key_inline is false.
+ * @key_inline: true - key can be inlined in the descriptor; false - key is
+ *              referenced by the descriptor
+ */
+struct alginfo {
+	u32 algtype;
+	unsigned int keylen;
+	unsigned int keylen_pad;
+	u64 key;
+	bool key_inline;
+};
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4ff9762..df287e751df1 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -41,8 +41,8 @@  Split key generation-----------------------------------------------
 [06] 0x64260028    fifostr: class2 mdsplit-jdk len=40
 			@0xffe04000
 */
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
-		  int split_key_pad_len, const u8 *key_in, u32 keylen,
+int gen_split_key(struct device *jrdev, u8 *key_out,
+		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
 		  u32 alg_op)
 {
 	u32 *desc;
@@ -63,7 +63,7 @@  int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 		goto out_free;
 	}
 
-	dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
+	dma_addr_out = dma_map_single(jrdev, key_out, adata->keylen_pad,
 				      DMA_FROM_DEVICE);
 	if (dma_mapping_error(jrdev, dma_addr_out)) {
 		dev_err(jrdev, "unable to map key output memory\n");
@@ -87,7 +87,7 @@  int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 	 * FIFO_STORE with the explicit split-key content store
 	 * (0x26 output type)
 	 */
-	append_fifo_store(desc, dma_addr_out, split_key_len,
+	append_fifo_store(desc, dma_addr_out, adata->keylen,
 			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
 
 #ifdef DEBUG
@@ -108,11 +108,11 @@  int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 #ifdef DEBUG
 		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
 			       DUMP_PREFIX_ADDRESS, 16, 4, key_out,
-			       split_key_pad_len, 1);
+			       adata->keylen_pad, 1);
 #endif
 	}
 
-	dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
+	dma_unmap_single(jrdev, dma_addr_out, adata->keylen_pad,
 			 DMA_FROM_DEVICE);
 out_unmap_in:
 	dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
diff --git a/drivers/crypto/caam/key_gen.h b/drivers/crypto/caam/key_gen.h
index c5588f6d8109..511882af0596 100644
--- a/drivers/crypto/caam/key_gen.h
+++ b/drivers/crypto/caam/key_gen.h
@@ -12,6 +12,6 @@  struct split_key_result {
 
 void split_key_done(struct device *dev, u32 *desc, u32 err, void *context);
 
-int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
-		    int split_key_pad_len, const u8 *key_in, u32 keylen,
-		    u32 alg_op);
+int gen_split_key(struct device *jrdev, u8 *key_out,
+		  struct alginfo * const adata, const u8 *key_in, u32 keylen,
+		  u32 alg_op);
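
With the split-key lengths folded into struct alginfo, callers hand
gen_split_key() the auth alginfo instead of two raw lengths. A sketch
of the updated call, mirroring the gen_split_aead_key() and
gen_split_hash_key() wrappers in this patch (adata->keylen and
adata->keylen_pad must already be set):

	ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata,
			    key_in, authkeylen, ctx->alg_op);
	if (ret)
		return ret;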