
[09/15] crypto: ux500/hash: Get rid of state from request context

Message ID 20220721134050.1047866-10-linus.walleij@linaro.org (mailing list archive)
State Superseded
Delegated to: Herbert Xu
Series Ux500 hash cleanup

Commit Message

Linus Walleij July 21, 2022, 1:40 p.m. UTC
The request context is exactly for that: context state related
to the request. The code was (ab)using the struct that stores
the hardware state for this. Move the three variables out of
the hardware state into the request context and clean up the
mess left behind.
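
Concretely, the three members are the working buffer, the running
message length and the buffer index. A compilable sketch of the request
context they move into; HASH_BLOCK_SIZE and struct uint64 below are
stand-ins for the definitions in hash_alg.h, not the header itself:

#include <stdbool.h>
#include <stdint.h>

#define HASH_BLOCK_SIZE 64	/* stand-in; the driver defines this in hash_alg.h */

/* 64-bit message length kept as two 32-bit words, as in the driver */
struct uint64 {
	uint32_t	high_word;	/* most significant word */
	uint32_t	low_word;	/* least significant word */
};

struct hash_req_ctx {
	/* Working buffer for the partial block not yet sent to the hardware */
	uint32_t	buffer[HASH_BLOCK_SIZE / sizeof(uint32_t)];
	/* Bytes hashed so far, always a whole number of blocks */
	struct uint64	length;
	/* Number of valid bytes currently held in buffer (N % 64) */
	uint8_t		index;
	bool		dma_mode;
	bool		hw_initialized;
};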

Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
---
 drivers/crypto/ux500/hash/hash_alg.h  | 21 +++++++--------
 drivers/crypto/ux500/hash/hash_core.c | 38 +++++++++++----------------
 2 files changed, 26 insertions(+), 33 deletions(-)
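
One detail worth noting in the hunks below: the message length is
tracked as two 32-bit words, and hash_incrementlength() carries into
the high word when the low word wraps around. A standalone illustration
of that bookkeeping (not the driver's code), reusing struct uint64 from
the sketch above:

static void length_increment(struct uint64 *len, uint32_t incr)
{
	len->low_word += incr;
	/* Unsigned wrap-around: the sum came out smaller than the
	 * increment, so carry into the high word. */
	if (len->low_word < incr)
		len->high_word++;
}

The same wrap-around check also guards hash_hw_update() against a total
length overflow, now reading req_ctx->length instead of
req_ctx->state.length.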

Patch

diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
index d9d59dba6e6e..5aa86c4855f5 100644
--- a/drivers/crypto/ux500/hash/hash_alg.h
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -214,17 +214,10 @@  struct hash_register {
  * @csr[52]:	HASH Context Swap Registers 0-39.
  * @csfull:	HASH Context Swap Registers 40 ie Status flags.
  * @csdatain:	HASH Context Swap Registers 41 ie Input data.
- * @buffer:	Working buffer for messages going to the hardware.
- * @length:	Length of the part of message hashed so far (floor(N/64) * 64).
- * @index:	Valid number of bytes in buffer (N % 64).
  *
  * This structure is used between context switches, i.e. when ongoing jobs are
  * interupted with new jobs. When this happens we need to store intermediate
  * results in software.
- *
- * WARNING: "index" is the  member of the structure, to be sure  that "buffer"
- * is aligned on a 4-bytes boundary. This is highly implementation dependent
- * and MUST be checked whenever this code is ported on new platforms.
  */
 struct hash_state {
 	u32		temp_cr;
@@ -233,9 +226,6 @@  struct hash_state {
 	u32		csr[52];
 	u32		csfull;
 	u32		csdatain;
-	u32		buffer[HASH_BLOCK_SIZE / sizeof(u32)];
-	struct uint64	length;
-	u8		index;
 };
 
 /**
@@ -333,13 +323,22 @@  struct hash_ctx {
 
 /**
  * struct hash_ctx - The request context used for hash calculations.
+ * @buffer:	Working buffer for messages going to the hardware.
+ * @length:	Length of the part of message hashed so far (floor(N/64) * 64).
+ * @index:	Valid number of bytes in buffer (N % 64).
  * @state:	The state of the current calculations.
  * @dma_mode:	Used in special cases (workaround), e.g. need to change to
  *		cpu mode, if not supported/working in dma mode.
  * @hw_initialized: Indicates if hardware is initialized for new operations.
+ *
+ * WARNING: "index" is the  member of the structure, to be sure  that "buffer"
+ * is aligned on a 4-bytes boundary. This is highly implementation dependent
+ * and MUST be checked whenever this code is ported on new platforms.
  */
 struct hash_req_ctx {
-	struct hash_state	state;
+	u32			buffer[HASH_BLOCK_SIZE / sizeof(u32)];
+	struct uint64		length;
+	u8			index;
 	bool			dma_mode;
 	bool			hw_initialized;
 };
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index c2e8bd977f57..46dad128b6fe 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -448,7 +448,9 @@  static int ux500_hash_init(struct ahash_request *req)
 	if (!ctx->key)
 		ctx->keylen = 0;
 
-	memset(&req_ctx->state, 0, sizeof(struct hash_state));
+	req_ctx->index = 0;
+	req_ctx->length.low_word = 0;
+	req_ctx->length.high_word = 0;
 	req_ctx->hw_initialized = false;
 	if (hash_mode == HASH_MODE_DMA) {
 		if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
@@ -553,11 +555,11 @@  static void hash_messagepad(struct hash_device_data *device_data,
  */
 static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
 {
-	ctx->state.length.low_word += incr;
+	ctx->length.low_word += incr;
 
 	/* Check for wrap-around */
-	if (ctx->state.length.low_word < incr)
-		ctx->state.length.high_word++;
+	if (ctx->length.low_word < incr)
+		ctx->length.high_word++;
 }
 
 /**
@@ -872,9 +874,9 @@  static int hash_hw_final(struct ahash_request *req)
 		}
 	}
 
-	if (req_ctx->state.index) {
-		hash_messagepad(device_data, req_ctx->state.buffer,
-				req_ctx->state.index);
+	if (req_ctx->index) {
+		hash_messagepad(device_data, req_ctx->buffer,
+				req_ctx->index);
 	} else {
 		HASH_SET_DCAL;
 		while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
@@ -922,8 +924,8 @@  int hash_hw_update(struct ahash_request *req)
 	struct crypto_hash_walk walk;
 	int msg_length;
 
-	index = req_ctx->state.index;
-	buffer = (u8 *)req_ctx->state.buffer;
+	index = req_ctx->index;
+	buffer = (u8 *)req_ctx->buffer;
 
 	msg_length = crypto_hash_walk_first(req, &walk);
 
@@ -931,10 +933,10 @@  int hash_hw_update(struct ahash_request *req)
 	if (msg_length == 0)
 		return 0;
 
-	/* Check if ctx->state.length + msg_length
+	/* Check if ctx->length + msg_length
 	   overflows */
-	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
-	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
+	if (msg_length > (req_ctx->length.low_word + msg_length) &&
+	    req_ctx->length.high_word == HASH_HIGH_WORD_MAX_VAL) {
 		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
 		return crypto_hash_walk_done(&walk, -EPERM);
 	}
@@ -955,9 +957,9 @@  int hash_hw_update(struct ahash_request *req)
 		msg_length = crypto_hash_walk_done(&walk, 0);
 	}
 
-	req_ctx->state.index = index;
+	req_ctx->index = index;
 	dev_dbg(device_data->dev, "%s: indata length=%d\n",
-		__func__, req_ctx->state.index);
+		__func__, req_ctx->index);
 
 	return 0;
 }
@@ -980,14 +982,6 @@  int hash_resume_state(struct hash_device_data *device_data,
 		return -EPERM;
 	}
 
-	/* Check correctness of index and length members */
-	if (device_state->index > HASH_BLOCK_SIZE ||
-	    (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
-		dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
-			__func__);
-		return -EPERM;
-	}
-
 	/*
 	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
 	 * prepare the initialize the HASH accelerator to compute the message