@@ -23,11 +23,6 @@ static inline u32 qce_read(struct qce_device *qce, u32 offset)
return readl(qce->base + offset);
}
-static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
-{
- writel(val, qce->base + offset);
-}
-
static inline void qce_write_array(struct qce_device *qce, u32 offset,
const u32 *val, unsigned int len)
{
@@ -157,11 +152,13 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
u32 auth_cfg = 0, config;
unsigned int iv_words;
/* if not the last, the size has to be on the block boundary */
if (!rctx->last_blk && req->nbytes % blocksize)
return -EINVAL;
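+ /* Start this request with a clean BAM command descriptor */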
+ qce_clear_bam_transaction(qce);
qce_setup_config(qce);
if (IS_CMAC(rctx->flags)) {
@@ -225,7 +222,7 @@ static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
qce_crypto_go(qce, true);
- return 0;
+ return qce_submit_cmd_desc(qce, 0);
}
#endif
@@ -325,7 +322,8 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
u32 encr_cfg = 0, auth_cfg = 0, config;
unsigned int ivsize = rctx->ivsize;
unsigned long flags = rctx->flags;
+ qce_clear_bam_transaction(qce);
qce_setup_config(qce);
if (IS_XTS(flags))
@@ -388,7 +386,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
qce_crypto_go(qce, true);
- return 0;
+ return qce_submit_cmd_desc(qce, 0);
}
#endif
@@ -438,7 +436,8 @@ static int qce_setup_regs_aead(struct crypto_async_request *async_req)
unsigned long flags = rctx->flags;
u32 encr_cfg, auth_cfg, config, totallen;
u32 iv_last_word;
+ qce_clear_bam_transaction(qce);
qce_setup_config(qce);
/* Write encryption key */
@@ -537,7 +536,7 @@ static int qce_setup_regs_aead(struct crypto_async_request *async_req)
/* Start the process */
qce_crypto_go(qce, !IS_CCM(flags));
- return 0;
+ return qce_submit_cmd_desc(qce, 0);
}
#endif
@@ -100,5 +100,6 @@ void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
int qce_check_status(struct qce_device *qce, u32 *status);
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
int qce_start(struct crypto_async_request *async_req, u32 type);
+void qce_write(struct qce_device *qce, u32 offset, u32 val);
#endif /* _COMMON_H_ */
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <crypto/scatterwalk.h>
+#include "common.h"
#include "core.h"
#include "dma.h"
@@ -106,9 +107,9 @@ int qce_submit_cmd_desc(struct qce_device *qce, unsigned long flags)
return ret;
}
-static __maybe_unused void
-qce_prep_dma_command_desc(struct qce_device *qce, struct qce_dma_data *dma,
- unsigned int addr, void *buff)
+static void qce_prep_dma_command_desc(struct qce_device *qce,
+ struct qce_dma_data *dma,
+ unsigned int addr, void *buff)
{
struct qce_bam_transaction *qce_bam_txn = dma->qce_bam_txn;
struct bam_cmd_element *qce_bam_ce_buffer;
@@ -134,6 +135,17 @@ qce_prep_dma_command_desc(struct qce_device *qce, struct qce_dma_data *dma,
qce_bam_txn->qce_pre_bam_ce_index = qce_bam_txn->qce_bam_ce_index;
}
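+/*
+ * qce_write() no longer performs an immediate MMIO write: the register
+ * write is queued as a BAM command element and only takes effect once the
+ * descriptor is flushed via qce_submit_cmd_desc().
+ */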
+void qce_write(struct qce_device *qce, u32 offset, u32 val)
+{
+ qce_prep_dma_command_desc(qce, &qce->dma, (qce->base_dma + offset),
+ &val);
+}
+
static void qce_dma_release(void *data)
{
struct qce_dma_data *dma = data;