--- a/crypto/hash-afalg.c
+++ b/crypto/hash-afalg.c
@@ -1,6 +1,7 @@
/*
* QEMU Crypto af_alg-backend hash/hmac support
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (c) 2017 HUAWEI TECHNOLOGIES CO., LTD.
*
* Authors:
@@ -113,6 +114,127 @@ qcrypto_afalg_hmac_ctx_new(QCryptoHashAlgorithm alg,
return qcrypto_afalg_hash_hmac_ctx_new(alg, key, nkey, true, errp);
}
+static
+QCryptoHash *qcrypto_afalg_hash_new(QCryptoHashAlgorithm alg, Error **errp)
+{
+    /* Check if hash algorithm is supported */
+    char *alg_name = qcrypto_afalg_hash_format_name(alg, false, NULL);
+    QCryptoHash *hash = NULL;
+
+    if (alg_name == NULL) {
+        error_setg(errp,
+                   "Unknown hash algorithm %d",
+                   alg);
+    } else {
+        hash = g_new(QCryptoHash, 1);
+        hash->alg = alg;
+        hash->opaque = qcrypto_afalg_hash_ctx_new(alg, errp);
+    }
+
+    return hash;
+}
+
+static
+void qcrypto_afalg_hash_free(QCryptoHash *hash)
+{
+    QCryptoAFAlg *ctx = hash->opaque;
+
+    if (ctx) {
+        qcrypto_afalg_comm_free(ctx);
+    }
+
+    g_free(hash);
+}
+
+/**
+ * Send data to the kernel's crypto core.
+ *
+ * The more_data parameter is used to notify the crypto engine
+ * that this is an "update" operation, and that more data will
+ * be provided to calculate the final hash.
+ */
+static
+int qcrypto_afalg_send_to_kernel(QCryptoAFAlg *afalg,
+                                 const struct iovec *iov,
+                                 size_t niov,
+                                 bool more_data,
+                                 Error **errp)
+{
+    int ret = 0;
+    int flags = (more_data ? MSG_MORE : 0);
+
+    /* send data to kernel's crypto core */
+    ret = iov_send_recv_with_flags(afalg->opfd, flags, iov, niov,
+                                   0, iov_size(iov, niov), true);
+    if (ret < 0) {
+        error_setg_errno(errp, errno, "Send data to afalg-core failed");
+        ret = -1;
+    } else {
+        /* No error, so return 0 */
+        ret = 0;
+    }
+
+    return ret;
+}
+
+static
+int qcrypto_afalg_recv_from_kernel(QCryptoAFAlg *afalg,
+                                   QCryptoHashAlgorithm alg,
+                                   uint8_t **result,
+                                   size_t *result_len,
+                                   Error **errp)
+{
+    struct iovec outv;
+    int ret = 0;
+    const int expected_len = qcrypto_hash_digest_len(alg);
+
+    if (*result_len == 0) {
+        *result_len = expected_len;
+        *result = g_new0(uint8_t, *result_len);
+    } else if (*result_len != expected_len) {
+        error_setg(errp,
+                   "Result buffer size %zu does not match hash digest length %d",
+                   *result_len, expected_len);
+        ret = -1;
+    }
+
+    if (ret == 0) {
+        /* hash && get result */
+        outv.iov_base = *result;
+        outv.iov_len = *result_len;
+        ret = iov_send_recv(afalg->opfd, &outv, 1,
+                            0, iov_size(&outv, 1), false);
+        if (ret < 0) {
+            error_setg_errno(errp, errno, "Recv result from afalg-core failed");
+            ret = -1;
+        } else {
+            ret = 0;
+        }
+    }
+
+    return ret;
+}
+
+static
+int qcrypto_afalg_hash_update(QCryptoHash *hash,
+                              const struct iovec *iov,
+                              size_t niov,
+                              Error **errp)
+{
+    return qcrypto_afalg_send_to_kernel((QCryptoAFAlg *) hash->opaque,
+                                        iov, niov, true, errp);
+}
+
+static
+int qcrypto_afalg_hash_finalize(QCryptoHash *hash,
+                                uint8_t **result,
+                                size_t *result_len,
+                                Error **errp)
+{
+    return qcrypto_afalg_recv_from_kernel((QCryptoAFAlg *) hash->opaque,
+                                          hash->alg, result, result_len, errp);
+}
+
static int
qcrypto_afalg_hash_hmac_bytesv(QCryptoAFAlg *hmac,
QCryptoHashAlgorithm alg,
@@ -205,6 +327,10 @@ static void qcrypto_afalg_hmac_ctx_free(QCryptoHmac *hmac)
QCryptoHashDriver qcrypto_hash_afalg_driver = {
.hash_bytesv = qcrypto_afalg_hash_bytesv,
+    .hash_new = qcrypto_afalg_hash_new,
+    .hash_free = qcrypto_afalg_hash_free,
+    .hash_update = qcrypto_afalg_hash_update,
+    .hash_finalize = qcrypto_afalg_hash_finalize
};
QCryptoHmacDriver qcrypto_hmac_afalg_driver = {
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -1,6 +1,7 @@
/*
* Helpers for using (partial) iovecs.
*
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
* Copyright (C) 2010 Red Hat, Inc.
*
* Author(s):
@@ -75,6 +76,31 @@ iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
size_t offset, int fillc, size_t bytes);
+/*
+ * Send/recv data from/to iovec buffers directly, with the provided
+ * socket flags.
+ *
+ * `offset' bytes in the beginning of iovec buffer are skipped and
+ * next `bytes' bytes are used, which must be within data of iovec.
+ *
+ * r = iov_send_recv_with_flags(sockfd, sockflags, iov, iovcnt, offset, bytes, true);
+ *
+ * is logically equivalent to
+ *
+ * char *buf = malloc(bytes);
+ * iov_to_buf(iov, iovcnt, offset, buf, bytes);
+ * r = send(sockfd, buf, bytes, sockflags);
+ * free(buf);
+ *
+ * For iov_send_recv_with_flags() _whole_ area being sent or received
+ * should be within the iovec, not only beginning of it.
+ */
+ssize_t iov_send_recv_with_flags(int sockfd, int sockflags,
+                                 const struct iovec *iov,
+                                 unsigned iov_cnt, size_t offset,
+                                 size_t bytes,
+                                 bool do_send);
+
/*
* Send/recv data from/to iovec buffers directly
*
--- a/util/iov.c
+++ b/util/iov.c
@@ -3,6 +3,7 @@
*
* Copyright IBM, Corp. 2007, 2008
* Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (c) 2024 Seagate Technology LLC and/or its Affiliates
*
* Author(s):
* Anthony Liguori <aliguori@us.ibm.com>
@@ -92,7 +93,7 @@ size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt)
/* helper function for iov_send_recv() */
static ssize_t
-do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
+do_send_recv(int sockfd, int flags, struct iovec *iov, unsigned iov_cnt, bool do_send)
{
#ifdef CONFIG_POSIX
ssize_t ret;
@@ -102,8 +103,8 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
msg.msg_iovlen = iov_cnt;
do {
ret = do_send
-            ? sendmsg(sockfd, &msg, 0)
-            : recvmsg(sockfd, &msg, 0);
+            ? sendmsg(sockfd, &msg, flags)
+            : recvmsg(sockfd, &msg, flags);
} while (ret < 0 && errno == EINTR);
return ret;
#else
@@ -114,8 +115,8 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
ssize_t off = 0;
while (i < iov_cnt) {
ssize_t r = do_send
-            ? send(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0)
-            : recv(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, 0);
+            ? send(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, flags)
+            : recv(sockfd, iov[i].iov_base + off, iov[i].iov_len - off, flags);
if (r > 0) {
ret += r;
off += r;
@@ -144,6 +145,13 @@ do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
ssize_t iov_send_recv(int sockfd, const struct iovec *_iov, unsigned iov_cnt,
size_t offset, size_t bytes,
bool do_send)
+{
+    return iov_send_recv_with_flags(sockfd, 0, _iov, iov_cnt, offset, bytes, do_send);
+}
+
+ssize_t iov_send_recv_with_flags(int sockfd, int sockflags, const struct iovec *_iov,
+                                 unsigned iov_cnt, size_t offset, size_t bytes,
+                                 bool do_send)
{
ssize_t total = 0;
ssize_t ret;
@@ -192,11 +200,11 @@ ssize_t iov_send_recv(int sockfd, const struct iovec *_iov, unsigned iov_cnt,
assert(iov[niov].iov_len > tail);
orig_len = iov[niov].iov_len;
iov[niov++].iov_len = tail;
-            ret = do_send_recv(sockfd, iov, niov, do_send);
+            ret = do_send_recv(sockfd, sockflags, iov, niov, do_send);
/* Undo the changes above before checking for errors */
iov[niov-1].iov_len = orig_len;
} else {
-            ret = do_send_recv(sockfd, iov, niov, do_send);
+            ret = do_send_recv(sockfd, sockflags, iov, niov, do_send);
}
if (offset) {
iov[0].iov_base -= offset;

Updates the afalg hash driver to support the new accumulative hashing
changes as part of the patch series. Implements opening/closing of
contexts, updating hash data and finalizing the hash digest.

In order to support the update function, a flag needs to be passed to
the kernel via the socket send call (MSG_MORE) to notify it that more
data is to be expected to calculate the hash correctly. As a result, a
new function was added to the iov helper utils to allow passing a flag
to the socket send call.

Signed-off-by: Alejandro Zeise <alejandro.zeise@seagate.com>
---
 crypto/hash-afalg.c | 126 ++++++++++++++++++++++++++++++++++++++++++++
 include/qemu/iov.h  |  26 +++++
 util/iov.c          |  22 +++---
 3 files changed, 167 insertions(+), 7 deletions(-)
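
For reviewers who have not used the kernel's AF_ALG interface before, here is
a minimal standalone sketch (not part of the patch) of the update/finalize
pattern the driver relies on: chunks sent with MSG_MORE keep the kernel's hash
state open, and the digest is read back once the final chunk has been sent
without MSG_MORE. The "sha256" algorithm name and the 32-byte digest size are
assumptions made only for this example.

/*
 * Illustrative only: hash "hello world" in two steps over an AF_ALG socket.
 * Error handling is reduced to asserts to keep the sketch short.
 */
#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
    struct sockaddr_alg sa = {
        .salg_family = AF_ALG,
        .salg_type = "hash",
        .salg_name = "sha256",   /* assumed algorithm, for illustration */
    };
    unsigned char digest[32];    /* SHA-256 digest length */

    int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
    assert(tfmfd >= 0);
    assert(bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) == 0);

    int opfd = accept(tfmfd, NULL, 0);
    assert(opfd >= 0);

    /* "update": MSG_MORE tells the kernel that more data will follow */
    assert(send(opfd, "hello ", 6, MSG_MORE) == 6);
    /* final chunk: no MSG_MORE, the hash may now be finalized */
    assert(send(opfd, "world", 5, 0) == 5);

    /* "finalize": read the accumulated digest back */
    assert(read(opfd, digest, sizeof(digest)) == (ssize_t)sizeof(digest));

    for (size_t i = 0; i < sizeof(digest); i++) {
        printf("%02x", (unsigned)digest[i]);
    }
    printf("\n");

    close(opfd);
    close(tfmfd);
    return 0;
}

qcrypto_afalg_send_to_kernel() and qcrypto_afalg_recv_from_kernel() in the
patch map onto the send-with-MSG_MORE and read steps above, with the data
coming from iovecs via the new iov_send_recv_with_flags() helper instead of a
plain send().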