@@ -261,9 +261,447 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
}
+static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
+ VirtIOCryptoReq *req)
+{
+ req->vcrypto = vcrypto;
+ req->vq = vq;
+ req->idata_hva = NULL;
+}
+
+static void virtio_crypto_free_request(VirtIOCryptoReq *req)
+{
+ if (req) {
+ if (req->flags == QCRYPTO_CRYPTODEV_BACKEND_ALG_SYM) {
+ g_free(req->u.sym_op_info);
+ }
+ g_free(req);
+ }
+}
+
+static void
+virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
+ void *idata_hva,
+ uint32_t status,
+ QCryptoCryptoDevBackendSymOpInfo *sym_op_info);
+
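+/*
+ * Complete a request: write results back into the guest's input area,
+ * push the element onto the used ring and notify the guest.
+ */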
+static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint32_t status)
+{
+ VirtIOCrypto *vcrypto = req->vcrypto;
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+
+ if (req->flags == QCRYPTO_CRYPTODEV_BACKEND_ALG_SYM) {
+ virtio_crypto_sym_input_data_helper(vdev, req->idata_hva, status,
+ req->u.sym_op_info);
+ }
+
+ virtqueue_push(req->vq, &req->elem,
+ sizeof(struct virtio_crypto_op_data_req));
+ virtio_notify(vdev, req->vq);
+}
+
+static VirtIOCryptoReq *
+virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
+{
+ VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
+
+ if (req) {
+ virtio_crypto_init_request(s, vq, req);
+ }
+ return req;
+}
+
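+/*
+ * Map a guest-physical range into host iovec entries. A single range
+ * may produce several entries when it spans memory region boundaries.
+ */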
+static void virtio_crypto_map_iovec(unsigned int *p_num_sg, hwaddr *addr,
+ struct iovec *iov,
+ unsigned int max_num_sg,
+ hwaddr pa, size_t sz,
+ bool is_write)
+{
+ unsigned num_sg = *p_num_sg;
+ assert(num_sg <= max_num_sg);
+
+ if (!sz) {
+ error_report("virtio-crypto: zero sized buffers are not allowed");
+ exit(1);
+ }
+
+ while (sz) {
+ hwaddr len = sz;
+
+ if (num_sg == max_num_sg) {
+ error_report("virtio-crypto: too many entries "
+ "in the scatter gather list");
+ exit(1);
+ }
+
+ iov[num_sg].iov_base = cpu_physical_memory_map(pa, &len, is_write);
+ iov[num_sg].iov_len = len;
+ addr[num_sg] = pa;
+
+ sz -= len;
+ pa += len;
+ num_sg++;
+ }
+ *p_num_sg = num_sg;
+}
+
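+/*
+ * Unmap every entry of a VirtIOCryptoBuffer. For device writes, only
+ * the bytes actually written (at most @len in total) are marked dirty.
+ */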
+static void virtio_crypto_unmap_iovec(VirtIOCryptoBuffer *buf,
+ unsigned int len,
+ bool is_write)
+{
+ unsigned int offset;
+ int i;
+
+ if (is_write) {
+ offset = 0;
+ for (i = 0; i < buf->num; i++) {
+ size_t size = MIN(len - offset, buf->sg[i].iov_len);
+
+ cpu_physical_memory_unmap(buf->sg[i].iov_base,
+ buf->sg[i].iov_len,
+ 1, size);
+
+ offset += size;
+ }
+ } else {
+ for (i = 0; i < buf->num; i++) {
+ cpu_physical_memory_unmap(buf->sg[i].iov_base,
+ buf->sg[i].iov_len,
+ 0, buf->sg[i].iov_len);
+ }
+ }
+}
+
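+/*
+ * Follow a VIRTIO_CRYPTO_IOVEC_F_NEXT chain: map the next
+ * virtio_crypto_iovec descriptor, record the mapping in @iov/@num so
+ * the caller can unmap it later, and return its HVA (or NULL at the
+ * end of the chain).
+ */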
+static void *virtio_crypto_read_next_iovec(VirtIODevice *vdev,
+ struct virtio_crypto_iovec *iovec,
+ bool is_write,
+ struct iovec *iov,
+ unsigned int *num)
+{
+ struct virtio_crypto_iovec *iovec_hva;
+ hwaddr pa;
+ hwaddr len;
+
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(iovec->flags & VIRTIO_CRYPTO_IOVEC_F_NEXT)) {
+ return NULL;
+ }
+
+ pa = iovec->next_iovec;
+ len = sizeof(*iovec_hva);
+ iovec_hva = cpu_physical_memory_map(pa, &len, is_write);
+ assert(len == sizeof(*iovec_hva));
+
+ iov[*num].iov_base = iovec_hva;
+ iov[*num].iov_len = len;
+ (*num)++;
+
+ return iovec_hva;
+}
+
+static void *virtio_crypto_alloc_buf(unsigned num)
+{
+ VirtIOCryptoBuffer *buf;
+ size_t addr_ofs = QEMU_ALIGN_UP(sizeof(*buf), __alignof__(buf->addr[0]));
+ size_t addr_end = addr_ofs + num * sizeof(buf->addr[0]);
+ size_t sg_ofs = QEMU_ALIGN_UP(addr_end, __alignof__(buf->sg[0]));
+ size_t sg_end = sg_ofs + num * sizeof(buf->sg[0]);
+
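+    /* One contiguous allocation: the header, then addr[num], then sg[num] */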
+ buf = g_malloc(sg_end);
+ buf->num = num;
+
+ buf->addr = (void *)buf + addr_ofs;
+ buf->sg = (void *)buf + sg_ofs;
+ return buf;
+}
+
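+/*
+ * Gather all data segments of a guest iovec chain into a freshly
+ * allocated VirtIOCryptoBuffer of host mappings. The caller must
+ * release it with virtio_crypto_unmap_iovec() and g_free().
+ */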
+static void *virtio_crypto_iovec_read(VirtIODevice *vdev,
+ struct virtio_crypto_iovec *iovec,
+ bool is_write)
+{
+ VirtIOCryptoBuffer *buf;
+ hwaddr addr[VIRTIO_CRYPTO_SG_MAX];
+ struct iovec iov[VIRTIO_CRYPTO_SG_MAX];
+ unsigned int num = 0;
+    /* Save each chained virtio_crypto_iovec structure's HVA mapping here */
+ struct iovec vc_iov[VIRTIO_CRYPTO_SG_MAX];
+ unsigned int vc_num = 0;
+ unsigned int i;
+
+ struct virtio_crypto_iovec *p_iovec = iovec;
+
+ /* Collect all the sgs */
+ do {
+ virtio_crypto_map_iovec(&num, addr, iov,
+ VIRTIO_CRYPTO_SG_MAX,
+ p_iovec->addr, p_iovec->len,
+ is_write);
+ } while ((p_iovec = virtio_crypto_read_next_iovec(vdev,
+ p_iovec, false, vc_iov, &vc_num))
+ != NULL);
+
+ /* Now copy what we have collected and mapped */
+ buf = virtio_crypto_alloc_buf(num);
+ for (i = 0; i < num; i++) {
+ buf->addr[i] = addr[i];
+ buf->sg[i] = iov[i];
+ }
+    /* Unmap all mapped virtio_crypto_iovec structures, if any */
+ for (i = 0; i < vc_num; i++) {
+ cpu_physical_memory_unmap(vc_iov[i].iov_base,
+ vc_iov[i].iov_len,
+ false, vc_iov[i].iov_len);
+ }
+
+ return buf;
+}
+
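+/*
+ * Build the backend operation info for a cipher request. The trailing
+ * op_info->data[] area is laid out as: IV | AAD | source | destination.
+ */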
+static QCryptoCryptoDevBackendSymOpInfo *
+virtio_crypto_cipher_op_helper(VirtIODevice *vdev,
+ struct virtio_crypto_cipher_para *para,
+ struct virtio_crypto_cipher_output *out,
+ struct virtio_crypto_iovec *add_data)
+{
+ QCryptoCryptoDevBackendSymOpInfo *op_info;
+ uint32_t src_len, dst_len;
+ uint32_t iv_len;
+ size_t max_len, curr_size = 0;
+ hwaddr iv_gpa;
+ void *iv_hva;
+ hwaddr len;
+ uint32_t aad_len = 0;
+ VirtIOCryptoBuffer *buf;
+ size_t s;
+
+ iv_len = para->iv_len;
+ src_len = para->src_data_len;
+ dst_len = para->dst_data_len;
+
+ if (add_data) {
+ aad_len = add_data->len;
+ }
+
+ max_len = iv_len + aad_len + src_len + dst_len;
+ op_info = g_malloc0(sizeof(QCryptoCryptoDevBackendSymOpInfo) + max_len);
+ op_info->iv_len = iv_len;
+ op_info->src_len = src_len;
+ op_info->dst_len = dst_len;
+ op_info->aad_len = aad_len;
+    /* handle the initialization vector */
+    if (op_info->iv_len > 0) {
+        len = op_info->iv_len;
+        DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
+ op_info->iv = op_info->data + curr_size;
+
+ iv_gpa = out->iv_addr;
+ iv_hva = cpu_physical_memory_map(iv_gpa, &len, false);
+ memcpy(op_info->iv, iv_hva, len);
+ cpu_physical_memory_unmap(iv_hva, len, false, len);
+ curr_size += len;
+ }
+
+    /* handle the additional authentication data, if it exists */
+    if (op_info->aad_len > 0) {
+        DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
+ op_info->aad_data = op_info->data + curr_size;
+
+ buf = virtio_crypto_iovec_read(vdev, add_data, false);
+ s = iov_to_buf(buf->sg, buf->num, 0, op_info->aad_data,
+ op_info->aad_len);
+ assert(s == op_info->aad_len);
+
+ virtio_crypto_unmap_iovec(buf, op_info->aad_len, false);
+ g_free(buf);
+ curr_size += op_info->aad_len;
+ }
+
+ /* handle the source data */
+ if (op_info->src_len > 0) {
+ DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
+ op_info->src = op_info->data + curr_size;
+
+ buf = virtio_crypto_iovec_read(vdev, &out->src_data, false);
+ s = iov_to_buf(buf->sg, buf->num, 0, op_info->src, op_info->src_len);
+ assert(s == op_info->src_len);
+
+ virtio_crypto_unmap_iovec(buf, op_info->src_len, false);
+ g_free(buf);
+
+ curr_size += op_info->src_len;
+ }
+ op_info->dst = op_info->data + curr_size;
+ DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
+
+ return op_info;
+}
+
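+/*
+ * Copy the cipher result (and, for chained requests, the digest) back
+ * into the guest-supplied destination buffers and set the status.
+ */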
+static void
+virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
+ void *idata_hva,
+ uint32_t status,
+ QCryptoCryptoDevBackendSymOpInfo *sym_op_info)
+{
+ struct virtio_crypto_sym_input *idata = idata_hva;
+ hwaddr len;
+ VirtIOCryptoBuffer *buf;
+ size_t s;
+
+ idata->status = status;
+ if (status != VIRTIO_CRYPTO_OP_OK) {
+ return;
+ }
+
+ buf = virtio_crypto_iovec_read(vdev, &idata->dst_data, true);
+    /* Note: dst_data has the same length as src_data for cipher */
+ len = sym_op_info->src_len;
+ /* save the cipher result */
+ s = iov_from_buf(buf->sg, buf->num, 0, sym_op_info->dst, len);
+ assert(s == len);
+
+    virtio_crypto_unmap_iovec(buf, len, true);
+ g_free(buf);
+
+ if (sym_op_info->op_type ==
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ hwaddr digest_gpa;
+ void *digest_hva;
+
+ /* save the digest result */
+ digest_gpa = idata->digest_result_addr;
+ len = idata->digest_result_len;
+ if (len != sym_op_info->dst_len - sym_op_info->src_len) {
+ len = sym_op_info->dst_len - sym_op_info->src_len;
+ }
+ digest_hva = cpu_physical_memory_map(digest_gpa, &len, true);
+ /* find the digest result, then copy it into guest's memory */
+ memcpy(digest_hva, sym_op_info->dst + sym_op_info->src_len, len);
+ cpu_physical_memory_unmap(digest_hva, len, true, len);
+ }
+}
+
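+/*
+ * Parse one symmetric data request: build the backend op_info and
+ * locate the device-writable input area inside the guest's request.
+ */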
+static void
+virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
+ struct virtio_crypto_sym_data_req *req,
+ QCryptoCryptoDevBackendSymOpInfo **sym_op_info,
+ void **idata_hva,
+ VirtQueueElement *elem)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+ uint32_t op_type;
+ void *idata;
+ size_t idata_offset;
+ struct iovec *iov = elem->in_sg;
+ QCryptoCryptoDevBackendSymOpInfo *op_info;
+
+ op_type = req->op_type;
+
+ if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+ op_info = virtio_crypto_cipher_op_helper(vdev, &req->u.cipher.para,
+ &req->u.cipher.odata, NULL);
+ op_info->op_type = op_type;
+ /* calculate the offset of input data */
+ idata_offset = offsetof(struct virtio_crypto_op_data_req,
+ u.sym_req.u.cipher.idata.input);
+ idata = (void *)iov[0].iov_base + idata_offset;
+ } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+ /* cipher part */
+ op_info = virtio_crypto_cipher_op_helper(vdev, &req->u.cipher.para,
+ &req->u.cipher.odata,
+ &req->u.chain.odata.add_data);
+ op_info->op_type = op_type;
+
+ /* calculate the offset of input data */
+ idata_offset = offsetof(struct virtio_crypto_op_data_req,
+ u.sym_req.u.chain.idata.input);
+ idata = (void *)iov[0].iov_base + idata_offset;
+ } else {
+        /* VIRTIO_CRYPTO_SYM_OP_NONE or an unknown value */
+        error_report("virtio-crypto unsupported symmetric op type: %u",
+                     op_type);
+ exit(1);
+ }
+
+ *sym_op_info = op_info;
+ *idata_hva = idata;
+}
+
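+/*
+ * Read one virtio_crypto_op_data_req from the element, hand it to the
+ * cryptodev backend and complete the request with the result status.
+ */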
+static void
+virtio_crypto_handle_request(VirtIOCryptoReq *request)
+{
+ VirtIOCrypto *vcrypto = request->vcrypto;
+ VirtQueueElement *elem = &request->elem;
+ int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
+ struct virtio_crypto_op_data_req req;
+ size_t s;
+ int ret;
+ struct iovec *iov;
+ unsigned int iov_cnt;
+ uint32_t opcode, status = VIRTIO_CRYPTO_OP_ERR;
+ uint64_t session_id;
+ QCryptoCryptoDevBackendSymOpInfo *sym_op_info = NULL;
+ void *idata_hva = NULL;
+ Error *local_err = NULL;
+
+ if (elem->in_num < 1 ||
+ iov_size(elem->in_sg, elem->in_num) < sizeof(req)) {
+ error_report("virtio-crypto dataq missing headers");
+ exit(1);
+ }
+
+ iov_cnt = elem->in_num;
+ iov = elem->in_sg;
+
+ s = iov_to_buf(iov, iov_cnt, 0, &req, sizeof(req));
+ assert(s == sizeof(req));
+ opcode = req.header.opcode;
+ session_id = req.header.session_id;
+
+ switch (opcode) {
+ case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
+ case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+ virtio_crypto_handle_sym_req(vcrypto,
+ &req.u.sym_req,
+ &sym_op_info,
+ &idata_hva,
+ elem);
+ sym_op_info->session_id = session_id;
+
+ /* Set request's parameter */
+ request->flags = QCRYPTO_CRYPTODEV_BACKEND_ALG_SYM;
+ request->u.sym_op_info = sym_op_info;
+ request->idata_hva = idata_hva;
+ ret = qcrypto_cryptodev_backend_sym_operation(vcrypto->cryptodev,
+ sym_op_info, queue_index, &local_err);
+ if (ret < 0) {
+ status = VIRTIO_CRYPTO_OP_ERR;
+ if (local_err) {
+ error_report_err(local_err);
+ }
+ } else { /* ret >= 0 */
+ status = VIRTIO_CRYPTO_OP_OK;
+ }
+ virtio_crypto_req_complete(request, status);
+ virtio_crypto_free_request(request);
+ break;
+ case VIRTIO_CRYPTO_HASH:
+ case VIRTIO_CRYPTO_MAC:
+ case VIRTIO_CRYPTO_AEAD_ENCRYPT:
+ case VIRTIO_CRYPTO_AEAD_DECRYPT:
+ default:
+ error_report("virtio-crypto unsupported dataq opcode: %u",
+ opcode);
+ exit(1);
+ }
+}
+
static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
{
+ VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+ VirtIOCryptoReq *req;
+ while ((req = virtio_crypto_get_request(vcrypto, vq))) {
+ virtio_crypto_handle_request(req);
+ }
}
static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
@@ -36,6 +36,17 @@ do { printf("virtio_crypto: " fmt , ## __VA_ARGS__); } while (0)
#define VIRTIO_CRYPTO_GET_PARENT_CLASS(obj) \
OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_CRYPTO)
+/* Max entries of the scatter-gather list in one virtio-crypto buffer */
+#define VIRTIO_CRYPTO_SG_MAX 256
+
+typedef struct VirtIOCryptoBuffer {
+ unsigned int num;
+ /* Guest physical address */
+ hwaddr *addr;
+ /* Store host virtual address and length */
+ struct iovec *sg;
+ uint8_t data[0];
+} VirtIOCryptoBuffer;
typedef struct VirtIOCryptoConf {
QCryptoCryptoDevBackend *cryptodev;
 QCryptoCryptoDevBackend *cryptodev;

Firstly, I introduce the VirtIOCryptoReq structure to store a crypto request, so that we can support both sync and async crypto operations in the future. Secondly, the VirtIOCryptoBuffer structure is used to support scatter-gather lists for the source data, destination data and associated authentication data, according to the virtio crypto specification. At present, only cipher and algorithm chaining are supported.

Signed-off-by: Gonglei <arei.gonglei@huawei.com>
---
 hw/virtio/virtio-crypto.c         | 438 ++++++++++++++++++++++++++++++++++++++
 include/hw/virtio/virtio-crypto.h |  11 +
 2 files changed, 449 insertions(+)
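The include/hw/virtio/virtio-crypto.h side of the patch is not shown in
this hunk. For readers following the code above, here is a minimal
sketch of what the VirtIOCryptoReq addition presumably looks like,
reconstructed only from how the .c code uses it; the exact field order
(apart from elem, which virtqueue_pop() requires to be first) and any
extra fields in the real header are assumptions:

    typedef struct VirtIOCryptoReq {
        VirtQueueElement elem;  /* must be first: virtqueue_pop() fills it */
        VirtQueue *vq;          /* dataq the request was popped from */
        VirtIOCrypto *vcrypto;  /* owning device */
        uint32_t flags;         /* QCRYPTO_CRYPTODEV_BACKEND_ALG_* */
        void *idata_hva;        /* HVA of the guest's sym_input area */
        union {
            QCryptoCryptoDevBackendSymOpInfo *sym_op_info;
        } u;
    } VirtIOCryptoReq;

virtio_crypto_get_request() relies on the QEMU convention that the
VirtQueueElement is the first member, since virtqueue_pop() allocates
sizeof(VirtIOCryptoReq) bytes and initializes the element in place.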