
[v7,09/12] virtio-crypto: add data queue processing handler

Message ID 1476342726-104488-10-git-send-email-arei.gonglei@huawei.com (mailing list archive)
State New, archived

Commit Message

Gonglei (Arei) Oct. 13, 2016, 7:12 a.m. UTC
Introduces VirtIOCryptoReq structure to store
crypto request so that we can support sync and async
crypto operation in the future.

At present, we only support cipher and algorithm
chainning.

Signed-off-by: Gonglei <arei.gonglei@huawei.com>
---
 hw/virtio/virtio-crypto.c         | 338 +++++++++++++++++++++++++++++++++++++-
 include/hw/virtio/virtio-crypto.h |  10 +-
 2 files changed, 345 insertions(+), 3 deletions(-)

Comments

Stefan Hajnoczi Oct. 16, 2016, 1:22 p.m. UTC | #1
On Thu, Oct 13, 2016 at 03:12:03PM +0800, Gonglei wrote:
> Introduces VirtIOCryptoReq structure to store
> crypto request so that we can support sync and async
> crypto operation in the future.

What do you mean by "sync and async" operations?

> 
> At present, we only support cipher and algorithm
> chainning.

s/chainning/chaining/

> 
> Signed-off-by: Gonglei <arei.gonglei@huawei.com>
> ---
>  hw/virtio/virtio-crypto.c         | 338 +++++++++++++++++++++++++++++++++++++-
>  include/hw/virtio/virtio-crypto.h |  10 +-
>  2 files changed, 345 insertions(+), 3 deletions(-)
> 
> diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> index db86796..094bc48 100644
> --- a/hw/virtio/virtio-crypto.c
> +++ b/hw/virtio/virtio-crypto.c
> @@ -306,6 +306,342 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
>      } /* end for loop */
>  }
>  
> +static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
> +                                VirtIOCryptoReq *req)
> +{
> +    req->vcrypto = vcrypto;
> +    req->vq = vq;
> +    req->in = NULL;
> +    req->in_iov = NULL;
> +    req->in_num = 0;
> +    req->in_len = 0;
> +    req->flags = CRYPTODEV_BACKEND_ALG__MAX;
> +    req->u.sym_op_info = NULL;
> +}
> +
> +static void virtio_crypto_free_request(VirtIOCryptoReq *req)
> +{
> +    if (req) {
> +        if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> +            g_free(req->u.sym_op_info);
> +        }
> +        g_free(req);
> +    }
> +}
> +
> +static void
> +virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
> +                VirtIOCryptoReq *req,
> +                uint32_t status,
> +                CryptoDevBackendSymOpInfo *sym_op_info)
> +{
> +    size_t s, len;
> +
> +    if (status != VIRTIO_CRYPTO_OK) {
> +        return;
> +    }
> +
> +    len = sym_op_info->dst_len;
> +    /* Save the cipher result */
> +    s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
> +    if (s != len) {
> +        virtio_error(vdev, "virtio-crypto dest data incorrect");
> +        return;
> +    }
> +
> +    iov_discard_front(&req->in_iov, &req->in_num, len);
> +
> +    if (sym_op_info->op_type ==
> +                      VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
> +        /* Save the digest result */
> +        s = iov_from_buf(req->in_iov, req->in_num, 0,
> +                         sym_op_info->digest_result,
> +                         sym_op_info->digest_result_len);
> +        if (s != sym_op_info->digest_result_len) {
> +            virtio_error(vdev, "virtio-crypto digest result incorrect");
> +        }
> +    }
> +}
> +
> +static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint32_t status)
> +{
> +    VirtIOCrypto *vcrypto = req->vcrypto;
> +    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> +
> +    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> +        virtio_crypto_sym_input_data_helper(vdev, req, status,
> +                                            req->u.sym_op_info);
> +    }
> +    stl_he_p(&req->in->status, status);

This should be virtio_stl_p().
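
For example (just a sketch; virtio_stl_p() stores the value in the byte
order negotiated between device and driver rather than host-endian):

    virtio_stl_p(vdev, &req->in->status, status);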

> +    virtqueue_push(req->vq, &req->elem, req->in_len);
> +    virtio_notify(vdev, req->vq);
> +}
> +
> +static VirtIOCryptoReq *
> +virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
> +{
> +    VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
> +
> +    if (req) {
> +        virtio_crypto_init_request(s, vq, req);
> +    }
> +    return req;
> +}
> +
> +static CryptoDevBackendSymOpInfo *
> +virtio_crypto_sym_op_helper(VirtIODevice *vdev,
> +           struct virtio_crypto_cipher_para *para,
> +           uint32_t aad_len,
> +           struct iovec *iov, unsigned int out_num,
> +           uint32_t hash_result_len,
> +           uint32_t hash_start_src_offset)
> +{
> +    CryptoDevBackendSymOpInfo *op_info;
> +    uint32_t src_len, dst_len;
> +    uint32_t iv_len;
> +    size_t max_len, curr_size = 0;
> +    size_t s;
> +
> +    iv_len = virtio_ldl_p(vdev, &para->iv_len);
> +    src_len = virtio_ldl_p(vdev, &para->src_data_len);
> +    dst_len = virtio_ldl_p(vdev, &para->dst_data_len);
> +
> +    max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;

Integer overflow checks are needed here to prevent memory corruption
later on.

Imagine a 32-bit host where sizeof(max_len) == 4 and iv_len + aad_len +
... == UINT32_MAX + 1.  The malloc below will allocate just 1 byte
instead of UINT32_MAX + 1 due to overflow.
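
One way to guard against that is to accumulate the lengths in 64 bits
and reject oversized requests before allocating (a sketch, not
necessarily the exact fix):

    uint64_t max_len;

    max_len = (uint64_t)iv_len + aad_len + src_len + dst_len +
              hash_result_len;
    if (unlikely(max_len > SIZE_MAX - sizeof(CryptoDevBackendSymOpInfo))) {
        virtio_error(vdev, "virtio-crypto request length overflow");
        return NULL;
    }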

> +    op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
> +    op_info->iv_len = iv_len;
> +    op_info->src_len = src_len;
> +    op_info->dst_len = dst_len;
> +    op_info->aad_len = aad_len;
> +    op_info->digest_result_len = hash_result_len;
> +    op_info->hash_start_src_offset = hash_start_src_offset;
> +    /* Handle the initilization vector */
> +    if (op_info->iv_len > 0) {
> +        DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
> +        op_info->iv = op_info->data + curr_size;
> +
> +        s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
> +        if (unlikely(s != op_info->iv_len)) {
> +            virtio_error(vdev, "virtio-crypto iv incorrect");
> +            goto err;
> +        }
> +        iov_discard_front(&iov, &out_num, op_info->iv_len);
> +        curr_size += op_info->iv_len;
> +    }
> +
> +    /* Handle additional authentication data if exist */
> +    if (op_info->aad_len > 0) {
> +        DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
> +        op_info->aad_data = op_info->data + curr_size;
> +
> +        s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
> +        if (unlikely(s != op_info->aad_len)) {
> +            virtio_error(vdev, "virtio-crypto additional auth data incorrect");
> +            goto err;
> +        }
> +        iov_discard_front(&iov, &out_num, op_info->aad_len);
> +
> +        curr_size += op_info->aad_len;
> +    }
> +
> +    /* Handle the source data */
> +    if (op_info->src_len > 0) {
> +        DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
> +        op_info->src = op_info->data + curr_size;
> +
> +        s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
> +        if (unlikely(s != op_info->src_len)) {
> +            virtio_error(vdev, "virtio-crypto source data incorrect");
> +            goto err;
> +        }
> +        iov_discard_front(&iov, &out_num, op_info->src_len);
> +
> +        curr_size += op_info->src_len;
> +    }
> +
> +    /* Handle the destination data */
> +    op_info->dst = op_info->data + curr_size;
> +    curr_size += op_info->dst_len;
> +
> +    DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
> +
> +    /* Handle the hash digest result */
> +    if (hash_result_len > 0) {
> +        DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
> +        op_info->digest_result = op_info->data + curr_size;
> +    }
> +
> +    return op_info;
> +
> +err:
> +    g_free(op_info);
> +    return NULL;
> +}
> +
> +static int
> +virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
> +               struct virtio_crypto_sym_data_req *req,
> +               CryptoDevBackendSymOpInfo **sym_op_info,
> +               struct iovec *iov, unsigned int out_num)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> +    uint32_t op_type;
> +    CryptoDevBackendSymOpInfo *op_info;
> +
> +    op_type = virtio_ldl_p(vdev, &req->op_type);
> +
> +    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
> +        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
> +                                              0, iov, out_num, 0, 0);
> +        if (!op_info) {
> +            return -EFAULT;
> +        }
> +        op_info->op_type = op_type;
> +    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
> +        uint32_t aad_len, hash_result_len;
> +        uint32_t hash_start_src_offset;
> +
> +        aad_len = virtio_ldl_p(vdev, &req->u.chain.para.aad_len);
> +        hash_result_len = virtio_ldl_p(vdev,
> +                              &req->u.chain.para.hash_result_len);
> +        hash_start_src_offset = virtio_ldl_p(vdev,
> +                         &req->u.chain.para.hash_start_src_offset);
> +        /* cipher part */
> +        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.chain.para.cipher,
> +                                              aad_len, iov, out_num,
> +                                              hash_result_len,
> +                                              hash_start_src_offset);
> +        if (!op_info) {
> +            return -EFAULT;
> +        }
> +        op_info->op_type = op_type;
> +    } else {
> +        /* VIRTIO_CRYPTO_SYM_OP_NONE */
> +        error_report("virtio-crypto unsupported cipher type");
> +        return -VIRTIO_CRYPTO_NOTSUPP;
> +    }
> +
> +    *sym_op_info = op_info;
> +
> +    return 0;
> +}
> +
> +static int
> +virtio_crypto_handle_request(VirtIOCryptoReq *request)
> +{
> +    VirtIOCrypto *vcrypto = request->vcrypto;
> +    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> +    VirtQueueElement *elem = &request->elem;
> +    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
> +    struct virtio_crypto_op_data_req req;
> +    int ret;
> +    struct iovec *in_iov;
> +    struct iovec *out_iov;
> +    unsigned in_num;
> +    unsigned out_num;
> +    uint32_t opcode, status = VIRTIO_CRYPTO_ERR;
> +    uint64_t session_id;
> +    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
> +    Error *local_err = NULL;
> +
> +    if (elem->out_num < 1 || elem->in_num < 1) {
> +        virtio_error(vdev, "virtio-crypto dataq missing headers");
> +        return -1;
> +    }
> +
> +    out_num = elem->out_num;
> +    out_iov = elem->out_sg;
> +    in_num = elem->in_num;
> +    in_iov = elem->in_sg;
> +    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
> +                != sizeof(req))) {
> +        virtio_error(vdev, "virtio-crypto request outhdr too short");
> +        return -1;
> +    }
> +    iov_discard_front(&out_iov, &out_num, sizeof(req));
> +
> +    if (in_iov[in_num - 1].iov_len <
> +            sizeof(struct virtio_crypto_inhdr)) {
> +        virtio_error(vdev, "virtio-crypto request inhdr too short");
> +        return -1;
> +    }
> +
> +    request->in_len = iov_size(in_iov, in_num);
> +    request->in = (void *)in_iov[in_num - 1].iov_base
> +              + in_iov[in_num - 1].iov_len
> +              - sizeof(struct virtio_crypto_inhdr);
> +    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));
> +
> +    /*
> +     * The length of operation result, including dest_data
> +     * and digest_result if exist.
> +     */
> +    request->in_num = in_num;
> +    request->in_iov = in_iov;
> +
> +    opcode = virtio_ldl_p(vdev, &req.header.opcode);
> +    session_id = virtio_ldq_p(vdev, &req.header.session_id);
> +
> +    switch (opcode) {
> +    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
> +    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
> +        ret = virtio_crypto_handle_sym_req(vcrypto,
> +                         &req.u.sym_req,
> +                         &sym_op_info,
> +                         out_iov, out_num);
> +        /* Serious errors, need to reset virtio crypto device */
> +        if (ret == -EFAULT) {
> +            return -1;
> +        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
> +            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
> +            virtio_crypto_free_request(request);
> +        } else {
> +            sym_op_info->session_id = session_id;
> +
> +            /* Set request's parameter */
> +            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
> +            request->u.sym_op_info = sym_op_info;
> +            ret = cryptodev_backend_sym_operation(vcrypto->cryptodev,
> +                                    sym_op_info, queue_index, &local_err);
> +            if (ret < 0) {
> +                status = VIRTIO_CRYPTO_ERR;
> +                if (local_err) {
> +                    error_report_err(local_err);
> +                }
> +            } else { /* ret >= 0 */
> +                status = VIRTIO_CRYPTO_OK;
> +            }
> +            virtio_crypto_req_complete(request, status);
> +            virtio_crypto_free_request(request);

Shouldn't the operation be asynchronous from the start?  There is no
point in having a 1024 element virtqueue if you can only process one
element at a time.

> +        }
> +        break;
> +    case VIRTIO_CRYPTO_HASH:
> +    case VIRTIO_CRYPTO_MAC:
> +    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
> +    case VIRTIO_CRYPTO_AEAD_DECRYPT:
> +    default:
> +        error_report("virtio-crypto unsupported dataq opcode: %u",
> +                     opcode);
> +        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
> +        virtio_crypto_free_request(request);
> +    }
> +
> +    return 0;
> +}
> +
> +static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
> +{
> +    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
> +    VirtIOCryptoReq *req;
> +
> +    while ((req = virtio_crypto_get_request(vcrypto, vq))) {
> +        if (virtio_crypto_handle_request(req) < 0) {
> +            virtqueue_detach_element(req->vq, &req->elem, 0);
> +            virtio_crypto_free_request(req);
> +            break;
> +        }
> +    }
> +}
> +
>  static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
>                                             uint64_t features,
>                                             Error **errp)
> @@ -365,7 +701,7 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
>      vcrypto->curr_queues = 1;
>  
>      for (i = 0; i < vcrypto->max_queues; i++) {
> -        virtio_add_queue(vdev, 1024, NULL);
> +        virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq);
>      }
>  
>      vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
> diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
> index a5e826c..1427d39 100644
> --- a/include/hw/virtio/virtio-crypto.h
> +++ b/include/hw/virtio/virtio-crypto.h
> @@ -36,6 +36,10 @@ do { \
>  #define VIRTIO_CRYPTO_GET_PARENT_CLASS(obj) \
>          OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_CRYPTO)
>  
> +/* This is the last element of the write scatter-gather list */

This comment is misleading.  The last element of the scatter-gather list
could contain any number of other structs too.  Remember the
specification says virtio devices cannot rely on specific iovec
boundaries.
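
For example, a boundary-agnostic way to fill in a header at the end of
the in space is to go through the iovec helpers instead of assuming the
struct occupies one element (sketch):

    /* write the status through the iovecs, wherever it lands */
    struct virtio_crypto_inhdr hdr;
    size_t in_len = iov_size(in_iov, in_num);

    virtio_stl_p(vdev, &hdr.status, status);
    iov_from_buf(in_iov, in_num, in_len - sizeof(hdr), &hdr, sizeof(hdr));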

> +struct virtio_crypto_inhdr {
> +    uint32_t status;
> +};

Why is this defined here?  I think it should be in virtio_crypto.h and
the field type should be __virtio32.

>  
>  typedef struct VirtIOCryptoConf {
>      CryptoDevBackend *cryptodev;
> @@ -61,8 +65,10 @@ typedef struct VirtIOCryptoReq {
>      VirtQueueElement elem;
>      /* flags of operation, such as type of algorithm */
>      uint32_t flags;
> -    /* address of in data (Device to Driver) */
> -    void *idata_hva;

This field was unused.  Please remove its definition and wait until this
patch to define it properly.

This way the code is easier to review because there are no unused fields
that the reviewer has to guess about when reading the code.

> +    struct virtio_crypto_inhdr *in;
> +    struct iovec *in_iov; /* Head address of dest iovec */
> +    unsigned int in_num; /* Number of dest iovec */
> +    size_t in_len;
>      VirtQueue *vq;
>      struct VirtIOCrypto *vcrypto;
>      union {
> -- 
> 1.8.3.1
> 
Gonglei (Arei) Oct. 17, 2016, 6:29 a.m. UTC | #2
> -----Original Message-----
> From: Stefan Hajnoczi [mailto:stefanha@gmail.com]
> Sent: Sunday, October 16, 2016 9:23 PM
> Subject: Re: [Qemu-devel] [PATCH v7 09/12] virtio-crypto: add data queue
> processing handler
> 
> On Thu, Oct 13, 2016 at 03:12:03PM +0800, Gonglei wrote:
> > Introduces VirtIOCryptoReq structure to store
> > crypto request so that we can support sync and async
> > crypto operation in the future.
> 
> What do you mean by "sync and async" operations?
> 
Synchronous and asynchronous.

> >
> > At present, we only support cipher and algorithm
> > chainning.
> 
> s/chainning/chaining/
> 
Will fix it.

> > [...]
> > +static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint32_t status)
> > +{
> > [...]
> > +    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> > +        virtio_crypto_sym_input_data_helper(vdev, req, status,
> > +                                            req->u.sym_op_info);
> > +    }
> > +    stl_he_p(&req->in->status, status);
> 
> This should be virtio_stl_p().
> 
> > [...]
> > +    iv_len = virtio_ldl_p(vdev, &para->iv_len);
> > +    src_len = virtio_ldl_p(vdev, &para->src_data_len);
> > +    dst_len = virtio_ldl_p(vdev, &para->dst_data_len);
> > +
> > +    max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
> 
> Integer overflow checks are needed here to prevent memory corruption
> later on.
> 
> Imagine a 32-bit host where sizeof(max_len) == 4 and iv_len + aad_len +
> ... == UINT32_MAX + 1.  The malloc below will allocate just 1 byte
> instead of UINT32_MAX + 1 due to overflow.
> 
OK. Will fix it.

> > [...]
> > +            virtio_crypto_req_complete(request, status);
> > +            virtio_crypto_free_request(request);
> 
> Shouldn't the operation be asynchronous from the start?  There is no
> point in having a 1024 element virtqueue if you can only process one
> element at a time.
> 
Good insight. Currently I only implement the synchronous operation for the
QEMU builtin backend, but we can implement the asynchronous operation in
the future.

And for cryptodev-vhost-user, we can get better performance using 1024
as the data virtqueue's size, so I'd like to keep it by default.

> > [...]
> > +/* This is the last element of the write scatter-gather list */
> 
> This comment is misleading.  The last element of the scatter-gather list
> could contain any number of other structs too.  Remember the
> specification says virtio devices cannot rely on specific iovec
> boundaries.
> 
OK, will remove it in the next version.

> > +struct virtio_crypto_inhdr {
> > +    uint32_t status;
> > +};
> 
> Why is this defined here?  I think it should be in virtio_crypto.h and
> the field type should be __virtio32.
> 
Yes. I'd like to define the status field as uint8_t because that's enough.
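
I.e., something like this in the standard header (assuming a one-byte
status is acceptable to the spec):

    struct virtio_crypto_inhdr {
        __u8 status;
    };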

> >
> >  typedef struct VirtIOCryptoConf {
> >      CryptoDevBackend *cryptodev;
> > @@ -61,8 +65,10 @@ typedef struct VirtIOCryptoReq {
> >      VirtQueueElement elem;
> >      /* flags of operation, such as type of algorithm */
> >      uint32_t flags;
> > -    /* address of in data (Device to Driver) */
> > -    void *idata_hva;
> 
> This field was unused.  Please remove its definition and wait until this
> patch to define it properly.
> 
> This way the code is easier to review because there are no unused fields
> that the reviewer has to guess about when reading the code.
> 
You are right, it's useless here. Let's drop it.

Regards,
-Gonglei
Stefan Hajnoczi Oct. 18, 2016, 10:08 a.m. UTC | #3
On Mon, Oct 17, 2016 at 06:29:42AM +0000, Gonglei (Arei) wrote:
> 
> > -----Original Message-----
> > From: Stefan Hajnoczi [mailto:stefanha@gmail.com]
> > Sent: Sunday, October 16, 2016 9:23 PM
> > Subject: Re: [Qemu-devel] [PATCH v7 09/12] virtio-crypto: add data queue
> > processing handler
> > 
> > On Thu, Oct 13, 2016 at 03:12:03PM +0800, Gonglei wrote:
> > > Introduces VirtIOCryptoReq structure to store
> > > crypto request so that we can support sync and async
> > > crypto operation in the future.
> > 
> > What do you mean by "sync and async" operations?
> > 
> Synchronous and asynchronous.

I understand the words but:

1. The virtio interface is always asynchronous because request
   submission and completion on the virtqueue are separate steps (kick
   and irq).  The guest always thinks the operation is asynchronous.

2. If you implement operations synchronously inside QEMU then the monitor
   and other QEMU threads could be blocked.  Also you will not be able
   to take advantage of parallel requests to the hardware crypto
   accelerator!

So I think it makes sense to implement everything asynchronously.
Gonglei (Arei) Oct. 19, 2016, 3:43 a.m. UTC | #4
> -----Original Message-----
> From: Stefan Hajnoczi [mailto:stefanha@redhat.com]
> Sent: Tuesday, October 18, 2016 6:09 PM
> Subject: Re: [virtio-dev] RE: [Qemu-devel] [PATCH v7 09/12] virtio-crypto: add
> data queue processing handler
> 
> On Mon, Oct 17, 2016 at 06:29:42AM +0000, Gonglei (Arei) wrote:
> >
> > > -----Original Message-----
> > > From: Stefan Hajnoczi [mailto:stefanha@gmail.com]
> > > Sent: Sunday, October 16, 2016 9:23 PM
> > > Subject: Re: [Qemu-devel] [PATCH v7 09/12] virtio-crypto: add data queue
> > > processing handler
> > >
> > > On Thu, Oct 13, 2016 at 03:12:03PM +0800, Gonglei wrote:
> > > > Introduces VirtIOCryptoReq structure to store
> > > > crypto request so that we can support sync and async
> > > > crypto operation in the future.
> > >
> > > What do you mean by "sync and async" operations?
> > >
> > Synchronous and asynchronous.
> 
> I understand the words but:
> 
Sorry for my misunderstanding ;)

> 1. The virtio interface is always asynchronous because request
>    submission and completion on the virtqueue are separate steps (kick
>    and irq).  The guest always thinks the operation is asynchronous.
> 
Yes, that's right.

> 2. If you implement operations synchronously inside QEMU then the monitor
>    and other QEMU threads could be blocked. Also you will not be able
>    to take advantage of parallel requests to the hardware crypto
>    accelerator!
> 
Correct.

Different cryptodev backends, especially hardware crypto accelerators,
usually provide asynchronous APIs for crypto operations, such as
registering a callback for each crypto operation. When the crypto
operation finishes, the callback is invoked.
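
Such an API usually looks something like this (hypothetical names and
signature, modeled on the existing cryptodev_backend_sym_operation()):

    typedef void (*CryptoDevCompletionFunc)(void *opaque, int status);

    int cryptodev_backend_sym_operation_async(CryptoDevBackend *backend,
                                              CryptoDevBackendSymOpInfo *op_info,
                                              uint32_t queue_index,
                                              CryptoDevCompletionFunc cb,
                                              void *opaque,
                                              Error **errp);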

> So I think it makes sense to implement everything asynchronously.

Yes. I can register a bh to execute the crypto operations asynchronously
instead of calling the cryptodev crypto API directly in the data virtqueue
handling context.
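
A rough sketch of the bh approach (hypothetical names, only to show the
shape; the vq handler would queue requests and the bh would submit them
to the backend outside the handler context):

    static void virtio_crypto_dataq_bh(void *opaque)
    {
        VirtIOCrypto *vcrypto = opaque;

        /* pop requests queued by the vq handler and hand them to the
         * cryptodev backend attached to vcrypto; the backend's
         * completion later calls virtio_crypto_req_complete() */
    }

    /* in realize(): */
    vcrypto->dataq_bh = qemu_bh_new(virtio_crypto_dataq_bh, vcrypto);

    /* in virtio_crypto_handle_dataq(): */
    qemu_bh_schedule(vcrypto->dataq_bh);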


Regards,
-Gonglei

Patch

diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index db86796..094bc48 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -306,6 +306,342 @@  static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
     } /* end for loop */
 }
 
+static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
+                                VirtIOCryptoReq *req)
+{
+    req->vcrypto = vcrypto;
+    req->vq = vq;
+    req->in = NULL;
+    req->in_iov = NULL;
+    req->in_num = 0;
+    req->in_len = 0;
+    req->flags = CRYPTODEV_BACKEND_ALG__MAX;
+    req->u.sym_op_info = NULL;
+}
+
+static void virtio_crypto_free_request(VirtIOCryptoReq *req)
+{
+    if (req) {
+        if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+            g_free(req->u.sym_op_info);
+        }
+        g_free(req);
+    }
+}
+
+static void
+virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
+                VirtIOCryptoReq *req,
+                uint32_t status,
+                CryptoDevBackendSymOpInfo *sym_op_info)
+{
+    size_t s, len;
+
+    if (status != VIRTIO_CRYPTO_OK) {
+        return;
+    }
+
+    len = sym_op_info->dst_len;
+    /* Save the cipher result */
+    s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
+    if (s != len) {
+        virtio_error(vdev, "virtio-crypto dest data incorrect");
+        return;
+    }
+
+    iov_discard_front(&req->in_iov, &req->in_num, len);
+
+    if (sym_op_info->op_type ==
+                      VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+        /* Save the digest result */
+        s = iov_from_buf(req->in_iov, req->in_num, 0,
+                         sym_op_info->digest_result,
+                         sym_op_info->digest_result_len);
+        if (s != sym_op_info->digest_result_len) {
+            virtio_error(vdev, "virtio-crypto digest result incorrect");
+        }
+    }
+}
+
+static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint32_t status)
+{
+    VirtIOCrypto *vcrypto = req->vcrypto;
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+
+    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+        virtio_crypto_sym_input_data_helper(vdev, req, status,
+                                            req->u.sym_op_info);
+    }
+    stl_he_p(&req->in->status, status);
+    virtqueue_push(req->vq, &req->elem, req->in_len);
+    virtio_notify(vdev, req->vq);
+}
+
+static VirtIOCryptoReq *
+virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
+{
+    VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
+
+    if (req) {
+        virtio_crypto_init_request(s, vq, req);
+    }
+    return req;
+}
+
+static CryptoDevBackendSymOpInfo *
+virtio_crypto_sym_op_helper(VirtIODevice *vdev,
+           struct virtio_crypto_cipher_para *para,
+           uint32_t aad_len,
+           struct iovec *iov, unsigned int out_num,
+           uint32_t hash_result_len,
+           uint32_t hash_start_src_offset)
+{
+    CryptoDevBackendSymOpInfo *op_info;
+    uint32_t src_len, dst_len;
+    uint32_t iv_len;
+    size_t max_len, curr_size = 0;
+    size_t s;
+
+    iv_len = virtio_ldl_p(vdev, &para->iv_len);
+    src_len = virtio_ldl_p(vdev, &para->src_data_len);
+    dst_len = virtio_ldl_p(vdev, &para->dst_data_len);
+
+    max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
+    op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
+    op_info->iv_len = iv_len;
+    op_info->src_len = src_len;
+    op_info->dst_len = dst_len;
+    op_info->aad_len = aad_len;
+    op_info->digest_result_len = hash_result_len;
+    op_info->hash_start_src_offset = hash_start_src_offset;
+    /* Handle the initilization vector */
+    if (op_info->iv_len > 0) {
+        DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
+        op_info->iv = op_info->data + curr_size;
+
+        s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
+        if (unlikely(s != op_info->iv_len)) {
+            virtio_error(vdev, "virtio-crypto iv incorrect");
+            goto err;
+        }
+        iov_discard_front(&iov, &out_num, op_info->iv_len);
+        curr_size += op_info->iv_len;
+    }
+
+    /* Handle additional authentication data if exist */
+    if (op_info->aad_len > 0) {
+        DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
+        op_info->aad_data = op_info->data + curr_size;
+
+        s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
+        if (unlikely(s != op_info->aad_len)) {
+            virtio_error(vdev, "virtio-crypto additional auth data incorrect");
+            goto err;
+        }
+        iov_discard_front(&iov, &out_num, op_info->aad_len);
+
+        curr_size += op_info->aad_len;
+    }
+
+    /* Handle the source data */
+    if (op_info->src_len > 0) {
+        DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
+        op_info->src = op_info->data + curr_size;
+
+        s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
+        if (unlikely(s != op_info->src_len)) {
+            virtio_error(vdev, "virtio-crypto source data incorrect");
+            goto err;
+        }
+        iov_discard_front(&iov, &out_num, op_info->src_len);
+
+        curr_size += op_info->src_len;
+    }
+
+    /* Handle the destination data */
+    op_info->dst = op_info->data + curr_size;
+    curr_size += op_info->dst_len;
+
+    DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
+
+    /* Handle the hash digest result */
+    if (hash_result_len > 0) {
+        DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
+        op_info->digest_result = op_info->data + curr_size;
+    }
+
+    return op_info;
+
+err:
+    g_free(op_info);
+    return NULL;
+}
+
+static int
+virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
+               struct virtio_crypto_sym_data_req *req,
+               CryptoDevBackendSymOpInfo **sym_op_info,
+               struct iovec *iov, unsigned int out_num)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+    uint32_t op_type;
+    CryptoDevBackendSymOpInfo *op_info;
+
+    op_type = virtio_ldl_p(vdev, &req->op_type);
+
+    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
+                                              0, iov, out_num, 0, 0);
+        if (!op_info) {
+            return -EFAULT;
+        }
+        op_info->op_type = op_type;
+    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+        uint32_t aad_len, hash_result_len;
+        uint32_t hash_start_src_offset;
+
+        aad_len = virtio_ldl_p(vdev, &req->u.chain.para.aad_len);
+        hash_result_len = virtio_ldl_p(vdev,
+                              &req->u.chain.para.hash_result_len);
+        hash_start_src_offset = virtio_ldl_p(vdev,
+                         &req->u.chain.para.hash_start_src_offset);
+        /* cipher part */
+        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.chain.para.cipher,
+                                              aad_len, iov, out_num,
+                                              hash_result_len,
+                                              hash_start_src_offset);
+        if (!op_info) {
+            return -EFAULT;
+        }
+        op_info->op_type = op_type;
+    } else {
+        /* VIRTIO_CRYPTO_SYM_OP_NONE */
+        error_report("virtio-crypto unsupported cipher type");
+        return -VIRTIO_CRYPTO_NOTSUPP;
+    }
+
+    *sym_op_info = op_info;
+
+    return 0;
+}
+
+static int
+virtio_crypto_handle_request(VirtIOCryptoReq *request)
+{
+    VirtIOCrypto *vcrypto = request->vcrypto;
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+    VirtQueueElement *elem = &request->elem;
+    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
+    struct virtio_crypto_op_data_req req;
+    int ret;
+    struct iovec *in_iov;
+    struct iovec *out_iov;
+    unsigned in_num;
+    unsigned out_num;
+    uint32_t opcode, status = VIRTIO_CRYPTO_ERR;
+    uint64_t session_id;
+    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
+    Error *local_err = NULL;
+
+    if (elem->out_num < 1 || elem->in_num < 1) {
+        virtio_error(vdev, "virtio-crypto dataq missing headers");
+        return -1;
+    }
+
+    out_num = elem->out_num;
+    out_iov = elem->out_sg;
+    in_num = elem->in_num;
+    in_iov = elem->in_sg;
+    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
+                != sizeof(req))) {
+        virtio_error(vdev, "virtio-crypto request outhdr too short");
+        return -1;
+    }
+    iov_discard_front(&out_iov, &out_num, sizeof(req));
+
+    if (in_iov[in_num - 1].iov_len <
+            sizeof(struct virtio_crypto_inhdr)) {
+        virtio_error(vdev, "virtio-crypto request inhdr too short");
+        return -1;
+    }
+
+    request->in_len = iov_size(in_iov, in_num);
+    request->in = (void *)in_iov[in_num - 1].iov_base
+              + in_iov[in_num - 1].iov_len
+              - sizeof(struct virtio_crypto_inhdr);
+    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));
+
+    /*
+     * The length of operation result, including dest_data
+     * and digest_result if exist.
+     */
+    request->in_num = in_num;
+    request->in_iov = in_iov;
+
+    opcode = virtio_ldl_p(vdev, &req.header.opcode);
+    session_id = virtio_ldq_p(vdev, &req.header.session_id);
+
+    switch (opcode) {
+    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
+    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+        ret = virtio_crypto_handle_sym_req(vcrypto,
+                         &req.u.sym_req,
+                         &sym_op_info,
+                         out_iov, out_num);
+        /* Serious errors, need to reset virtio crypto device */
+        if (ret == -EFAULT) {
+            return -1;
+        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
+            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
+            virtio_crypto_free_request(request);
+        } else {
+            sym_op_info->session_id = session_id;
+
+            /* Set request's parameter */
+            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
+            request->u.sym_op_info = sym_op_info;
+            ret = cryptodev_backend_sym_operation(vcrypto->cryptodev,
+                                    sym_op_info, queue_index, &local_err);
+            if (ret < 0) {
+                status = VIRTIO_CRYPTO_ERR;
+                if (local_err) {
+                    error_report_err(local_err);
+                }
+            } else { /* ret >= 0 */
+                status = VIRTIO_CRYPTO_OK;
+            }
+            virtio_crypto_req_complete(request, status);
+            virtio_crypto_free_request(request);
+        }
+        break;
+    case VIRTIO_CRYPTO_HASH:
+    case VIRTIO_CRYPTO_MAC:
+    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
+    case VIRTIO_CRYPTO_AEAD_DECRYPT:
+    default:
+        error_report("virtio-crypto unsupported dataq opcode: %u",
+                     opcode);
+        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
+        virtio_crypto_free_request(request);
+    }
+
+    return 0;
+}
+
+static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+    VirtIOCryptoReq *req;
+
+    while ((req = virtio_crypto_get_request(vcrypto, vq))) {
+        if (virtio_crypto_handle_request(req) < 0) {
+            virtqueue_detach_element(req->vq, &req->elem, 0);
+            virtio_crypto_free_request(req);
+            break;
+        }
+    }
+}
+
 static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
                                            uint64_t features,
                                            Error **errp)
@@ -365,7 +701,7 @@  static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
     vcrypto->curr_queues = 1;
 
     for (i = 0; i < vcrypto->max_queues; i++) {
-        virtio_add_queue(vdev, 1024, NULL);
+        virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq);
     }
 
     vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
index a5e826c..1427d39 100644
--- a/include/hw/virtio/virtio-crypto.h
+++ b/include/hw/virtio/virtio-crypto.h
@@ -36,6 +36,10 @@  do { \
 #define VIRTIO_CRYPTO_GET_PARENT_CLASS(obj) \
         OBJECT_GET_PARENT_CLASS(obj, TYPE_VIRTIO_CRYPTO)
 
+/* This is the last element of the write scatter-gather list */
+struct virtio_crypto_inhdr {
+    uint32_t status;
+};
 
 typedef struct VirtIOCryptoConf {
     CryptoDevBackend *cryptodev;
@@ -61,8 +65,10 @@  typedef struct VirtIOCryptoReq {
     VirtQueueElement elem;
     /* flags of operation, such as type of algorithm */
     uint32_t flags;
-    /* address of in data (Device to Driver) */
-    void *idata_hva;
+    struct virtio_crypto_inhdr *in;
+    struct iovec *in_iov; /* Head address of dest iovec */
+    unsigned int in_num; /* Number of dest iovec */
+    size_t in_len;
     VirtQueue *vq;
     struct VirtIOCrypto *vcrypto;
     union {