
[v9,09/12] virtio-crypto: add data queue processing handler

Message ID 1476963957-176296-10-git-send-email-arei.gonglei@huawei.com
State New

Commit Message

Gonglei (Arei) Oct. 20, 2016, 11:45 a.m. UTC
Introduces VirtIOCryptoReq structure to store
crypto request so that we can easily support
asynchronous crypto operation in the future.

At present, we only support cipher and algorithm
chaining.

Signed-off-by: Gonglei <arei.gonglei@huawei.com>
---
 hw/virtio/virtio-crypto.c         | 358 +++++++++++++++++++++++++++++++++++++-
 include/hw/virtio/virtio-crypto.h |   4 +
 2 files changed, 361 insertions(+), 1 deletion(-)

Comments

Michael S. Tsirkin Oct. 27, 2016, 4:59 p.m. UTC | #1
On Thu, Oct 20, 2016 at 07:45:54PM +0800, Gonglei wrote:
> Introduces VirtIOCryptoReq structure to store
> crypto request so that we can easily support
> asynchronous crypto operation in the future.
> 
> At present, we only support cipher and algorithm
> chaining.
> 
> Signed-off-by: Gonglei <arei.gonglei@huawei.com>
> ---
>  hw/virtio/virtio-crypto.c         | 358 +++++++++++++++++++++++++++++++++++++-
>  include/hw/virtio/virtio-crypto.h |   4 +
>  2 files changed, 361 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> index 4be65e0..eabc61f 100644
> --- a/hw/virtio/virtio-crypto.c
> +++ b/hw/virtio/virtio-crypto.c
> @@ -311,6 +311,362 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
>      } /* end for loop */
>  }
>  
> +static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
> +                                VirtIOCryptoReq *req)
> +{
> +    req->vcrypto = vcrypto;
> +    req->vq = vq;
> +    req->in = NULL;
> +    req->in_iov = NULL;
> +    req->in_num = 0;
> +    req->in_len = 0;
> +    req->flags = CRYPTODEV_BACKEND_ALG__MAX;
> +    req->u.sym_op_info = NULL;
> +}
> +
> +static void virtio_crypto_free_request(VirtIOCryptoReq *req)
> +{
> +    if (req) {
> +        if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> +            g_free(req->u.sym_op_info);
> +        }
> +        g_free(req);
> +    }
> +}
> +
> +static void
> +virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
> +                VirtIOCryptoReq *req,
> +                uint32_t status,
> +                CryptoDevBackendSymOpInfo *sym_op_info)
> +{
> +    size_t s, len;
> +
> +    if (status != VIRTIO_CRYPTO_OK) {
> +        return;
> +    }
> +
> +    len = sym_op_info->dst_len;
> +    /* Save the cipher result */
> +    s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
> +    if (s != len) {
> +        virtio_error(vdev, "virtio-crypto dest data incorrect");
> +        return;
> +    }
> +
> +    iov_discard_front(&req->in_iov, &req->in_num, len);
> +
> +    if (sym_op_info->op_type ==
> +                      VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
> +        /* Save the digest result */
> +        s = iov_from_buf(req->in_iov, req->in_num, 0,
> +                         sym_op_info->digest_result,
> +                         sym_op_info->digest_result_len);
> +        if (s != sym_op_info->digest_result_len) {
> +            virtio_error(vdev, "virtio-crypto digest result incorrect");
> +        }
> +    }
> +}
> +
> +static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
> +{
> +    VirtIOCrypto *vcrypto = req->vcrypto;
> +    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> +
> +    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> +        virtio_crypto_sym_input_data_helper(vdev, req, status,
> +                                            req->u.sym_op_info);
> +    }
> +    stb_p(&req->in->status, status);
> +    virtqueue_push(req->vq, &req->elem, req->in_len);
> +    virtio_notify(vdev, req->vq);
> +}
> +
> +static VirtIOCryptoReq *
> +virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
> +{
> +    VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
> +
> +    if (req) {
> +        virtio_crypto_init_request(s, vq, req);
> +    }
> +    return req;
> +}
> +
> +static CryptoDevBackendSymOpInfo *
> +virtio_crypto_sym_op_helper(VirtIODevice *vdev,
> +           struct virtio_crypto_cipher_para *cipher_para,
> +           struct virtio_crypto_alg_chain_data_para *alg_chain_para,
> +           struct iovec *iov, unsigned int out_num)
> +{
> +    CryptoDevBackendSymOpInfo *op_info;
> +    uint32_t src_len = 0, dst_len = 0;
> +    uint32_t iv_len = 0;
> +    uint32_t aad_len = 0, hash_result_len = 0;
> +    uint32_t hash_start_src_offset = 0, len_to_hash = 0;
> +    uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;
> +
> +    size_t max_len, curr_size = 0;
> +    size_t s;
> +
> +    /* Plain cipher */
> +    if (cipher_para) {
> +        iv_len = virtio_ldl_p(vdev, &cipher_para->iv_len);
> +        src_len = virtio_ldl_p(vdev, &cipher_para->src_data_len);
> +        dst_len = virtio_ldl_p(vdev, &cipher_para->dst_data_len);
> +    } else if (alg_chain_para) { /* Algorithm chain */
> +        iv_len = virtio_ldl_p(vdev, &alg_chain_para->iv_len);
> +        src_len = virtio_ldl_p(vdev, &alg_chain_para->src_data_len);
> +        dst_len = virtio_ldl_p(vdev, &alg_chain_para->dst_data_len);
> +
> +        aad_len = virtio_ldl_p(vdev, &alg_chain_para->aad_len);
> +        hash_result_len = virtio_ldl_p(vdev,
> +                              &alg_chain_para->hash_result_len);
> +        hash_start_src_offset = virtio_ldl_p(vdev,
> +                         &alg_chain_para->hash_start_src_offset);
> +        cipher_start_src_offset = virtio_ldl_p(vdev,
> +                         &alg_chain_para->cipher_start_src_offset);
> +        len_to_cipher = virtio_ldl_p(vdev, &alg_chain_para->len_to_cipher);
> +        len_to_hash = virtio_ldl_p(vdev, &alg_chain_para->len_to_hash);
> +    } else {
> +        return NULL;
> +    }
> +


You don't need virtio_ wrappers for modern devices I think. Just use LE
accessors.
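
For example, a minimal sketch of that change (assuming the virtio-crypto request
fields are always little-endian, as for any VIRTIO_F_VERSION_1 device; not the
final code):

    /* plain little-endian loads instead of the per-device endianness wrappers */
    iv_len  = ldl_le_p(&cipher_para->iv_len);
    src_len = ldl_le_p(&cipher_para->src_data_len);
    dst_len = ldl_le_p(&cipher_para->dst_data_len);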

> +    max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
> +    if (unlikely(max_len > LONG_MAX - sizeof(CryptoDevBackendSymOpInfo))) {
> +        virtio_error(vdev, "virtio-crypto too big length");
> +        return NULL;
> +    }
> +
> +    op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);

Wow this can allocate up to 2^64 bytes, triggerable by guest. Not nice.
Also, max size depends on long size and guest has no way
to know it. Not nice either.

Add max size in device config?

> +    op_info->iv_len = iv_len;
> +    op_info->src_len = src_len;
> +    op_info->dst_len = dst_len;
> +    op_info->aad_len = aad_len;
> +    op_info->digest_result_len = hash_result_len;
> +    op_info->hash_start_src_offset = hash_start_src_offset;
> +    op_info->len_to_hash = len_to_hash;
> +    op_info->cipher_start_src_offset = cipher_start_src_offset;
> +    op_info->len_to_cipher = len_to_cipher;
> +    /* Handle the initilization vector */
> +    if (op_info->iv_len > 0) {
> +        DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
> +        op_info->iv = op_info->data + curr_size;

This shows that splitting a single file like this
is not helpful. I wanted to know how data is initialized
and what makes this math safe, and couldn't figure it out
from this patch alone.


> +
> +        s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
> +        if (unlikely(s != op_info->iv_len)) {
> +            virtio_error(vdev, "virtio-crypto iv incorrect");
> +            goto err;
> +        }
> +        iov_discard_front(&iov, &out_num, op_info->iv_len);
> +        curr_size += op_info->iv_len;
> +    }
> +
> +    /* Handle additional authentication data if exist */

-> if it exists

> +    if (op_info->aad_len > 0) {
> +        DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
> +        op_info->aad_data = op_info->data + curr_size;
> +
> +        s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
> +        if (unlikely(s != op_info->aad_len)) {
> +            virtio_error(vdev, "virtio-crypto additional auth data incorrect");
> +            goto err;
> +        }
> +        iov_discard_front(&iov, &out_num, op_info->aad_len);
> +
> +        curr_size += op_info->aad_len;
> +    }
> +
> +    /* Handle the source data */
> +    if (op_info->src_len > 0) {
> +        DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
> +        op_info->src = op_info->data + curr_size;
> +
> +        s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
> +        if (unlikely(s != op_info->src_len)) {
> +            virtio_error(vdev, "virtio-crypto source data incorrect");
> +            goto err;
> +        }
> +        iov_discard_front(&iov, &out_num, op_info->src_len);
> +
> +        curr_size += op_info->src_len;
> +    }
> +
> +    /* Handle the destination data */
> +    op_info->dst = op_info->data + curr_size;
> +    curr_size += op_info->dst_len;
> +
> +    DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
> +
> +    /* Handle the hash digest result */
> +    if (hash_result_len > 0) {
> +        DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
> +        op_info->digest_result = op_info->data + curr_size;
> +    }
> +
> +    return op_info;
> +
> +err:
> +    g_free(op_info);
> +    return NULL;
> +}
> +
> +static int
> +virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
> +               struct virtio_crypto_sym_data_req *req,
> +               CryptoDevBackendSymOpInfo **sym_op_info,
> +               struct iovec *iov, unsigned int out_num)
> +{
> +    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> +    uint32_t op_type;
> +    CryptoDevBackendSymOpInfo *op_info;
> +
> +    op_type = virtio_ldl_p(vdev, &req->op_type);
> +
> +    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
> +        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
> +                                              NULL, iov, out_num);
> +        if (!op_info) {
> +            return -EFAULT;
> +        }
> +        op_info->op_type = op_type;
> +    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
> +        op_info = virtio_crypto_sym_op_helper(vdev, NULL,
> +                                              &req->u.chain.para,
> +                                              iov, out_num);
> +        if (!op_info) {
> +            return -EFAULT;
> +        }
> +        op_info->op_type = op_type;
> +    } else {
> +        /* VIRTIO_CRYPTO_SYM_OP_NONE */
> +        error_report("virtio-crypto unsupported cipher type");
> +        return -VIRTIO_CRYPTO_NOTSUPP;

virtio_error?

> +    }
> +
> +    *sym_op_info = op_info;
> +
> +    return 0;
> +}
> +
> +static int
> +virtio_crypto_handle_request(VirtIOCryptoReq *request)
> +{
> +    VirtIOCrypto *vcrypto = request->vcrypto;
> +    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> +    VirtQueueElement *elem = &request->elem;
> +    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
> +    struct virtio_crypto_op_data_req req;
> +    int ret;
> +    struct iovec *in_iov;
> +    struct iovec *out_iov;
> +    unsigned in_num;
> +    unsigned out_num;
> +    uint32_t opcode;
> +    uint8_t status = VIRTIO_CRYPTO_ERR;
> +    uint64_t session_id;
> +    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
> +    Error *local_err = NULL;
> +
> +    if (elem->out_num < 1 || elem->in_num < 1) {
> +        virtio_error(vdev, "virtio-crypto dataq missing headers");
> +        return -1;
> +    }
> +
> +    out_num = elem->out_num;
> +    out_iov = elem->out_sg;
> +    in_num = elem->in_num;
> +    in_iov = elem->in_sg;
> +    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
> +                != sizeof(req))) {
> +        virtio_error(vdev, "virtio-crypto request outhdr too short");
> +        return -1;
> +    }
> +    iov_discard_front(&out_iov, &out_num, sizeof(req));
> +
> +    if (in_iov[in_num - 1].iov_len <
> +            sizeof(struct virtio_crypto_inhdr)) {
> +        virtio_error(vdev, "virtio-crypto request inhdr too short");
> +        return -1;
> +    }
> +    /* We always touch the last byte, so just see how big in_iov is. */
> +    request->in_len = iov_size(in_iov, in_num);
> +    request->in = (void *)in_iov[in_num - 1].iov_base
> +              + in_iov[in_num - 1].iov_len
> +              - sizeof(struct virtio_crypto_inhdr);
> +    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));
> +
> +    /*
> +     * The length of operation result, including dest_data
> +     * and digest_result if exist.
> +     */
> +    request->in_num = in_num;
> +    request->in_iov = in_iov;
> +
> +    opcode = virtio_ldl_p(vdev, &req.header.opcode);
> +    session_id = virtio_ldq_p(vdev, &req.header.session_id);
> +
> +    switch (opcode) {
> +    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
> +    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
> +        ret = virtio_crypto_handle_sym_req(vcrypto,
> +                         &req.u.sym_req,
> +                         &sym_op_info,
> +                         out_iov, out_num);
> +        /* Serious errors, need to reset virtio crypto device */
> +        if (ret == -EFAULT) {
> +            return -1;
> +        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
> +            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
> +            virtio_crypto_free_request(request);
> +        } else {
> +            sym_op_info->session_id = session_id;
> +
> +            /* Set request's parameter */
> +            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
> +            request->u.sym_op_info = sym_op_info;
> +            ret = cryptodev_backend_sym_operation(vcrypto->cryptodev,
> +                                    sym_op_info, queue_index, &local_err);
> +            if (ret < 0) {
> +                status = VIRTIO_CRYPTO_ERR;
> +                if (local_err) {
> +                    error_report_err(local_err);
> +                }
> +            } else { /* ret >= 0 */
> +                status = VIRTIO_CRYPTO_OK;
> +            }
> +            virtio_crypto_req_complete(request, status);
> +            virtio_crypto_free_request(request);
> +        }
> +        break;
> +    case VIRTIO_CRYPTO_HASH:
> +    case VIRTIO_CRYPTO_MAC:
> +    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
> +    case VIRTIO_CRYPTO_AEAD_DECRYPT:
> +    default:
> +        error_report("virtio-crypto unsupported dataq opcode: %u",
> +                     opcode);
> +        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
> +        virtio_crypto_free_request(request);
> +    }
> +
> +    return 0;
> +}
> +
> +static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
> +{
> +    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
> +    VirtIOCryptoReq *req;
> +
> +    while ((req = virtio_crypto_get_request(vcrypto, vq))) {
> +        if (virtio_crypto_handle_request(req) < 0) {
> +            virtqueue_detach_element(req->vq, &req->elem, 0);
> +            virtio_crypto_free_request(req);
> +            break;
> +        }
> +    }
> +}
> +
>  static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
>                                             uint64_t features,
>                                             Error **errp)
> @@ -370,7 +726,7 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
>      vcrypto->curr_queues = 1;
>  
>      for (i = 0; i < vcrypto->max_queues; i++) {
> -        virtio_add_queue(vdev, 1024, NULL);
> +        virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq);
>      }
>  
>      vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
> diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
> index 4a4b3da..2628056 100644
> --- a/include/hw/virtio/virtio-crypto.h
> +++ b/include/hw/virtio/virtio-crypto.h
> @@ -58,6 +58,10 @@ typedef struct VirtIOCryptoReq {
>      VirtQueueElement elem;
>      /* flags of operation, such as type of algorithm */
>      uint32_t flags;
> +    struct virtio_crypto_inhdr *in;
> +    struct iovec *in_iov; /* Head address of dest iovec */
> +    unsigned int in_num; /* Number of dest iovec */
> +    size_t in_len;
>      VirtQueue *vq;
>      struct VirtIOCrypto *vcrypto;
>      union {
> -- 
> 1.8.3.1
>
Gonglei (Arei) Oct. 28, 2016, 12:50 a.m. UTC | #2
Hi Michael,

Thanks for your comments.

> -----Original Message-----
> From: virtio-dev@lists.oasis-open.org [mailto:virtio-dev@lists.oasis-open.org]
> On Behalf Of Michael S. Tsirkin
> Sent: Friday, October 28, 2016 12:59 AM
> Subject: [virtio-dev] Re: [PATCH v9 09/12] virtio-crypto: add data queue
> processing handler
> 
> On Thu, Oct 20, 2016 at 07:45:54PM +0800, Gonglei wrote:
> > Introduces VirtIOCryptoReq structure to store
> > crypto request so that we can easily support
> > asynchronous crypto operation in the future.
> >
> > At present, we only support cipher and algorithm
> > chaining.
> >
> > Signed-off-by: Gonglei <arei.gonglei@huawei.com>
> > ---
> >  hw/virtio/virtio-crypto.c         | 358 +++++++++++++++++++++++++++++++++++++-
> >  include/hw/virtio/virtio-crypto.h |   4 +
> >  2 files changed, 361 insertions(+), 1 deletion(-)
> >
> > diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
> > index 4be65e0..eabc61f 100644
> > --- a/hw/virtio/virtio-crypto.c
> > +++ b/hw/virtio/virtio-crypto.c
> > @@ -311,6 +311,362 @@ static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
> >      } /* end for loop */
> >  }
> >
> > +static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
> > +                                VirtIOCryptoReq *req)
> > +{
> > +    req->vcrypto = vcrypto;
> > +    req->vq = vq;
> > +    req->in = NULL;
> > +    req->in_iov = NULL;
> > +    req->in_num = 0;
> > +    req->in_len = 0;
> > +    req->flags = CRYPTODEV_BACKEND_ALG__MAX;
> > +    req->u.sym_op_info = NULL;
> > +}
> > +
> > +static void virtio_crypto_free_request(VirtIOCryptoReq *req)
> > +{
> > +    if (req) {
> > +        if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> > +            g_free(req->u.sym_op_info);
> > +        }
> > +        g_free(req);
> > +    }
> > +}
> > +
> > +static void
> > +virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
> > +                VirtIOCryptoReq *req,
> > +                uint32_t status,
> > +                CryptoDevBackendSymOpInfo *sym_op_info)
> > +{
> > +    size_t s, len;
> > +
> > +    if (status != VIRTIO_CRYPTO_OK) {
> > +        return;
> > +    }
> > +
> > +    len = sym_op_info->dst_len;
> > +    /* Save the cipher result */
> > +    s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
> > +    if (s != len) {
> > +        virtio_error(vdev, "virtio-crypto dest data incorrect");
> > +        return;
> > +    }
> > +
> > +    iov_discard_front(&req->in_iov, &req->in_num, len);
> > +
> > +    if (sym_op_info->op_type ==
> > +                      VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
> > +        /* Save the digest result */
> > +        s = iov_from_buf(req->in_iov, req->in_num, 0,
> > +                         sym_op_info->digest_result,
> > +                         sym_op_info->digest_result_len);
> > +        if (s != sym_op_info->digest_result_len) {
> > +            virtio_error(vdev, "virtio-crypto digest result incorrect");
> > +        }
> > +    }
> > +}
> > +
> > +static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
> > +{
> > +    VirtIOCrypto *vcrypto = req->vcrypto;
> > +    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> > +
> > +    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
> > +        virtio_crypto_sym_input_data_helper(vdev, req, status,
> > +                                            req->u.sym_op_info);
> > +    }
> > +    stb_p(&req->in->status, status);
> > +    virtqueue_push(req->vq, &req->elem, req->in_len);
> > +    virtio_notify(vdev, req->vq);
> > +}
> > +
> > +static VirtIOCryptoReq *
> > +virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
> > +{
> > +    VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
> > +
> > +    if (req) {
> > +        virtio_crypto_init_request(s, vq, req);
> > +    }
> > +    return req;
> > +}
> > +
> > +static CryptoDevBackendSymOpInfo *
> > +virtio_crypto_sym_op_helper(VirtIODevice *vdev,
> > +           struct virtio_crypto_cipher_para *cipher_para,
> > +           struct virtio_crypto_alg_chain_data_para *alg_chain_para,
> > +           struct iovec *iov, unsigned int out_num)
> > +{
> > +    CryptoDevBackendSymOpInfo *op_info;
> > +    uint32_t src_len = 0, dst_len = 0;
> > +    uint32_t iv_len = 0;
> > +    uint32_t aad_len = 0, hash_result_len = 0;
> > +    uint32_t hash_start_src_offset = 0, len_to_hash = 0;
> > +    uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;
> > +
> > +    size_t max_len, curr_size = 0;
> > +    size_t s;
> > +
> > +    /* Plain cipher */
> > +    if (cipher_para) {
> > +        iv_len = virtio_ldl_p(vdev, &cipher_para->iv_len);
> > +        src_len = virtio_ldl_p(vdev, &cipher_para->src_data_len);
> > +        dst_len = virtio_ldl_p(vdev, &cipher_para->dst_data_len);
> > +    } else if (alg_chain_para) { /* Algorithm chain */
> > +        iv_len = virtio_ldl_p(vdev, &alg_chain_para->iv_len);
> > +        src_len = virtio_ldl_p(vdev, &alg_chain_para->src_data_len);
> > +        dst_len = virtio_ldl_p(vdev, &alg_chain_para->dst_data_len);
> > +
> > +        aad_len = virtio_ldl_p(vdev, &alg_chain_para->aad_len);
> > +        hash_result_len = virtio_ldl_p(vdev,
> > +                              &alg_chain_para->hash_result_len);
> > +        hash_start_src_offset = virtio_ldl_p(vdev,
> > +                         &alg_chain_para->hash_start_src_offset);
> > +        cipher_start_src_offset = virtio_ldl_p(vdev,
> > +                         &alg_chain_para->cipher_start_src_offset);
> > +        len_to_cipher = virtio_ldl_p(vdev, &alg_chain_para->len_to_cipher);
> > +        len_to_hash = virtio_ldl_p(vdev, &alg_chain_para->len_to_hash);
> > +    } else {
> > +        return NULL;
> > +    }
> > +
> 
> 
> You don't need virtio_ wrappers for modern devices I think. Just use LE
> accessors.
> 

I think I get your point: you mean using the *_le_* function family directly.
Will do.

> > +    max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
> > +    if (unlikely(max_len > LONG_MAX - sizeof(CryptoDevBackendSymOpInfo))) {
> > +        virtio_error(vdev, "virtio-crypto too big length");
> > +        return NULL;
> > +    }
> > +
> > +    op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
> 
> Wow this can allocate up to 2^64 bytes, triggerable by guest. Not nice.

That's true, at least as the theoretical boundary value.

> Also, max size depends on long size and guest has no way
> to know it. Not nice either.
> 
Currently yes; the driver only receives a VIRTIO_CRYPTO_ERR if
the total length > LONG_MAX - sizeof(CryptoDevBackendSymOpInfo).

> Add max size in device config?
> 
I think it makes sense.

The max_size expresses the maximum size of each crypto request's content,
and we can set different max_size values for different cryptodev backends
on the device side.

The driver can read it from the device configuration and add corresponding checks.

Let me add it in the following version.
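
A rough sketch of the device-side check I have in mind (assuming a new le64
max_size field in the device config; the field and its placement are tentative):

    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
    uint64_t max_len;

    max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len;
    if (unlikely(max_len > vcrypto->conf.max_size)) {
        /* max_size would be the new, tentatively named config field */
        virtio_error(vdev, "virtio-crypto request length exceeds max_size");
        return NULL;
    }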

> > +    op_info->iv_len = iv_len;
> > +    op_info->src_len = src_len;
> > +    op_info->dst_len = dst_len;
> > +    op_info->aad_len = aad_len;
> > +    op_info->digest_result_len = hash_result_len;
> > +    op_info->hash_start_src_offset = hash_start_src_offset;
> > +    op_info->len_to_hash = len_to_hash;
> > +    op_info->cipher_start_src_offset = cipher_start_src_offset;
> > +    op_info->len_to_cipher = len_to_cipher;
> > +    /* Handle the initilization vector */
> > +    if (op_info->iv_len > 0) {
> > +        DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
> > +        op_info->iv = op_info->data + curr_size;
> 
> This shows that splitting a single file like this
> is not helpful. I wanted to know how data is initialized
> and what makes this math safe, and couldn't figure it out
> from this patch alone.
> 
Sorry, I couldn't completely catch your meaning here. :(

Do you mean that the way virtio-crypto.c is split across patches
is not nice? If so, I think it would be too big to review
if it were all in one patch.
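
To be clearer about the math: the iv/aad/src/dst/digest pointers are all carved
out of the flexible array at the end of CryptoDevBackendSymOpInfo, defined in
the cryptodev backend header earlier in this series; roughly (trimmed, see the
cryptodev patch for the exact definition):

    typedef struct CryptoDevBackendSymOpInfo {
        /* ... length and offset fields ... */
        uint8_t *iv;
        uint8_t *src;
        uint8_t *dst;
        uint8_t *aad_data;
        uint8_t *digest_result;
        uint8_t data[0]; /* iv | aad | src | dst | digest live back to back here */
    } CryptoDevBackendSymOpInfo;

Since g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len) reserves max_len
bytes for data[], and curr_size only ever advances by iv_len + aad_len +
src_len + dst_len, which are all counted in max_len, the pointer arithmetic
stays inside the allocation.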

> 
> > +
> > +        s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
> > +        if (unlikely(s != op_info->iv_len)) {
> > +            virtio_error(vdev, "virtio-crypto iv incorrect");
> > +            goto err;
> > +        }
> > +        iov_discard_front(&iov, &out_num, op_info->iv_len);
> > +        curr_size += op_info->iv_len;
> > +    }
> > +
> > +    /* Handle additional authentication data if exist */
> 
> -> if it exists
> 
Yes, will fix it.

> > +    if (op_info->aad_len > 0) {
> > +        DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
> > +        op_info->aad_data = op_info->data + curr_size;
> > +
> > +        s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
> > +        if (unlikely(s != op_info->aad_len)) {
> > +            virtio_error(vdev, "virtio-crypto additional auth data incorrect");
> > +            goto err;
> > +        }
> > +        iov_discard_front(&iov, &out_num, op_info->aad_len);
> > +
> > +        curr_size += op_info->aad_len;
> > +    }
> > +
> > +    /* Handle the source data */
> > +    if (op_info->src_len > 0) {
> > +        DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
> > +        op_info->src = op_info->data + curr_size;
> > +
> > +        s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
> > +        if (unlikely(s != op_info->src_len)) {
> > +            virtio_error(vdev, "virtio-crypto source data incorrect");
> > +            goto err;
> > +        }
> > +        iov_discard_front(&iov, &out_num, op_info->src_len);
> > +
> > +        curr_size += op_info->src_len;
> > +    }
> > +
> > +    /* Handle the destination data */
> > +    op_info->dst = op_info->data + curr_size;
> > +    curr_size += op_info->dst_len;
> > +
> > +    DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
> > +
> > +    /* Handle the hash digest result */
> > +    if (hash_result_len > 0) {
> > +        DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
> > +        op_info->digest_result = op_info->data + curr_size;
> > +    }
> > +
> > +    return op_info;
> > +
> > +err:
> > +    g_free(op_info);
> > +    return NULL;
> > +}
> > +
> > +static int
> > +virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
> > +               struct virtio_crypto_sym_data_req *req,
> > +               CryptoDevBackendSymOpInfo **sym_op_info,
> > +               struct iovec *iov, unsigned int out_num)
> > +{
> > +    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
> > +    uint32_t op_type;
> > +    CryptoDevBackendSymOpInfo *op_info;
> > +
> > +    op_type = virtio_ldl_p(vdev, &req->op_type);
> > +
> > +    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
> > +        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
> > +                                              NULL, iov, out_num);
> > +        if (!op_info) {
> > +            return -EFAULT;
> > +        }
> > +        op_info->op_type = op_type;
> > +    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
> > +        op_info = virtio_crypto_sym_op_helper(vdev, NULL,
> > +                                              &req->u.chain.para,
> > +                                              iov, out_num);
> > +        if (!op_info) {
> > +            return -EFAULT;
> > +        }
> > +        op_info->op_type = op_type;
> > +    } else {
> > +        /* VIRTIO_CRYPTO_SYM_OP_NONE */
> > +        error_report("virtio-crypto unsupported cipher type");
> > +        return -VIRTIO_CRYPTO_NOTSUPP;
> 
> virtio_error?
> 
Stefan also mentioned this. It's just a reminder for the host side, not a serious
error that needs the frontend driver to reset the device. The driver will
receive VIRTIO_CRYPTO_NOTSUPP and decide what to do next.


Regards,
-Gonglei

Patch

diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 4be65e0..eabc61f 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -311,6 +311,362 @@  static void virtio_crypto_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
     } /* end for loop */
 }
 
+static void virtio_crypto_init_request(VirtIOCrypto *vcrypto, VirtQueue *vq,
+                                VirtIOCryptoReq *req)
+{
+    req->vcrypto = vcrypto;
+    req->vq = vq;
+    req->in = NULL;
+    req->in_iov = NULL;
+    req->in_num = 0;
+    req->in_len = 0;
+    req->flags = CRYPTODEV_BACKEND_ALG__MAX;
+    req->u.sym_op_info = NULL;
+}
+
+static void virtio_crypto_free_request(VirtIOCryptoReq *req)
+{
+    if (req) {
+        if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+            g_free(req->u.sym_op_info);
+        }
+        g_free(req);
+    }
+}
+
+static void
+virtio_crypto_sym_input_data_helper(VirtIODevice *vdev,
+                VirtIOCryptoReq *req,
+                uint32_t status,
+                CryptoDevBackendSymOpInfo *sym_op_info)
+{
+    size_t s, len;
+
+    if (status != VIRTIO_CRYPTO_OK) {
+        return;
+    }
+
+    len = sym_op_info->dst_len;
+    /* Save the cipher result */
+    s = iov_from_buf(req->in_iov, req->in_num, 0, sym_op_info->dst, len);
+    if (s != len) {
+        virtio_error(vdev, "virtio-crypto dest data incorrect");
+        return;
+    }
+
+    iov_discard_front(&req->in_iov, &req->in_num, len);
+
+    if (sym_op_info->op_type ==
+                      VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+        /* Save the digest result */
+        s = iov_from_buf(req->in_iov, req->in_num, 0,
+                         sym_op_info->digest_result,
+                         sym_op_info->digest_result_len);
+        if (s != sym_op_info->digest_result_len) {
+            virtio_error(vdev, "virtio-crypto digest result incorrect");
+        }
+    }
+}
+
+static void virtio_crypto_req_complete(VirtIOCryptoReq *req, uint8_t status)
+{
+    VirtIOCrypto *vcrypto = req->vcrypto;
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+
+    if (req->flags == CRYPTODEV_BACKEND_ALG_SYM) {
+        virtio_crypto_sym_input_data_helper(vdev, req, status,
+                                            req->u.sym_op_info);
+    }
+    stb_p(&req->in->status, status);
+    virtqueue_push(req->vq, &req->elem, req->in_len);
+    virtio_notify(vdev, req->vq);
+}
+
+static VirtIOCryptoReq *
+virtio_crypto_get_request(VirtIOCrypto *s, VirtQueue *vq)
+{
+    VirtIOCryptoReq *req = virtqueue_pop(vq, sizeof(VirtIOCryptoReq));
+
+    if (req) {
+        virtio_crypto_init_request(s, vq, req);
+    }
+    return req;
+}
+
+static CryptoDevBackendSymOpInfo *
+virtio_crypto_sym_op_helper(VirtIODevice *vdev,
+           struct virtio_crypto_cipher_para *cipher_para,
+           struct virtio_crypto_alg_chain_data_para *alg_chain_para,
+           struct iovec *iov, unsigned int out_num)
+{
+    CryptoDevBackendSymOpInfo *op_info;
+    uint32_t src_len = 0, dst_len = 0;
+    uint32_t iv_len = 0;
+    uint32_t aad_len = 0, hash_result_len = 0;
+    uint32_t hash_start_src_offset = 0, len_to_hash = 0;
+    uint32_t cipher_start_src_offset = 0, len_to_cipher = 0;
+
+    size_t max_len, curr_size = 0;
+    size_t s;
+
+    /* Plain cipher */
+    if (cipher_para) {
+        iv_len = virtio_ldl_p(vdev, &cipher_para->iv_len);
+        src_len = virtio_ldl_p(vdev, &cipher_para->src_data_len);
+        dst_len = virtio_ldl_p(vdev, &cipher_para->dst_data_len);
+    } else if (alg_chain_para) { /* Algorithm chain */
+        iv_len = virtio_ldl_p(vdev, &alg_chain_para->iv_len);
+        src_len = virtio_ldl_p(vdev, &alg_chain_para->src_data_len);
+        dst_len = virtio_ldl_p(vdev, &alg_chain_para->dst_data_len);
+
+        aad_len = virtio_ldl_p(vdev, &alg_chain_para->aad_len);
+        hash_result_len = virtio_ldl_p(vdev,
+                              &alg_chain_para->hash_result_len);
+        hash_start_src_offset = virtio_ldl_p(vdev,
+                         &alg_chain_para->hash_start_src_offset);
+        cipher_start_src_offset = virtio_ldl_p(vdev,
+                         &alg_chain_para->cipher_start_src_offset);
+        len_to_cipher = virtio_ldl_p(vdev, &alg_chain_para->len_to_cipher);
+        len_to_hash = virtio_ldl_p(vdev, &alg_chain_para->len_to_hash);
+    } else {
+        return NULL;
+    }
+
+    max_len = iv_len + aad_len + src_len + dst_len + hash_result_len;
+    if (unlikely(max_len > LONG_MAX - sizeof(CryptoDevBackendSymOpInfo))) {
+        virtio_error(vdev, "virtio-crypto too big length");
+        return NULL;
+    }
+
+    op_info = g_malloc0(sizeof(CryptoDevBackendSymOpInfo) + max_len);
+    op_info->iv_len = iv_len;
+    op_info->src_len = src_len;
+    op_info->dst_len = dst_len;
+    op_info->aad_len = aad_len;
+    op_info->digest_result_len = hash_result_len;
+    op_info->hash_start_src_offset = hash_start_src_offset;
+    op_info->len_to_hash = len_to_hash;
+    op_info->cipher_start_src_offset = cipher_start_src_offset;
+    op_info->len_to_cipher = len_to_cipher;
+    /* Handle the initilization vector */
+    if (op_info->iv_len > 0) {
+        DPRINTF("iv_len=%" PRIu32 "\n", op_info->iv_len);
+        op_info->iv = op_info->data + curr_size;
+
+        s = iov_to_buf(iov, out_num, 0, op_info->iv, op_info->iv_len);
+        if (unlikely(s != op_info->iv_len)) {
+            virtio_error(vdev, "virtio-crypto iv incorrect");
+            goto err;
+        }
+        iov_discard_front(&iov, &out_num, op_info->iv_len);
+        curr_size += op_info->iv_len;
+    }
+
+    /* Handle additional authentication data if exist */
+    if (op_info->aad_len > 0) {
+        DPRINTF("aad_len=%" PRIu32 "\n", op_info->aad_len);
+        op_info->aad_data = op_info->data + curr_size;
+
+        s = iov_to_buf(iov, out_num, 0, op_info->aad_data, op_info->aad_len);
+        if (unlikely(s != op_info->aad_len)) {
+            virtio_error(vdev, "virtio-crypto additional auth data incorrect");
+            goto err;
+        }
+        iov_discard_front(&iov, &out_num, op_info->aad_len);
+
+        curr_size += op_info->aad_len;
+    }
+
+    /* Handle the source data */
+    if (op_info->src_len > 0) {
+        DPRINTF("src_len=%" PRIu32 "\n", op_info->src_len);
+        op_info->src = op_info->data + curr_size;
+
+        s = iov_to_buf(iov, out_num, 0, op_info->src, op_info->src_len);
+        if (unlikely(s != op_info->src_len)) {
+            virtio_error(vdev, "virtio-crypto source data incorrect");
+            goto err;
+        }
+        iov_discard_front(&iov, &out_num, op_info->src_len);
+
+        curr_size += op_info->src_len;
+    }
+
+    /* Handle the destination data */
+    op_info->dst = op_info->data + curr_size;
+    curr_size += op_info->dst_len;
+
+    DPRINTF("dst_len=%" PRIu32 "\n", op_info->dst_len);
+
+    /* Handle the hash digest result */
+    if (hash_result_len > 0) {
+        DPRINTF("hash_result_len=%" PRIu32 "\n", hash_result_len);
+        op_info->digest_result = op_info->data + curr_size;
+    }
+
+    return op_info;
+
+err:
+    g_free(op_info);
+    return NULL;
+}
+
+static int
+virtio_crypto_handle_sym_req(VirtIOCrypto *vcrypto,
+               struct virtio_crypto_sym_data_req *req,
+               CryptoDevBackendSymOpInfo **sym_op_info,
+               struct iovec *iov, unsigned int out_num)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+    uint32_t op_type;
+    CryptoDevBackendSymOpInfo *op_info;
+
+    op_type = virtio_ldl_p(vdev, &req->op_type);
+
+    if (op_type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
+        op_info = virtio_crypto_sym_op_helper(vdev, &req->u.cipher.para,
+                                              NULL, iov, out_num);
+        if (!op_info) {
+            return -EFAULT;
+        }
+        op_info->op_type = op_type;
+    } else if (op_type == VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING) {
+        op_info = virtio_crypto_sym_op_helper(vdev, NULL,
+                                              &req->u.chain.para,
+                                              iov, out_num);
+        if (!op_info) {
+            return -EFAULT;
+        }
+        op_info->op_type = op_type;
+    } else {
+        /* VIRTIO_CRYPTO_SYM_OP_NONE */
+        error_report("virtio-crypto unsupported cipher type");
+        return -VIRTIO_CRYPTO_NOTSUPP;
+    }
+
+    *sym_op_info = op_info;
+
+    return 0;
+}
+
+static int
+virtio_crypto_handle_request(VirtIOCryptoReq *request)
+{
+    VirtIOCrypto *vcrypto = request->vcrypto;
+    VirtIODevice *vdev = VIRTIO_DEVICE(vcrypto);
+    VirtQueueElement *elem = &request->elem;
+    int queue_index = virtio_crypto_vq2q(virtio_get_queue_index(request->vq));
+    struct virtio_crypto_op_data_req req;
+    int ret;
+    struct iovec *in_iov;
+    struct iovec *out_iov;
+    unsigned in_num;
+    unsigned out_num;
+    uint32_t opcode;
+    uint8_t status = VIRTIO_CRYPTO_ERR;
+    uint64_t session_id;
+    CryptoDevBackendSymOpInfo *sym_op_info = NULL;
+    Error *local_err = NULL;
+
+    if (elem->out_num < 1 || elem->in_num < 1) {
+        virtio_error(vdev, "virtio-crypto dataq missing headers");
+        return -1;
+    }
+
+    out_num = elem->out_num;
+    out_iov = elem->out_sg;
+    in_num = elem->in_num;
+    in_iov = elem->in_sg;
+    if (unlikely(iov_to_buf(out_iov, out_num, 0, &req, sizeof(req))
+                != sizeof(req))) {
+        virtio_error(vdev, "virtio-crypto request outhdr too short");
+        return -1;
+    }
+    iov_discard_front(&out_iov, &out_num, sizeof(req));
+
+    if (in_iov[in_num - 1].iov_len <
+            sizeof(struct virtio_crypto_inhdr)) {
+        virtio_error(vdev, "virtio-crypto request inhdr too short");
+        return -1;
+    }
+    /* We always touch the last byte, so just see how big in_iov is. */
+    request->in_len = iov_size(in_iov, in_num);
+    request->in = (void *)in_iov[in_num - 1].iov_base
+              + in_iov[in_num - 1].iov_len
+              - sizeof(struct virtio_crypto_inhdr);
+    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_crypto_inhdr));
+
+    /*
+     * The length of operation result, including dest_data
+     * and digest_result if exist.
+     */
+    request->in_num = in_num;
+    request->in_iov = in_iov;
+
+    opcode = virtio_ldl_p(vdev, &req.header.opcode);
+    session_id = virtio_ldq_p(vdev, &req.header.session_id);
+
+    switch (opcode) {
+    case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
+    case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+        ret = virtio_crypto_handle_sym_req(vcrypto,
+                         &req.u.sym_req,
+                         &sym_op_info,
+                         out_iov, out_num);
+        /* Serious errors, need to reset virtio crypto device */
+        if (ret == -EFAULT) {
+            return -1;
+        } else if (ret == -VIRTIO_CRYPTO_NOTSUPP) {
+            virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
+            virtio_crypto_free_request(request);
+        } else {
+            sym_op_info->session_id = session_id;
+
+            /* Set request's parameter */
+            request->flags = CRYPTODEV_BACKEND_ALG_SYM;
+            request->u.sym_op_info = sym_op_info;
+            ret = cryptodev_backend_sym_operation(vcrypto->cryptodev,
+                                    sym_op_info, queue_index, &local_err);
+            if (ret < 0) {
+                status = VIRTIO_CRYPTO_ERR;
+                if (local_err) {
+                    error_report_err(local_err);
+                }
+            } else { /* ret >= 0 */
+                status = VIRTIO_CRYPTO_OK;
+            }
+            virtio_crypto_req_complete(request, status);
+            virtio_crypto_free_request(request);
+        }
+        break;
+    case VIRTIO_CRYPTO_HASH:
+    case VIRTIO_CRYPTO_MAC:
+    case VIRTIO_CRYPTO_AEAD_ENCRYPT:
+    case VIRTIO_CRYPTO_AEAD_DECRYPT:
+    default:
+        error_report("virtio-crypto unsupported dataq opcode: %u",
+                     opcode);
+        virtio_crypto_req_complete(request, VIRTIO_CRYPTO_NOTSUPP);
+        virtio_crypto_free_request(request);
+    }
+
+    return 0;
+}
+
+static void virtio_crypto_handle_dataq(VirtIODevice *vdev, VirtQueue *vq)
+{
+    VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev);
+    VirtIOCryptoReq *req;
+
+    while ((req = virtio_crypto_get_request(vcrypto, vq))) {
+        if (virtio_crypto_handle_request(req) < 0) {
+            virtqueue_detach_element(req->vq, &req->elem, 0);
+            virtio_crypto_free_request(req);
+            break;
+        }
+    }
+}
+
 static uint64_t virtio_crypto_get_features(VirtIODevice *vdev,
                                            uint64_t features,
                                            Error **errp)
@@ -370,7 +726,7 @@  static void virtio_crypto_device_realize(DeviceState *dev, Error **errp)
     vcrypto->curr_queues = 1;
 
     for (i = 0; i < vcrypto->max_queues; i++) {
-        virtio_add_queue(vdev, 1024, NULL);
+        virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq);
     }
 
     vcrypto->ctrl_vq = virtio_add_queue(vdev, 64, virtio_crypto_handle_ctrl);
diff --git a/include/hw/virtio/virtio-crypto.h b/include/hw/virtio/virtio-crypto.h
index 4a4b3da..2628056 100644
--- a/include/hw/virtio/virtio-crypto.h
+++ b/include/hw/virtio/virtio-crypto.h
@@ -58,6 +58,10 @@  typedef struct VirtIOCryptoReq {
     VirtQueueElement elem;
     /* flags of operation, such as type of algorithm */
     uint32_t flags;
+    struct virtio_crypto_inhdr *in;
+    struct iovec *in_iov; /* Head address of dest iovec */
+    unsigned int in_num; /* Number of dest iovec */
+    size_t in_len;
     VirtQueue *vq;
     struct VirtIOCrypto *vcrypto;
     union {