[v4,2/3] aspeed/hace: Support AST2600 HACE

Message ID 20220331074844.30065-3-steven_lee@aspeedtech.com (mailing list archive)
State New, archived
Series aspeed/hace: Support AST2600 HACE

Commit Message

Steven Lee March 31, 2022, 7:48 a.m. UTC
The Aspeed AST2600 accumulative mode is described in datasheet
ast2600v10.pdf section 25.6.4:
 1. Allocate and initialize the accumulative hash digest write buffer
    with the initial state.
    * Since the QEMU crypto/hash API doesn't expose a way to set the
      initial state, and the crypto library (gcrypt/glib/...) already
      sets it internally, this step is skipped.
 2. Calculate the accumulative hash digest.
    (a) When receiving the last chunk of accumulative data, software
        needs to append a padding message to the end of the data. The
        padding message is defined in the specifications of MD5, SHA-1,
        SHA-224, SHA-256, SHA-512, SHA-512/224 and SHA-512/256.
        * The crypto library (gcrypt/glib) already appends this
          padding internally.
        * This patch therefore strips the padding message fed by the
          guest machine driver, as sketched below.
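
For reference, a minimal sketch of the MD-style padding the guest
driver appends and this patch strips (illustration only, not part of
the patch; shown for a 64-byte-block digest such as SHA-256, while
SHA-384/512 use 128-byte blocks and a 16-byte length field):

    #include <stdint.h>
    #include <string.h>

    /*
     * Appends 0x80, zero fill, then the message length in bits as a
     * big-endian 64-bit value, so the padded total is a multiple of
     * 64 bytes. has_padding() in the patch reverses this layout.
     */
    static size_t build_padding(uint8_t *buf, uint64_t msg_len)
    {
        size_t pad_len = 64 - (msg_len % 64);
        uint64_t bit_len = msg_len * 8;
        int i;

        if (pad_len < 9) {       /* room for 0x80 + 8-byte length */
            pad_len += 64;
        }
        memset(buf, 0, pad_len);
        buf[0] = 0x80;                     /* first padding byte */
        for (i = 0; i < 8; i++) {          /* big-endian bit length */
            buf[pad_len - 1 - i] = (uint8_t)(bit_len >> (8 * i));
        }
        return pad_len;
    }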

Signed-off-by: Troy Lee <troy_lee@aspeedtech.com>
Signed-off-by: Steven Lee <steven_lee@aspeedtech.com>
---
 hw/misc/aspeed_hace.c | 140 ++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 136 insertions(+), 4 deletions(-)

Comments

Cédric Le Goater April 20, 2022, 12:53 p.m. UTC | #1
On 3/31/22 09:48, Steven Lee wrote:
> The aspeed ast2600 accumulative mode is described in datasheet
> ast2600v10.pdf section 25.6.4:
>   1. Allocating and initiating accumulative hash digest write buffer
>      with initial state.
>      * Since QEMU crypto/hash api doesn't provide the API to set initial
>        state of hash library, and the initial state is already setted by

s/setted/set/

>        crypto library (gcrypt/glib/...), so skip this step.
>   2. Calculating accumulative hash digest.
>      (a) When receiving the last accumulative data, software need to add
>          padding message at the end of the accumulative data. Padding
>          message described in specific of MD5, SHA-1, SHA224, SHA256,
>          SHA512, SHA512/224, SHA512/256.
>          * Since the crypto library (gcrypt/glib) already pad the
>            padding message internally.
>          * This patch is to remove the padding message which fed byguest
>            machine driver.
> 
> Signed-off-by: Troy Lee <troy_lee@aspeedtech.com>
> Signed-off-by: Steven Lee <steven_lee@aspeedtech.com>
> ---
>   hw/misc/aspeed_hace.c | 140 ++++++++++++++++++++++++++++++++++++++++--
>   1 file changed, 136 insertions(+), 4 deletions(-)
> 
> diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
> index 59fe5bfca2..5a7a144602 100644
> --- a/hw/misc/aspeed_hace.c
> +++ b/hw/misc/aspeed_hace.c
> @@ -95,12 +95,115 @@ static int hash_algo_lookup(uint32_t reg)
>       return -1;
>   }
>   
> -static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
> +/**
> + * Check whether the request contains padding message.
> + *
> + * @param iov           iov of current request
> + * @param id            index of iov of current request
> + * @param total_req_len length of all acc_mode requests(including padding msg)
> + * @param req_len       length of the current request
> + * @param total_msg_len length of all acc_mode requests(excluding padding msg)
> + * @param pad_offset    start offset of padding message
> + */
> +static bool has_padding(struct iovec *iov, uint32_t total_req_len,
> +                        hwaddr req_len, uint32_t *total_msg_len,
> +                        uint32_t *pad_offset)
> +{
> +    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
> +    /*
> +     * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is the
> +     * last request. The last request should contain padding message.
> +     * We check whether message contains padding by
> +     *   1. Get total message length. If the current message contains
> +     *      padding, the last 8 bytes are total message length.
> +     *   2. Check whether the total message length is valid.
> +     *      If it is valid, the value should less than or eaual to

s/eaual/equal/

> +     *      total_req_len.
> +     *   3. Current request len - padding_size to get padding offset.
> +     *      The padding message's first byte should be 0x80
> +     */
> +    if (*total_msg_len <= total_req_len) {
> +        uint32_t padding_size = total_req_len - *total_msg_len;
> +        uint8_t *padding = iov->iov_base;
> +        *pad_offset = req_len - padding_size;
> +        if (padding[*pad_offset] == 0x80) {
> +            return true;
> +        }
> +    }
> +
> +    return false;
> +}
> +
> +static int reconstruct_iov(struct iovec *cache, struct iovec *iov, int id,
> +                           uint32_t *total_req_len,
> +                           uint32_t *pad_offset,
> +                           int *count)
> +{
> +    int i, iov_count;
> +    if (pad_offset != 0) {
> +        (cache + *count)->iov_base = (iov + id)->iov_base;

I would prefer the array notation iov[i], like elsewhere in this file.
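
For instance (hypothetical rewrite of these assignments):

    cache[*count].iov_base = iov[id].iov_base;
    cache[*count].iov_len = *pad_offset;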

> +        (cache + *count)->iov_len = *pad_offset;
> +        ++*count;
> +    }
> +    for (i = 0; i < *count; i++) {
> +        (iov + i)->iov_base = (cache + i)->iov_base;
> +        (iov + i)->iov_len = (cache + i)->iov_len;

ditto.

> +    }
> +    iov_count = *count;
> +    *count = 0;
> +    *total_req_len = 0;
> +    return iov_count;
> +}
> +
> +/**
> + * Generate iov for accumulative mode.
> + *
> + * @param cache         cached iov
> + * @param iov           iov of current request
> + * @param id            index of iov of current request
> + * @param total_req_len total length of the request(including padding)
> + * @param req_len       length of the current request
> + * @param count         count of cached iov
> + */
> +static int gen_acc_mode_iov(struct iovec *cache, struct iovec *iov, int id,
> +                            uint32_t *total_req_len, hwaddr *req_len,
> +                            int *count)
> +{
> +    uint32_t pad_offset;
> +    uint32_t total_msg_len;
> +    *total_req_len += *req_len;
> +
> +    if (has_padding(&iov[id], *total_req_len, *req_len, &total_msg_len,
> +                    &pad_offset)) {
> +        if (*count) {
> +            return reconstruct_iov(cache, iov, id, total_req_len,
> +                    &pad_offset, count);
> +        }
> +
> +        *req_len -= *total_req_len - total_msg_len;
> +        *total_req_len = 0;
> +        (iov + id)->iov_len = *req_len;
> +        return id + 1;
> +    } else {
> +        (cache + *count)->iov_base = iov->iov_base;
> +        (cache + *count)->iov_len = *req_len;
> +        ++*count;
> +    }
> +
> +    return 0;
> +}
> +
> +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
> +                              bool acc_mode)
>   {
>       struct iovec iov[ASPEED_HACE_MAX_SG];
>       g_autofree uint8_t *digest_buf;
>       size_t digest_len = 0;
> +    int niov = 0;
>       int i;
> +    static struct iovec iov_cache[ASPEED_HACE_MAX_SG];
> +    static int count;
> +    static uint32_t total_len;

Why static? Shouldn't these be AspeedHACEState attributes instead?


>       if (sg_mode) {
>           uint32_t len = 0;
> @@ -124,10 +227,17 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
>                                           MEMTXATTRS_UNSPECIFIED, NULL);
>               addr &= SG_LIST_ADDR_MASK;
>   
> -            iov[i].iov_len = len & SG_LIST_LEN_MASK;
> -            plen = iov[i].iov_len;
> +            plen = len & SG_LIST_LEN_MASK;
>               iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false,
>                                                   MEMTXATTRS_UNSPECIFIED);
> +
> +            if (acc_mode) {
> +                niov = gen_acc_mode_iov(
> +                        iov_cache, iov, i, &total_len, &plen, &count);
> +
> +            } else {
> +                iov[i].iov_len = plen;
> +            }
>           }
>       } else {
>           hwaddr len = s->regs[R_HASH_SRC_LEN];
> @@ -137,6 +247,27 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
>                                               &len, false,
>                                               MEMTXATTRS_UNSPECIFIED);
>           i = 1;
> +
> +        if (count) {
> +            /*
> +             * In aspeed sdk kernel driver, sg_mode is disabled in hash_final().
> +             * Thus if we received a request with sg_mode disabled, it is
> +             * required to check whether cache is empty. If no, we should
> +             * combine cached iov and the current iov.
> +             */
> +            uint32_t total_msg_len;
> +            uint32_t pad_offset;
> +            total_len += len;
> +            if (has_padding(iov, total_len, len, &total_msg_len,
> +                            &pad_offset)) {
> +                niov = reconstruct_iov(iov_cache, iov, 0, &total_len,
> +                        &pad_offset, &count);
> +            }
> +        }
> +    }
> +
> +    if (niov) {
> +        i = niov;
>       }
>   
>       if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
> @@ -238,7 +369,8 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
>                           __func__, data & ahc->hash_mask);
>                   break;
>           }
> -        do_hash_operation(s, algo, data & HASH_SG_EN);
> +        do_hash_operation(s, algo, data & HASH_SG_EN,
> +                ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
>   
>           if (data & HASH_IRQ_EN) {
>               qemu_irq_raise(s->irq);
Steven Lee April 21, 2022, 2:07 a.m. UTC | #2
The 04/20/2022 20:53, Cédric Le Goater wrote:
> On 3/31/22 09:48, Steven Lee wrote:
> > The aspeed ast2600 accumulative mode is described in datasheet
> > ast2600v10.pdf section 25.6.4:
> >   1. Allocating and initiating accumulative hash digest write buffer
> >      with initial state.
> >      * Since QEMU crypto/hash api doesn't provide the API to set initial
> >        state of hash library, and the initial state is already setted by
> 
> s/setted/set/
> 

will fix it.

> >        crypto library (gcrypt/glib/...), so skip this step.
> >   2. Calculating accumulative hash digest.
> >      (a) When receiving the last accumulative data, software need to add
> >          padding message at the end of the accumulative data. Padding
> >          message described in specific of MD5, SHA-1, SHA224, SHA256,
> >          SHA512, SHA512/224, SHA512/256.
> >          * Since the crypto library (gcrypt/glib) already pad the
> >            padding message internally.
> >          * This patch is to remove the padding message which fed byguest
> >            machine driver.
> > 
> > Signed-off-by: Troy Lee <troy_lee@aspeedtech.com>
> > Signed-off-by: Steven Lee <steven_lee@aspeedtech.com>
> > ---
> >   hw/misc/aspeed_hace.c | 140 ++++++++++++++++++++++++++++++++++++++++--
> >   1 file changed, 136 insertions(+), 4 deletions(-)
> > 
> > diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
> > index 59fe5bfca2..5a7a144602 100644
> > --- a/hw/misc/aspeed_hace.c
> > +++ b/hw/misc/aspeed_hace.c
> > @@ -95,12 +95,115 @@ static int hash_algo_lookup(uint32_t reg)
> >       return -1;
> >   }
> >   
> > -static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
> > +/**
> > + * Check whether the request contains padding message.
> > + *
> > + * @param iov           iov of current request
> > + * @param id            index of iov of current request
> > + * @param total_req_len length of all acc_mode requests(including padding msg)
> > + * @param req_len       length of the current request
> > + * @param total_msg_len length of all acc_mode requests(excluding padding msg)
> > + * @param pad_offset    start offset of padding message
> > + */
> > +static bool has_padding(struct iovec *iov, uint32_t total_req_len,
> > +                        hwaddr req_len, uint32_t *total_msg_len,
> > +                        uint32_t *pad_offset)
> > +{
> > +    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
> > +    /*
> > +     * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is the
> > +     * last request. The last request should contain padding message.
> > +     * We check whether message contains padding by
> > +     *   1. Get total message length. If the current message contains
> > +     *      padding, the last 8 bytes are total message length.
> > +     *   2. Check whether the total message length is valid.
> > +     *      If it is valid, the value should less than or eaual to
> 
> s/eaual/equal/
> 

will fix it.

> > +     *      total_req_len.
> > +     *   3. Current request len - padding_size to get padding offset.
> > +     *      The padding message's first byte should be 0x80
> > +     */
> > +    if (*total_msg_len <= total_req_len) {
> > +        uint32_t padding_size = total_req_len - *total_msg_len;
> > +        uint8_t *padding = iov->iov_base;
> > +        *pad_offset = req_len - padding_size;
> > +        if (padding[*pad_offset] == 0x80) {
> > +            return true;
> > +        }
> > +    }
> > +
> > +    return false;
> > +}
> > +
> > +static int reconstruct_iov(struct iovec *cache, struct iovec *iov, int id,
> > +                           uint32_t *total_req_len,
> > +                           uint32_t *pad_offset,
> > +                           int *count)
> > +{
> > +    int i, iov_count;
> > +    if (pad_offset != 0) {
> > +        (cache + *count)->iov_base = (iov + id)->iov_base;
> 
> I would prefer the array notation iov[i], like elsewhere in this file..
> 

will use iov[i] instead of (iov + i).

> > +        (cache + *count)->iov_len = *pad_offset;
> > +        ++*count;
> > +    }
> > +    for (i = 0; i < *count; i++) {
> > +        (iov + i)->iov_base = (cache + i)->iov_base;
> > +        (iov + i)->iov_len = (cache + i)->iov_len;
> 
> ditto.
> 

will use iov[i] instead of (iov + i).

> > +    }
> > +    iov_count = *count;
> > +    *count = 0;
> > +    *total_req_len = 0;
> > +    return iov_count;
> > +}
> > +
> > +/**
> > + * Generate iov for accumulative mode.
> > + *
> > + * @param cache         cached iov
> > + * @param iov           iov of current request
> > + * @param id            index of iov of current request
> > + * @param total_req_len total length of the request(including padding)
> > + * @param req_len       length of the current request
> > + * @param count         count of cached iov
> > + */
> > +static int gen_acc_mode_iov(struct iovec *cache, struct iovec *iov, int id,
> > +                            uint32_t *total_req_len, hwaddr *req_len,
> > +                            int *count)
> > +{
> > +    uint32_t pad_offset;
> > +    uint32_t total_msg_len;
> > +    *total_req_len += *req_len;
> > +
> > +    if (has_padding(&iov[id], *total_req_len, *req_len, &total_msg_len,
> > +                    &pad_offset)) {
> > +        if (*count) {
> > +            return reconstruct_iov(cache, iov, id, total_req_len,
> > +                    &pad_offset, count);
> > +        }
> > +
> > +        *req_len -= *total_req_len - total_msg_len;
> > +        *total_req_len = 0;
> > +        (iov + id)->iov_len = *req_len;
> > +        return id + 1;
> > +    } else {
> > +        (cache + *count)->iov_base = iov->iov_base;
> > +        (cache + *count)->iov_len = *req_len;
> > +        ++*count;
> > +    }
> > +
> > +    return 0;
> > +}
> > +
> > +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
> > +                              bool acc_mode)
> >   {
> >       struct iovec iov[ASPEED_HACE_MAX_SG];
> >       g_autofree uint8_t *digest_buf;
> >       size_t digest_len = 0;
> > +    int niov = 0;
> >       int i;
> > +    static struct iovec iov_cache[ASPEED_HACE_MAX_SG];
> > +    static int count;
> > +    static uint32_t total_len;
> 
> Why static ? Shouldn't these be AspeedHACEState attributes instead ?
> 
> 

will add these static variables in AspeedHACEState.
Thanks for your review.

Steven

Cédric Le Goater April 21, 2022, 7:01 a.m. UTC | #3
Hello Steven,

>>> +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
>>> +                              bool acc_mode)
>>>    {
>>>        struct iovec iov[ASPEED_HACE_MAX_SG];
>>>        g_autofree uint8_t *digest_buf;
>>>        size_t digest_len = 0;
>>> +    int niov = 0;
>>>        int i;
>>> +    static struct iovec iov_cache[ASPEED_HACE_MAX_SG];
>>> +    static int count;
>>> +    static uint32_t total_len;
>>
>> Why static ? Shouldn't these be AspeedHACEState attributes instead ?
>>
> 
> will add these static variables in AspeedHACEState.

When you do, please update the reset handler and the vmstate.

Thanks,

C.
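
For reference, a minimal sketch of what that could look like (field
names are hypothetical; struct iovec holds host pointers, so only the
counters can be migrated and the cache must be rebuilt by the guest):

    /* hypothetical fields added to AspeedHACEState */
    struct iovec iov_cache[ASPEED_HACE_MAX_SG];
    uint32_t total_req_len;
    uint32_t iov_count;

    static const VMStateDescription vmstate_aspeed_hace = {
        .name = TYPE_ASPEED_HACE,
        .version_id = 2,           /* bumped: new fields in the section */
        .minimum_version_id = 2,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
            VMSTATE_UINT32(total_req_len, AspeedHACEState),
            VMSTATE_UINT32(iov_count, AspeedHACEState),
            VMSTATE_END_OF_LIST(),
        }
    };

    static void aspeed_hace_reset(DeviceState *dev)
    {
        AspeedHACEState *s = ASPEED_HACE(dev);

        memset(s->regs, 0, sizeof(s->regs));
        memset(s->iov_cache, 0, sizeof(s->iov_cache));
        s->total_req_len = 0;
        s->iov_count = 0;
    }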

Patch

diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
index 59fe5bfca2..5a7a144602 100644
--- a/hw/misc/aspeed_hace.c
+++ b/hw/misc/aspeed_hace.c
@@ -95,12 +95,115 @@  static int hash_algo_lookup(uint32_t reg)
     return -1;
 }
 
-static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
+/**
+ * Check whether the current request contains the final padding message,
+ * which carries the total message length in its last 8 bytes.
+ *
+ * @param iov           iov of the current request
+ * @param total_req_len length of all acc_mode requests (including padding)
+ * @param req_len       length of the current request
+ * @param total_msg_len length of all acc_mode requests (excluding padding)
+ * @param pad_offset    start offset of the padding message
+ */
+static bool has_padding(struct iovec *iov, uint32_t total_req_len,
+                        hwaddr req_len, uint32_t *total_msg_len,
+                        uint32_t *pad_offset)
+{
+    *total_msg_len = (uint32_t)(ldq_be_p(iov->iov_base + req_len - 8) / 8);
+    /*
+     * SG_LIST_LEN_LAST asserted in the request length doesn't mean it is
+     * the last request. The last request must contain the padding message,
+     * so we check for padding as follows:
+     *   1. Read the total message length. If the current request contains
+     *      padding, its last 8 bytes hold the total message bit length.
+     *   2. Check whether the total message length is valid. If it is, the
+     *      value is less than or equal to total_req_len.
+     *   3. Compute the padding offset as the current request length minus
+     *      padding_size.
+     *      The first byte of the padding message must be 0x80.
+     */
+    if (*total_msg_len <= total_req_len) {
+        uint32_t padding_size = total_req_len - *total_msg_len;
+        uint8_t *padding = iov->iov_base;
+        *pad_offset = req_len - padding_size;
+        if (padding[*pad_offset] == 0x80) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static int reconstruct_iov(struct iovec *cache, struct iovec *iov, int id,
+                           uint32_t *total_req_len,
+                           uint32_t *pad_offset,
+                           int *count)
+{
+    int i, iov_count;
+    if (*pad_offset != 0) {
+        (cache + *count)->iov_base = (iov + id)->iov_base;
+        (cache + *count)->iov_len = *pad_offset;
+        ++*count;
+    }
+    for (i = 0; i < *count; i++) {
+        (iov + i)->iov_base = (cache + i)->iov_base;
+        (iov + i)->iov_len = (cache + i)->iov_len;
+    }
+    iov_count = *count;
+    *count = 0;
+    *total_req_len = 0;
+    return iov_count;
+}
+
+/**
+ * Generate the iov for accumulative mode.
+ *
+ * @param cache         cached iov
+ * @param iov           iov of the current request
+ * @param id            index into iov for the current request
+ * @param total_req_len total length of all requests (including padding)
+ * @param req_len       length of the current request
+ * @param count         count of cached iov entries
+ */
+static int gen_acc_mode_iov(struct iovec *cache, struct iovec *iov, int id,
+                            uint32_t *total_req_len, hwaddr *req_len,
+                            int *count)
+{
+    uint32_t pad_offset;
+    uint32_t total_msg_len;
+    *total_req_len += *req_len;
+
+    if (has_padding(&iov[id], *total_req_len, *req_len, &total_msg_len,
+                    &pad_offset)) {
+        if (*count) {
+            return reconstruct_iov(cache, iov, id, total_req_len,
+                    &pad_offset, count);
+        }
+
+        *req_len -= *total_req_len - total_msg_len;
+        *total_req_len = 0;
+        (iov + id)->iov_len = *req_len;
+        return id + 1;
+    } else {
+        (cache + *count)->iov_base = iov->iov_base;
+        (cache + *count)->iov_len = *req_len;
+        ++*count;
+    }
+
+    return 0;
+}
+
+static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
+                              bool acc_mode)
 {
     struct iovec iov[ASPEED_HACE_MAX_SG];
     g_autofree uint8_t *digest_buf;
     size_t digest_len = 0;
+    int niov = 0;
     int i;
+    static struct iovec iov_cache[ASPEED_HACE_MAX_SG];
+    static int count;
+    static uint32_t total_len;
 
     if (sg_mode) {
         uint32_t len = 0;
@@ -124,10 +227,17 @@  static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
                                         MEMTXATTRS_UNSPECIFIED, NULL);
             addr &= SG_LIST_ADDR_MASK;
 
-            iov[i].iov_len = len & SG_LIST_LEN_MASK;
-            plen = iov[i].iov_len;
+            plen = len & SG_LIST_LEN_MASK;
             iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false,
                                                 MEMTXATTRS_UNSPECIFIED);
+
+            if (acc_mode) {
+                niov = gen_acc_mode_iov(
+                        iov_cache, iov, i, &total_len, &plen, &count);
+
+            } else {
+                iov[i].iov_len = plen;
+            }
         }
     } else {
         hwaddr len = s->regs[R_HASH_SRC_LEN];
@@ -137,6 +247,27 @@  static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
                                             &len, false,
                                             MEMTXATTRS_UNSPECIFIED);
         i = 1;
+
+        if (count) {
+            /*
+             * In the Aspeed SDK kernel driver, sg_mode is disabled in
+             * hash_final(). Thus, when we receive a request with sg_mode
+             * disabled, we must check whether the cache is empty. If it
+             * is not, we combine the cached iov with the current iov.
+             */
+            uint32_t total_msg_len;
+            uint32_t pad_offset;
+            total_len += len;
+            if (has_padding(iov, total_len, len, &total_msg_len,
+                            &pad_offset)) {
+                niov = reconstruct_iov(iov_cache, iov, 0, &total_len,
+                        &pad_offset, &count);
+            }
+        }
+    }
+
+    if (niov) {
+        i = niov;
     }
 
     if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf, &digest_len, NULL) < 0) {
@@ -238,7 +369,8 @@  static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
                         __func__, data & ahc->hash_mask);
                 break;
         }
-        do_hash_operation(s, algo, data & HASH_SG_EN);
+        do_hash_operation(s, algo, data & HASH_SG_EN,
+                ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
 
         if (data & HASH_IRQ_EN) {
             qemu_irq_raise(s->irq);
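
To illustrate the new accumulative path end to end, a hypothetical
walk-through (assuming has_padding() and gen_acc_mode_iov() from the
patch are in scope), using SHA-256 and the 3-byte message "abc":

    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/uio.h>

    static void acc_mode_example(void)
    {
        /* "abc", 0x80, zero fill, bit length 24 in the last 8 bytes */
        uint8_t req[64] = { 'a', 'b', 'c', 0x80 };
        struct iovec iov = { .iov_base = req, .iov_len = sizeof(req) };
        uint32_t total_msg_len, pad_offset;

        req[63] = 24;   /* low byte of the big-endian 64-bit bit length */

        /*
         * total_msg_len = 24 / 8 = 3, padding_size = 64 - 3 = 61,
         * pad_offset = 64 - 61 = 3 and req[3] == 0x80, so the padding
         * is detected. gen_acc_mode_iov() then trims iov_len from 64
         * down to 3 and qcrypto_hash_bytesv() hashes only "abc".
         */
        bool found = has_padding(&iov, 64, 64, &total_msg_len, &pad_offset);
        (void)found;
    }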