
[v2,1/2] hw/misc: Supporting AST2600 HACE accumulative mode

Message ID 20220112080937.366835-2-troy_lee@aspeedtech.com (mailing list archive)
State New, archived
Series Supporting AST2600 HACE engine accumulative mode

Commit Message

Troy Lee Jan. 12, 2022, 8:09 a.m. UTC
Accumulative mode supplies an initial state and appends padding bits at
the end of the hash stream.  However, the crypto library adds that
padding automatically, so strip it from the iov array.

The aspeed ast2600 accumulative mode is described in datasheet
ast2600v10.pdf section 25.6.4:
 1. Allocating and initializing the accumulative hash digest write
    buffer with the initial state.
    * Since the QEMU crypto/hash API doesn't provide a way to set the
      initial state, and the initial state is already set up by the
      crypto library (gcrypt/glib/...), this step is skipped.
 2. Calculating the accumulative hash digest.
    (a) When receiving the last chunk of accumulative data, software
        needs to append the padding message to it.  The padding message
        is described in the specifications of MD5, SHA-1, SHA-224,
        SHA-256, SHA-512, SHA-512/224 and SHA-512/256.
        * The crypto library (gcrypt/glib) already appends the padding
          internally.
        * This patch therefore removes the padding message fed by the
          guest machine driver (see the sketch below).
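
To make the length recovery in step 2(a) concrete, here is a small
standalone sketch (an illustration only, not part of the patch): one
SHA-256 block padded by a hypothetical guest, with the original message
length recovered from the big-endian 64-bit field at the end, the same
value the patch derives with ldq_be_p(iov_base + plen - 8) / 8.

#include <stdint.h>
#include <stdio.h>

/* Plain-C stand-in for QEMU's ldq_be_p(). */
static uint64_t be64_read(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

int main(void)
{
    /* "abc" padded to one 64-byte SHA-256 block by a hypothetical guest. */
    uint8_t block[64] = { 'a', 'b', 'c', 0x80 };
    block[63] = 3 * 8;                        /* bit length 24, big endian */

    uint64_t padded_len = sizeof(block);      /* length carried by the SG entry */
    uint64_t msg_len    = be64_read(block + padded_len - 8) / 8;
    uint64_t to_strip   = padded_len - msg_len;

    /* Prints: padded=64 message=3 strip=61 */
    printf("padded=%llu message=%llu strip=%llu\n",
           (unsigned long long)padded_len, (unsigned long long)msg_len,
           (unsigned long long)to_strip);
    return 0;
}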

Changes in v2:
- Coding style
- Add accumulative mode description in comment

Signed-off-by: Troy Lee <troy_lee@aspeedtech.com>
---
 hw/misc/aspeed_hace.c         | 43 ++++++++++++++++++++++++++++-------
 include/hw/misc/aspeed_hace.h |  1 +
 2 files changed, 36 insertions(+), 8 deletions(-)

Comments

Cédric Le Goater Jan. 17, 2022, 8:11 a.m. UTC | #1
Hello Troy,

On 1/12/22 09:09, Troy Lee wrote:
> Accumulative mode will supply a initial state and append padding bit at
> the end of hash stream.  However, the crypto library will padding those
> bit automatically, so ripped it off from iov array.
> 
> The aspeed ast2600 acculumative mode is described in datasheet
> ast2600v10.pdf section 25.6.4:
>   1. Allocationg and initiating accumulative hash digest write buffer
>      with initial state.
>      * Since QEMU crypto/hash api doesn't provide the API to set initial
>        state of hash library, and the initial state is already setted by
>        crypto library (gcrypt/glib/...), so skip this step.
>   2. Calculating accumulative hash digest.
>      (a) When receiving the last accumulative data, software need to add
>          padding message at the end of the accumulative data. Padding
>          message described in specific of MD5, SHA-1, SHA224, SHA256,
>          SHA512, SHA512/224, SHA512/256.
>          * Since the crypto library (gcrypt/glib) already pad the
>            padding message internally.
>          * This patch is to remove the padding message which fed byguest
>            machine driver.
> 
> Changes in v2:
> - Coding style
> - Add accumulative mode description in comment
> 
> Signed-off-by: Troy Lee <troy_lee@aspeedtech.com>
> ---
>   hw/misc/aspeed_hace.c         | 43 ++++++++++++++++++++++++++++-------
>   include/hw/misc/aspeed_hace.h |  1 +
>   2 files changed, 36 insertions(+), 8 deletions(-)
> 
> diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
> index 10f00e65f4..0710f44621 100644
> --- a/hw/misc/aspeed_hace.c
> +++ b/hw/misc/aspeed_hace.c
> @@ -11,6 +11,7 @@
>   #include "qemu/osdep.h"
>   #include "qemu/log.h"
>   #include "qemu/error-report.h"
> +#include "qemu/bswap.h"
>   #include "hw/misc/aspeed_hace.h"
>   #include "qapi/error.h"
>   #include "migration/vmstate.h"
> @@ -27,6 +28,7 @@
>   
>   #define R_HASH_SRC      (0x20 / 4)
>   #define R_HASH_DEST     (0x24 / 4)
> +#define R_HASH_KEY_BUFF (0x28 / 4)
>   #define R_HASH_SRC_LEN  (0x2c / 4)
>   
>   #define R_HASH_CMD      (0x30 / 4)
> @@ -94,7 +96,8 @@ static int hash_algo_lookup(uint32_t reg)
>       return -1;
>   }
>   
> -static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
> +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
> +                              bool acc_mode)
>   {
>       struct iovec iov[ASPEED_HACE_MAX_SG];
>       g_autofree uint8_t *digest_buf;
> @@ -103,6 +106,7 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
>   
>       if (sg_mode) {
>           uint32_t len = 0;
> +        uint32_t total_len = 0;
>   
>           for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
>               uint32_t addr, src;
> @@ -123,10 +127,26 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
>                                           MEMTXATTRS_UNSPECIFIED, NULL);
>               addr &= SG_LIST_ADDR_MASK;
>   
> -            iov[i].iov_len = len & SG_LIST_LEN_MASK;
> -            plen = iov[i].iov_len;
> +            plen = len & SG_LIST_LEN_MASK;
>               iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false,
>                                                   MEMTXATTRS_UNSPECIFIED);
> +
> +            if (acc_mode) {
> +                total_len += plen;
> +
> +                if (len & SG_LIST_LEN_LAST) {
> +                    /*
> +                     * In the padding message, the last 64/128 bit represents
> +                     * the total length of bitstream in big endian.
> +                     * SHA-224, SHA-256 are 64 bit
> +                     * SHA-384, SHA-512, SHA-512/224, SHA-512/256 are 128 bit
> +                     * However, we would not process such a huge bit stream.

If we do not support accumulative mode with SHA-384, SHA-512, SHA-512/224,
SHA-512/256 algos, we should make sure that do_hash_operation() is not
called and emit a warning.
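
A standalone sketch of what such a guard could look like, using plain C
stand-ins (a local enum and fprintf) for the device's algorithm
selection and for qemu_log_mask(LOG_UNIMP, ...):

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for the device's hash algorithm selection. */
enum hash_algo { ALGO_SHA224, ALGO_SHA256, ALGO_SHA384, ALGO_SHA512 };

/* Only the algos with a 64-bit length field are handled in acc mode. */
static bool acc_mode_supported(enum hash_algo algo)
{
    return algo == ALGO_SHA224 || algo == ALGO_SHA256;
}

static void hash_command(enum hash_algo algo, bool acc_mode)
{
    if (acc_mode && !acc_mode_supported(algo)) {
        /* In the device model this would be qemu_log_mask(LOG_UNIMP, ...). */
        fprintf(stderr, "accumulative mode not implemented for algo %d\n",
                (int)algo);
        return;                     /* do_hash_operation() is never reached */
    }
    printf("do_hash_operation(algo=%d, acc_mode=%d)\n", (int)algo, (int)acc_mode);
}

int main(void)
{
    hash_command(ALGO_SHA256, true);    /* accepted */
    hash_command(ALGO_SHA512, true);    /* warned and skipped */
    return 0;
}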

Thanks,

C.

> +                     */
> +                    plen -= total_len - (ldq_be_p(iov[i].iov_base + plen - 8) / 8);
> +                }
> +            }
> +
> +            iov[i].iov_len = plen;
>           }
>       } else {
>           hwaddr len = s->regs[R_HASH_SRC_LEN];
> @@ -210,6 +230,9 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
>       case R_HASH_DEST:
>           data &= ahc->dest_mask;
>           break;
> +    case R_HASH_KEY_BUFF:
> +        data &= ahc->key_mask;
> +        break;
>       case R_HASH_SRC_LEN:
>           data &= 0x0FFFFFFF;
>           break;
> @@ -229,12 +252,13 @@ static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
>           }
>           algo = hash_algo_lookup(data);
>           if (algo < 0) {
> -                qemu_log_mask(LOG_GUEST_ERROR,
> -                        "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
> -                        __func__, data & ahc->hash_mask);
> -                break;
> +            qemu_log_mask(LOG_GUEST_ERROR,
> +                    "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
> +                    __func__, data & ahc->hash_mask);
> +            break;
>           }
> -        do_hash_operation(s, algo, data & HASH_SG_EN);
> +        do_hash_operation(s, algo, data & HASH_SG_EN,
> +                ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
>   
>           if (data & HASH_IRQ_EN) {
>               qemu_irq_raise(s->irq);
> @@ -333,6 +357,7 @@ static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
>   
>       ahc->src_mask = 0x0FFFFFFF;
>       ahc->dest_mask = 0x0FFFFFF8;
> +    ahc->key_mask = 0x0FFFFFC0;
>       ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
>   }
>   
> @@ -351,6 +376,7 @@ static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
>   
>       ahc->src_mask = 0x3fffffff;
>       ahc->dest_mask = 0x3ffffff8;
> +    ahc->key_mask = 0x3FFFFFC0;
>       ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
>   }
>   
> @@ -369,6 +395,7 @@ static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
>   
>       ahc->src_mask = 0x7FFFFFFF;
>       ahc->dest_mask = 0x7FFFFFF8;
> +    ahc->key_mask = 0x7FFFFFF8;
>       ahc->hash_mask = 0x00147FFF;
>   }
>   
> diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h
> index 94d5ada95f..2242945eb4 100644
> --- a/include/hw/misc/aspeed_hace.h
> +++ b/include/hw/misc/aspeed_hace.h
> @@ -37,6 +37,7 @@ struct AspeedHACEClass {
>   
>       uint32_t src_mask;
>       uint32_t dest_mask;
> +    uint32_t key_mask;
>       uint32_t hash_mask;
>   };
>   
>
Cédric Le Goater Jan. 17, 2022, 8:48 a.m. UTC | #2
>> -static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
>> +static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
>> +                              bool acc_mode)
>>   {
>>       struct iovec iov[ASPEED_HACE_MAX_SG];
>>       g_autofree uint8_t *digest_buf;
>> @@ -103,6 +106,7 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
>>       if (sg_mode) {
>>           uint32_t len = 0;
>> +        uint32_t total_len = 0;
>>           for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
>>               uint32_t addr, src;
>> @@ -123,10 +127,26 @@ static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
>>                                           MEMTXATTRS_UNSPECIFIED, NULL);
>>               addr &= SG_LIST_ADDR_MASK;
>> -            iov[i].iov_len = len & SG_LIST_LEN_MASK;
>> -            plen = iov[i].iov_len;
>> +            plen = len & SG_LIST_LEN_MASK;
>>               iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false,
>>                                                   MEMTXATTRS_UNSPECIFIED);
>> +
>> +            if (acc_mode) {
>> +                total_len += plen;
>> +
>> +                if (len & SG_LIST_LEN_LAST) {
>> +                    /*
>> +                     * In the padding message, the last 64/128 bit represents
>> +                     * the total length of bitstream in big endian.
>> +                     * SHA-224, SHA-256 are 64 bit
>> +                     * SHA-384, SHA-512, SHA-512/224, SHA-512/256 are 128 bit
>> +                     * However, we would not process such a huge bit stream.
> 
> If we do not support accumulative mode with SHA-384, SHA-512, SHA-512/224,
> SHA-512/256 algos, we should make sure that do_hash_operation() is not
> called and emit a warning.

I misunderstood the comment. I don't think it is very complex to
extract the total length field correctly. You only need the algo
type to pick the right size. Maybe add one more parameter to
do_hash_operation() and


>> +                     */
>> +                    plen -= total_len - (ldq_be_p(iov[i].iov_base + plen - 8) / 8);

change the offset in the above ?
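
A rough standalone sketch of that direction, assuming the algo is
plumbed through as suggested (the enum and helpers below are local
stand-ins, not QEMU API): the SHA-512 family ends its padding with a
128-bit big-endian length field rather than a 64-bit one, so the algo
decides how many trailing bytes belong to the field, while the
significant bits of any realistic length still sit in the last 8 bytes.

#include <stdint.h>
#include <stdio.h>

enum sha_family { SHA256_FAMILY, SHA512_FAMILY };

/* SHA-224/256 pad with a 64-bit length field, SHA-384/512/... with 128 bits. */
static unsigned length_field_bytes(enum sha_family f)
{
    return f == SHA512_FAMILY ? 16 : 8;
}

/* Plain-C stand-in for ldq_be_p(). */
static uint64_t be64_read(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

/* Message length in bytes, taken from the end of the final padded block. */
static uint64_t padded_msg_bytes(const uint8_t *block, unsigned block_len,
                                 enum sha_family f)
{
    uint64_t high = length_field_bytes(f) == 16
                  ? be64_read(block + block_len - 16) : 0;
    uint64_t bits = be64_read(block + block_len - 8);
    return high ? UINT64_MAX /* absurdly long stream, reject */ : bits / 8;
}

int main(void)
{
    uint8_t b256[64]  = { 'a', 'b', 'c', 0x80 };   /* SHA-256 padded "abc" */
    uint8_t b512[128] = { 'a', 'b', 'c', 0x80 };   /* SHA-512 padded "abc" */
    b256[63]  = 24;                                /* 3 bytes * 8 bits, BE */
    b512[127] = 24;

    printf("sha256 family: %llu bytes\n",
           (unsigned long long)padded_msg_bytes(b256, 64, SHA256_FAMILY));
    printf("sha512 family: %llu bytes\n",
           (unsigned long long)padded_msg_bytes(b512, 128, SHA512_FAMILY));
    return 0;
}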

Thanks,

C.
Joel Stanley Feb. 8, 2022, 10:46 a.m. UTC | #3
Hello Troy,

On Wed, 12 Jan 2022 at 08:10, Troy Lee <troy_lee@aspeedtech.com> wrote:
>
> Accumulative mode will supply a initial state and append padding bit at
> the end of hash stream.  However, the crypto library will padding those
> bit automatically, so ripped it off from iov array.
>
> The aspeed ast2600 acculumative mode is described in datasheet
> ast2600v10.pdf section 25.6.4:
>  1. Allocationg and initiating accumulative hash digest write buffer
>     with initial state.
>     * Since QEMU crypto/hash api doesn't provide the API to set initial
>       state of hash library, and the initial state is already setted by
>       crypto library (gcrypt/glib/...), so skip this step.
>  2. Calculating accumulative hash digest.
>     (a) When receiving the last accumulative data, software need to add
>         padding message at the end of the accumulative data. Padding
>         message described in specific of MD5, SHA-1, SHA224, SHA256,
>         SHA512, SHA512/224, SHA512/256.
>         * Since the crypto library (gcrypt/glib) already pad the
>           padding message internally.
>         * This patch is to remove the padding message which fed byguest
>           machine driver.


I tested the latest aspeed SDK u-boot, loaded from mmc (with our mmc
model that lives in Cedric's tree) and qemu crashed:

#0  0x00007fe867d44932 in ?? () from /usr/lib/x86_64-linux-gnu/libglib-2.0.so.0
#1  0x0000557aba2b6e22 in qcrypto_glib_hash_bytesv (alg=<optimized
out>, iov=0x7fe8662ee0b0, niov=1, result=0x7fe8662ee0a8,
resultlen=0x7fe8662ee0a0, errp=0x0) at ../crypto/hash-glib.c:68
#2  0x0000557ab9f549ea in do_hash_operation (s=s@entry=0x7fe866e1b3b0,
algo=5, sg_mode=sg_mode@entry=true, acc_mode=acc_mode@entry=true) at
../hw/misc/aspeed_hace.c:161
#3  0x0000557ab9f54dd1 in aspeed_hace_write (opaque=<optimized out>,
addr=12, data=262504, size=<optimized out>) at
../hw/misc/aspeed_hace.c:260

Without your patch applied the HACE operation fails, as we do not have
support for accumulative mode, but we do not crash.
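
One possible reading of the backtrace, offered as an assumption rather
than a confirmed diagnosis: if the final scatter-gather entry does not
end with a valid padding length (for instance if this U-Boot hands over
data it has not padded), the plen adjustment in the patch can underflow
and pass an enormous iov_len to the crypto backend. A standalone sketch
of a defensive check on the recovered length, with plain-C stand-ins
for the QEMU helpers:

#include <stdint.h>
#include <stdio.h>

/* Plain-C stand-in for ldq_be_p(). */
static uint64_t be64_read(const uint8_t *p)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

/* iov_len to use for the final entry: strip padding only when it adds up. */
static uint64_t final_iov_len(const uint8_t *base, uint64_t plen,
                              uint64_t total_len)
{
    if (plen < 8) {
        return plen;                      /* no room for a length field */
    }
    uint64_t msg_len = be64_read(base + plen - 8) / 8;
    if (msg_len > total_len || total_len - msg_len > plen) {
        return plen;                      /* not a padded stream: leave as-is */
    }
    return plen - (total_len - msg_len);  /* drop the guest-supplied padding */
}

int main(void)
{
    uint8_t padded[64] = { 'a', 'b', 'c', 0x80 };
    padded[63] = 24;                      /* valid SHA-256 style padding */
    uint8_t unpadded[16] = { 'a', 'b', 'c' };
    unpadded[15] = 0xff;                  /* garbage where a length would be */

    /* Prints 3 (padding stripped) and 16 (left untouched). */
    printf("padded:   %llu\n", (unsigned long long)final_iov_len(padded, 64, 64));
    printf("unpadded: %llu\n", (unsigned long long)final_iov_len(unpadded, 16, 16));
    return 0;
}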

Troy Lee Feb. 10, 2022, 6:11 a.m. UTC | #4
Hi Joel,

On Tue, Feb 8, 2022 at 6:46 PM Joel Stanley <joel@jms.id.au> wrote:
>
> Hello Troy,
>
> On Wed, 12 Jan 2022 at 08:10, Troy Lee <troy_lee@aspeedtech.com> wrote:
> >
> > Accumulative mode will supply a initial state and append padding bit at
> > the end of hash stream.  However, the crypto library will padding those
> > bit automatically, so ripped it off from iov array.
> >
> > The aspeed ast2600 acculumative mode is described in datasheet
> > ast2600v10.pdf section 25.6.4:
> >  1. Allocationg and initiating accumulative hash digest write buffer
> >     with initial state.
> >     * Since QEMU crypto/hash api doesn't provide the API to set initial
> >       state of hash library, and the initial state is already setted by
> >       crypto library (gcrypt/glib/...), so skip this step.
> >  2. Calculating accumulative hash digest.
> >     (a) When receiving the last accumulative data, software need to add
> >         padding message at the end of the accumulative data. Padding
> >         message described in specific of MD5, SHA-1, SHA224, SHA256,
> >         SHA512, SHA512/224, SHA512/256.
> >         * Since the crypto library (gcrypt/glib) already pad the
> >           padding message internally.
> >         * This patch is to remove the padding message which fed byguest
> >           machine driver.
>
>
> I tested the latest aspeed SDK u-boot, loaded form mmc (with our mmc
> model that lives in Cedric's tree) and qemu crashed:
>
> #0  0x00007fe867d44932 in ?? () from /usr/lib/x86_64-linux-gnu/libglib-2.0.so.0
> #1  0x0000557aba2b6e22 in qcrypto_glib_hash_bytesv (alg=<optimized
> out>, iov=0x7fe8662ee0b0, niov=1, result=0x7fe8662ee0a8,
> resultlen=0x7fe8662ee0a0, errp=0x0) at ../crypto/hash-glib.c:68
> #2  0x0000557ab9f549ea in do_hash_operation (s=s@entry=0x7fe866e1b3b0,
> algo=5, sg_mode=sg_mode@entry=true, acc_mode=acc_mode@entry=true) at
> ../hw/misc/aspeed_hace.c:161
> #3  0x0000557ab9f54dd1 in aspeed_hace_write (opaque=<optimized out>,
> addr=12, data=262504, size=<optimized out>) at
> ../hw/misc/aspeed_hace.c:260
>
> WIthout your patch applied the HACE operation fails, as we do not have
> support for accumulative mode, but we do not crash.

I'll double check on this issue.


Patch

diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c
index 10f00e65f4..0710f44621 100644
--- a/hw/misc/aspeed_hace.c
+++ b/hw/misc/aspeed_hace.c
@@ -11,6 +11,7 @@ 
 #include "qemu/osdep.h"
 #include "qemu/log.h"
 #include "qemu/error-report.h"
+#include "qemu/bswap.h"
 #include "hw/misc/aspeed_hace.h"
 #include "qapi/error.h"
 #include "migration/vmstate.h"
@@ -27,6 +28,7 @@ 
 
 #define R_HASH_SRC      (0x20 / 4)
 #define R_HASH_DEST     (0x24 / 4)
+#define R_HASH_KEY_BUFF (0x28 / 4)
 #define R_HASH_SRC_LEN  (0x2c / 4)
 
 #define R_HASH_CMD      (0x30 / 4)
@@ -94,7 +96,8 @@  static int hash_algo_lookup(uint32_t reg)
     return -1;
 }
 
-static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
+static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
+                              bool acc_mode)
 {
     struct iovec iov[ASPEED_HACE_MAX_SG];
     g_autofree uint8_t *digest_buf;
@@ -103,6 +106,7 @@  static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
 
     if (sg_mode) {
         uint32_t len = 0;
+        uint32_t total_len = 0;
 
         for (i = 0; !(len & SG_LIST_LEN_LAST); i++) {
             uint32_t addr, src;
@@ -123,10 +127,26 @@  static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode)
                                         MEMTXATTRS_UNSPECIFIED, NULL);
             addr &= SG_LIST_ADDR_MASK;
 
-            iov[i].iov_len = len & SG_LIST_LEN_MASK;
-            plen = iov[i].iov_len;
+            plen = len & SG_LIST_LEN_MASK;
             iov[i].iov_base = address_space_map(&s->dram_as, addr, &plen, false,
                                                 MEMTXATTRS_UNSPECIFIED);
+
+            if (acc_mode) {
+                total_len += plen;
+
+                if (len & SG_LIST_LEN_LAST) {
+                    /*
+                     * In the padding message, the last 64/128 bit represents
+                     * the total length of bitstream in big endian.
+                     * SHA-224, SHA-256 are 64 bit
+                     * SHA-384, SHA-512, SHA-512/224, SHA-512/256 are 128 bit
+                     * However, we would not process such a huge bit stream.
+                     */
+                    plen -= total_len - (ldq_be_p(iov[i].iov_base + plen - 8) / 8);
+                }
+            }
+
+            iov[i].iov_len = plen;
         }
     } else {
         hwaddr len = s->regs[R_HASH_SRC_LEN];
@@ -210,6 +230,9 @@  static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
     case R_HASH_DEST:
         data &= ahc->dest_mask;
         break;
+    case R_HASH_KEY_BUFF:
+        data &= ahc->key_mask;
+        break;
     case R_HASH_SRC_LEN:
         data &= 0x0FFFFFFF;
         break;
@@ -229,12 +252,13 @@  static void aspeed_hace_write(void *opaque, hwaddr addr, uint64_t data,
         }
         algo = hash_algo_lookup(data);
         if (algo < 0) {
-                qemu_log_mask(LOG_GUEST_ERROR,
-                        "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
-                        __func__, data & ahc->hash_mask);
-                break;
+            qemu_log_mask(LOG_GUEST_ERROR,
+                    "%s: Invalid hash algorithm selection 0x%"PRIx64"\n",
+                    __func__, data & ahc->hash_mask);
+            break;
         }
-        do_hash_operation(s, algo, data & HASH_SG_EN);
+        do_hash_operation(s, algo, data & HASH_SG_EN,
+                ((data & HASH_HMAC_MASK) == HASH_DIGEST_ACCUM));
 
         if (data & HASH_IRQ_EN) {
             qemu_irq_raise(s->irq);
@@ -333,6 +357,7 @@  static void aspeed_ast2400_hace_class_init(ObjectClass *klass, void *data)
 
     ahc->src_mask = 0x0FFFFFFF;
     ahc->dest_mask = 0x0FFFFFF8;
+    ahc->key_mask = 0x0FFFFFC0;
     ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
 }
 
@@ -351,6 +376,7 @@  static void aspeed_ast2500_hace_class_init(ObjectClass *klass, void *data)
 
     ahc->src_mask = 0x3fffffff;
     ahc->dest_mask = 0x3ffffff8;
+    ahc->key_mask = 0x3FFFFFC0;
     ahc->hash_mask = 0x000003ff; /* No SG or SHA512 modes */
 }
 
@@ -369,6 +395,7 @@  static void aspeed_ast2600_hace_class_init(ObjectClass *klass, void *data)
 
     ahc->src_mask = 0x7FFFFFFF;
     ahc->dest_mask = 0x7FFFFFF8;
+    ahc->key_mask = 0x7FFFFFF8;
     ahc->hash_mask = 0x00147FFF;
 }
 
diff --git a/include/hw/misc/aspeed_hace.h b/include/hw/misc/aspeed_hace.h
index 94d5ada95f..2242945eb4 100644
--- a/include/hw/misc/aspeed_hace.h
+++ b/include/hw/misc/aspeed_hace.h
@@ -37,6 +37,7 @@  struct AspeedHACEClass {
 
     uint32_t src_mask;
     uint32_t dest_mask;
+    uint32_t key_mask;
     uint32_t hash_mask;
 };