@@ -102,4 +102,173 @@ target_ulong HELPER(aes32dsi)(target_ulong rs1, target_ulong rs2,
{
return aes32_operation(shamt, rs1, rs2, false, false);
}
+
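+/* Extract byte I (I = 0 is the least-significant byte) of X. */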
+#define BY(X, I) (((X) >> (8 * (I))) & 0xFF)
+
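+/*
+ * Compute the low 64 bits of the AES (Inv)ShiftRows permutation applied
+ * to the 128-bit column state held in the register pair {RS2, RS1}.
+ * Executing the instruction again with the operands swapped yields the
+ * high half.
+ */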
+#define AES_SHIFTROWS_LO(RS1, RS2) ( \
+ (((RS1 >> 24) & 0xFF) << 56) | (((RS2 >> 48) & 0xFF) << 48) | \
+ (((RS2 >> 8) & 0xFF) << 40) | (((RS1 >> 32) & 0xFF) << 32) | \
+ (((RS2 >> 56) & 0xFF) << 24) | (((RS2 >> 16) & 0xFF) << 16) | \
+ (((RS1 >> 40) & 0xFF) << 8) | (((RS1 >> 0) & 0xFF) << 0))
+
+#define AES_INVSHIFTROWS_LO(RS1, RS2) ( \
+ (((RS2 >> 24) & 0xFF) << 56) | (((RS2 >> 48) & 0xFF) << 48) | \
+ (((RS1 >> 8) & 0xFF) << 40) | (((RS1 >> 32) & 0xFF) << 32) | \
+ (((RS1 >> 56) & 0xFF) << 24) | (((RS2 >> 16) & 0xFF) << 16) | \
+ (((RS2 >> 40) & 0xFF) << 8) | (((RS1 >> 0) & 0xFF) << 0))
+
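+/*
+ * One output byte of the MixColumns matrix multiply over GF(2^8):
+ * 2*b[B0] ^ 3*b[B1] ^ b[B2] ^ b[B3], where b[i] is byte i of COL.
+ */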
+#define AES_MIXBYTE(COL, B0, B1, B2, B3) ( \
+ BY(COL, B3) ^ BY(COL, B2) ^ AES_GFMUL(BY(COL, B1), 3) ^ \
+ AES_GFMUL(BY(COL, B0), 2))
+
+#define AES_MIXCOLUMN(COL) ( \
+ AES_MIXBYTE(COL, 3, 0, 1, 2) << 24 | \
+ AES_MIXBYTE(COL, 2, 3, 0, 1) << 16 | \
+ AES_MIXBYTE(COL, 1, 2, 3, 0) << 8 | AES_MIXBYTE(COL, 0, 1, 2, 3) << 0)
+
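+/*
+ * One output byte of the InvMixColumns matrix multiply over GF(2^8):
+ * 14*b[B0] ^ 11*b[B1] ^ 13*b[B2] ^ 9*b[B3].
+ */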
+#define AES_INVMIXBYTE(COL, B0, B1, B2, B3) ( \
+ AES_GFMUL(BY(COL, B3), 0x9) ^ AES_GFMUL(BY(COL, B2), 0xd) ^ \
+ AES_GFMUL(BY(COL, B1), 0xb) ^ AES_GFMUL(BY(COL, B0), 0xe))
+
+#define AES_INVMIXCOLUMN(COL) ( \
+ AES_INVMIXBYTE(COL, 3, 0, 1, 2) << 24 | \
+ AES_INVMIXBYTE(COL, 2, 3, 0, 1) << 16 | \
+ AES_INVMIXBYTE(COL, 1, 2, 3, 0) << 8 | \
+ AES_INVMIXBYTE(COL, 0, 1, 2, 3) << 0)
+
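+/*
+ * Common body of the RV64 AES round instructions.  The 128-bit state is
+ * passed as the register pair {rs2, rs1}; 'enc' selects the forward or
+ * inverse transform, and 'mix' selects whether the (Inv)MixColumns step
+ * is applied (middle rounds) or skipped (final round).
+ */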
+static inline target_ulong aes64_operation(target_ulong rs1, target_ulong rs2,
+ bool enc, bool mix)
+{
+ uint64_t RS1 = rs1;
+ uint64_t RS2 = rs2;
+ uint64_t result;
+ uint64_t temp;
+ uint32_t col_0;
+ uint32_t col_1;
+
+ if (enc) {
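+        /* ShiftRows, then SubBytes via the forward S-box. */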
+        temp = AES_SHIFTROWS_LO(RS1, RS2);
+        temp = (((uint64_t)AES_sbox[(temp >> 0) & 0xFF] << 0) |
+                ((uint64_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
+                ((uint64_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
+                ((uint64_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
+                ((uint64_t)AES_sbox[(temp >> 32) & 0xFF] << 32) |
+                ((uint64_t)AES_sbox[(temp >> 40) & 0xFF] << 40) |
+                ((uint64_t)AES_sbox[(temp >> 48) & 0xFF] << 48) |
+                ((uint64_t)AES_sbox[(temp >> 56) & 0xFF] << 56));
+ if (mix) {
+ col_0 = temp & 0xFFFFFFFF;
+ col_1 = temp >> 32;
+
+ col_0 = AES_MIXCOLUMN(col_0);
+ col_1 = AES_MIXCOLUMN(col_1);
+
+ result = ((uint64_t)col_1 << 32) | col_0;
+ } else {
+ result = temp;
+ }
+ } else {
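+        /* InvShiftRows, then InvSubBytes via the inverse S-box. */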
+        temp = AES_INVSHIFTROWS_LO(RS1, RS2);
+        temp = (((uint64_t)AES_isbox[(temp >> 0) & 0xFF] << 0) |
+                ((uint64_t)AES_isbox[(temp >> 8) & 0xFF] << 8) |
+                ((uint64_t)AES_isbox[(temp >> 16) & 0xFF] << 16) |
+                ((uint64_t)AES_isbox[(temp >> 24) & 0xFF] << 24) |
+                ((uint64_t)AES_isbox[(temp >> 32) & 0xFF] << 32) |
+                ((uint64_t)AES_isbox[(temp >> 40) & 0xFF] << 40) |
+                ((uint64_t)AES_isbox[(temp >> 48) & 0xFF] << 48) |
+                ((uint64_t)AES_isbox[(temp >> 56) & 0xFF] << 56));
+ if (mix) {
+ col_0 = temp & 0xFFFFFFFF;
+ col_1 = temp >> 32;
+
+ col_0 = AES_INVMIXCOLUMN(col_0);
+ col_1 = AES_INVMIXCOLUMN(col_1);
+
+ result = ((uint64_t)col_1 << 32) | col_0;
+ } else {
+ result = temp;
+ }
+ }
+
+ return result;
+}
+
+target_ulong HELPER(aes64esm)(target_ulong rs1, target_ulong rs2)
+{
+ return aes64_operation(rs1, rs2, true, true);
+}
+
+target_ulong HELPER(aes64es)(target_ulong rs1, target_ulong rs2)
+{
+ return aes64_operation(rs1, rs2, true, false);
+}
+
+target_ulong HELPER(aes64ds)(target_ulong rs1, target_ulong rs2)
+{
+ return aes64_operation(rs1, rs2, false, false);
+}
+
+target_ulong HELPER(aes64dsm)(target_ulong rs1, target_ulong rs2)
+{
+ return aes64_operation(rs1, rs2, false, true);
+}
+
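+/*
+ * aes64ks2: XOR-summation step of the AES key schedule; combines the
+ * high word of rs1 with the two words of rs2 to produce the next two
+ * round-key words.
+ */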
+target_ulong HELPER(aes64ks2)(target_ulong rs1, target_ulong rs2)
+{
+ uint64_t RS1 = rs1;
+ uint64_t RS2 = rs2;
+ uint32_t rs1_hi = RS1 >> 32;
+ uint32_t rs2_lo = RS2;
+ uint32_t rs2_hi = RS2 >> 32;
+
+ uint32_t r_lo = (rs1_hi ^ rs2_lo);
+ uint32_t r_hi = (rs1_hi ^ rs2_lo ^ rs2_hi);
+ target_ulong result = ((uint64_t)r_hi << 32) | r_lo;
+
+ return result;
+}
+
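+/*
+ * aes64ks1i: SubWord/RotWord/round-constant step of the AES key
+ * schedule, applied to the high word of rs1.  The 32-bit result is
+ * replicated into both halves of rd.
+ */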
+target_ulong HELPER(aes64ks1i)(target_ulong rs1, target_ulong rnum)
+{
+ uint64_t RS1 = rs1;
+ static const uint8_t round_consts[10] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36
+ };
+
+ uint8_t enc_rnum = rnum;
+ uint32_t temp = (RS1 >> 32) & 0xFFFFFFFF;
+ uint8_t rcon_ = 0;
+ target_ulong result;
+
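+    /*
+     * rnum == 0xA selects the SubWord-only step used by the AES-256 key
+     * schedule; RotWord and the round-constant XOR are skipped.
+     */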
+ if (enc_rnum != 0xA) {
+        temp = ror32(temp, 8); /* RotWord: rotate right by one byte */
+ rcon_ = round_consts[enc_rnum];
+ }
+
+ temp = ((uint32_t)AES_sbox[(temp >> 24) & 0xFF] << 24) |
+ ((uint32_t)AES_sbox[(temp >> 16) & 0xFF] << 16) |
+ ((uint32_t)AES_sbox[(temp >> 8) & 0xFF] << 8) |
+ ((uint32_t)AES_sbox[(temp >> 0) & 0xFF] << 0);
+
+ temp ^= rcon_;
+
+ result = ((uint64_t)temp << 32) | temp;
+
+ return result;
+}
+
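+/* aes64im: apply InvMixColumns to each 32-bit column of rs1. */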
+target_ulong HELPER(aes64im)(target_ulong rs1)
+{
+ uint64_t RS1 = rs1;
+ uint32_t col_0 = RS1 & 0xFFFFFFFF;
+ uint32_t col_1 = RS1 >> 32;
+ target_ulong result;
+
+ col_0 = AES_INVMIXCOLUMN(col_0);
+ col_1 = AES_INVMIXCOLUMN(col_1);
+
+ result = ((uint64_t)col_1 << 32) | col_0;
+
+ return result;
+}
#undef sext32_xlen
@@ -1118,3 +1118,11 @@ DEF_HELPER_FLAGS_3(aes32esmi, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
DEF_HELPER_FLAGS_3(aes32esi, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
DEF_HELPER_FLAGS_3(aes32dsmi, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
DEF_HELPER_FLAGS_3(aes32dsi, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
+
+DEF_HELPER_FLAGS_2(aes64esm, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64es, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64ds, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64dsm, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64ks2, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_2(aes64ks1i, TCG_CALL_NO_RWG_SE, tl, tl, tl)
+DEF_HELPER_FLAGS_1(aes64im, TCG_CALL_NO_RWG_SE, tl, tl)
@@ -36,6 +36,7 @@
%imm_j 31:s1 12:8 20:1 21:10 !function=ex_shift_1
%imm_u 12:s20 !function=ex_shift_12
%imm_bs 30:2 !function=ex_shift_3
+%imm_rnum 20:4
# Argument sets:
&empty
@@ -92,6 +93,7 @@
@sfence_vm ....... ..... ..... ... ..... ....... %rs1
@k_aes .. ..... ..... ..... ... ..... ....... &k_aes shamt=%imm_bs %rs2 %rs1 %rd
+@i_aes .. ..... ..... ..... ... ..... ....... &i imm=%imm_rnum %rs1 %rd
# Formats 64:
@sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd
@@ -842,6 +844,16 @@ hinval_gvma 0110011 ..... ..... 000 00000 1110011 @hfence_gvma
# *** RV32 Zknd Standard Extension ***
aes32dsmi .. 10111 ..... ..... 000 ..... 0110011 @k_aes
aes32dsi .. 10101 ..... ..... 000 ..... 0110011 @k_aes
+# *** RV64 Zknd Standard Extension ***
+aes64dsm 00 11111 ..... ..... 000 ..... 0110011 @r
+aes64ds 00 11101 ..... ..... 000 ..... 0110011 @r
+aes64im 00 11000 00000 ..... 001 ..... 0010011 @r2
# *** RV32 Zkne Standard Extension ***
aes32esmi .. 10011 ..... ..... 000 ..... 0110011 @k_aes
aes32esi .. 10001 ..... ..... 000 ..... 0110011 @k_aes
+# *** RV64 Zkne Standard Extension ***
+aes64es 00 11001 ..... ..... 000 ..... 0110011 @r
+aes64esm 00 11011 ..... ..... 000 ..... 0110011 @r
+# *** RV64 Zkne/Zknd Standard Extension ***
+aes64ks2 01 11111 ..... ..... 000 ..... 0110011 @r
+aes64ks1i 00 11000 1.... ..... 001 ..... 0010011 @i_aes
@@ -65,3 +65,50 @@ static bool trans_aes32dsi(DisasContext *ctx, arg_aes32dsi *a)
REQUIRE_ZKND(ctx);
return gen_aes32_sm4(ctx, a, gen_helper_aes32dsi);
}
+
+static bool trans_aes64es(DisasContext *ctx, arg_aes64es *a)
+{
+ REQUIRE_ZKNE(ctx);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64es, NULL);
+}
+
+static bool trans_aes64esm(DisasContext *ctx, arg_aes64esm *a)
+{
+ REQUIRE_ZKNE(ctx);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64esm, NULL);
+}
+
+static bool trans_aes64ds(DisasContext *ctx, arg_aes64ds *a)
+{
+ REQUIRE_ZKND(ctx);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64ds, NULL);
+}
+
+static bool trans_aes64dsm(DisasContext *ctx, arg_aes64dsm *a)
+{
+ REQUIRE_ZKND(ctx);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64dsm, NULL);
+}
+
+static bool trans_aes64ks2(DisasContext *ctx, arg_aes64ks2 *a)
+{
+ REQUIRE_EITHER_EXT(ctx, zknd, zkne);
+ return gen_arith(ctx, a, EXT_NONE, gen_helper_aes64ks2, NULL);
+}
+
+static bool trans_aes64ks1i(DisasContext *ctx, arg_aes64ks1i *a)
+{
+ REQUIRE_EITHER_EXT(ctx, zknd, zkne);
+
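+    /* Round numbers 0xB..0xF are reserved; treat them as illegal. */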
+ if (a->imm > 0xA) {
+ return false;
+ }
+
+ return gen_arith_imm_tl(ctx, a, EXT_NONE, gen_helper_aes64ks1i, NULL);
+}
+
+static bool trans_aes64im(DisasContext *ctx, arg_aes64im *a)
+{
+ REQUIRE_ZKND(ctx);
+ return gen_unary(ctx, a, EXT_NONE, gen_helper_aes64im);
+}