[v6,25/72] target/riscv: rvv-1.0: take fractional LMUL into vector max elements calculation

Message ID 20210112093950.17530-26-frank.chang@sifive.com (mailing list archive)
State New, archived
Series support vector extension v1.0

Commit Message

Frank Chang Jan. 12, 2021, 9:38 a.m. UTC
From: Frank Chang <frank.chang@sifive.com>

Update vext_get_vlmax() and MAXSZ() to take fractional LMUL into
account when calculating the maximum number of vector elements for
RVV 1.0.

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/riscv/cpu.h                      | 43 ++++++++++++++++++-------
 target/riscv/insn_trans/trans_rvv.c.inc | 12 ++++++-
 2 files changed, 42 insertions(+), 13 deletions(-)
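
For reference, the core of the change can be illustrated with a small
standalone sketch (this is not QEMU code: sketch_vlmax() and its plain
integer parameters are made up for illustration). It shows how
sign-extending the 3-bit vlmul field lets the same shift handle both
integer and fractional LMUL:

    /* Standalone illustration of the VLMAX calculation with fractional LMUL. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t sketch_vlmax(uint32_t vlen, int vsew, unsigned vlmul)
    {
        /* Sign-extend the 3-bit vlmul field: 0b101 -> -3, i.e. LMUL = 1/8. */
        int lmul = (vlmul & 0x4) ? (int)vlmul - 8 : (int)vlmul;
        return vlen >> (vsew + 3 - lmul);
    }

    int main(void)
    {
        /* vlen = 256 bits, SEW = 16 (vsew = 1), LMUL = 1/8 (vlmul = 0b101) */
        printf("VLMAX = %u\n", (unsigned)sketch_vlmax(256, 1, 5)); /* prints 2 */
        return 0;
    }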

Comments

Alistair Francis Jan. 25, 2021, 11:42 p.m. UTC | #1
On Tue, Jan 12, 2021 at 2:04 AM <frank.chang@sifive.com> wrote:
>
> From: Frank Chang <frank.chang@sifive.com>
>
> Update vext_get_vlmax() and MAXSZ() to take fractional LMUL into
> account when calculating the maximum number of vector elements for
> RVV 1.0.
>
> Signed-off-by: Frank Chang <frank.chang@sifive.com>
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

> ---
>  target/riscv/cpu.h                      | 43 ++++++++++++++++++-------
>  target/riscv/insn_trans/trans_rvv.c.inc | 12 ++++++-
>  2 files changed, 42 insertions(+), 13 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index aae3512c33f..993539aed94 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -392,18 +392,27 @@ FIELD(TB_FLAGS, HLSX, 12, 1)
>  bool riscv_cpu_is_32bit(CPURISCVState *env);
>
>  /*
> - * A simplification for VLMAX
> - * = (1 << LMUL) * VLEN / (8 * (1 << SEW))
> - * = (VLEN << LMUL) / (8 << SEW)
> - * = (VLEN << LMUL) >> (SEW + 3)
> - * = VLEN >> (SEW + 3 - LMUL)
> + * Encode LMUL to lmul as follows:
> + *     LMUL    vlmul    lmul
> + *      1       000       0
> + *      2       001       1
> + *      4       010       2
> + *      8       011       3
> + *      -       100       -
> + *     1/8      101      -3
> + *     1/4      110      -2
> + *     1/2      111      -1
> + *
> + * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
> + * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
> + *      => VLMAX = vlen >> (1 + 3 - (-3))
> + *               = 256 >> 7
> + *               = 2
>   */
>  static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
>  {
> -    uint8_t sew, lmul;
> -
> -    sew = FIELD_EX64(vtype, VTYPE, VSEW);
> -    lmul = FIELD_EX64(vtype, VTYPE, VLMUL);
> +    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
> +    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
>      return cpu->cfg.vlen >> (sew + 3 - lmul);
>  }
>
> @@ -416,12 +425,22 @@ static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
>      *cs_base = 0;
>
>      if (riscv_has_ext(env, RVV)) {
> +        /*
> +         * If env->vl equals VLMAX, we can use the generic vector operation
> +         * expanders (GVEC) to accelerate the vector operations.
> +         * However, as LMUL could be a fractional number, the maximum
> +         * vector size that can be operated on might be less than 8 bytes,
> +         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
> +         * to true only when maxsz >= 8 bytes.
> +         */
>          uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
> -        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
> +        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
> +        uint32_t maxsz = vlmax << sew;
> +        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl)
> +                           && (maxsz >= 8);
>          flags = FIELD_DP32(flags, TB_FLAGS, VILL,
>                      FIELD_EX64(env->vtype, VTYPE, VILL));
> -        flags = FIELD_DP32(flags, TB_FLAGS, SEW,
> -                    FIELD_EX64(env->vtype, VTYPE, VSEW));
> +        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
>          flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
>                      FIELD_EX64(env->vtype, VTYPE, VLMUL));
>          flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
> diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
> index 367fb28186f..72d0bc109b0 100644
> --- a/target/riscv/insn_trans/trans_rvv.c.inc
> +++ b/target/riscv/insn_trans/trans_rvv.c.inc
> @@ -1268,7 +1268,17 @@ GEN_VEXT_AMO_TRANS(vamomaxuei64_v, MO_64, 35, rwdvm, amo_op, amo_check)
>  /*
>   *** Vector Integer Arithmetic Instructions
>   */
> -#define MAXSZ(s) (s->vlen >> (3 - s->lmul))
> +
> +/*
> + * MAXSZ returns the maximum vector size that can be operated on, in bytes.
> + * It is used in GVEC IR when the vl_eq_vlmax flag is set to true,
> + * to accelerate vector operations.
> + */
> +static inline uint32_t MAXSZ(DisasContext *s)
> +{
> +    int scale = s->lmul - 3;
> +    return scale < 0 ? s->vlen >> -scale : s->vlen << scale;
> +}
>
>  static bool opivv_check(DisasContext *s, arg_rmrr *a)
>  {
> --
> 2.17.1
>
>

Patch

diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index aae3512c33f..993539aed94 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -392,18 +392,27 @@  FIELD(TB_FLAGS, HLSX, 12, 1)
 bool riscv_cpu_is_32bit(CPURISCVState *env);
 
 /*
- * A simplification for VLMAX
- * = (1 << LMUL) * VLEN / (8 * (1 << SEW))
- * = (VLEN << LMUL) / (8 << SEW)
- * = (VLEN << LMUL) >> (SEW + 3)
- * = VLEN >> (SEW + 3 - LMUL)
+ * Encode LMUL to lmul as follows:
+ *     LMUL    vlmul    lmul
+ *      1       000       0
+ *      2       001       1
+ *      4       010       2
+ *      8       011       3
+ *      -       100       -
+ *     1/8      101      -3
+ *     1/4      110      -2
+ *     1/2      111      -1
+ *
+ * then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
+ * e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
+ *      => VLMAX = vlen >> (1 + 3 - (-3))
+ *               = 256 >> 7
+ *               = 2
  */
 static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype)
 {
-    uint8_t sew, lmul;
-
-    sew = FIELD_EX64(vtype, VTYPE, VSEW);
-    lmul = FIELD_EX64(vtype, VTYPE, VLMUL);
+    uint8_t sew = FIELD_EX64(vtype, VTYPE, VSEW);
+    int8_t lmul = sextract32(FIELD_EX64(vtype, VTYPE, VLMUL), 0, 3);
     return cpu->cfg.vlen >> (sew + 3 - lmul);
 }
 
@@ -416,12 +425,22 @@  static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
     *cs_base = 0;
 
     if (riscv_has_ext(env, RVV)) {
+        /*
+         * If env->vl equals VLMAX, we can use the generic vector operation
+         * expanders (GVEC) to accelerate the vector operations.
+         * However, as LMUL could be a fractional number, the maximum
+         * vector size that can be operated on might be less than 8 bytes,
+         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
+         * to true only when maxsz >= 8 bytes.
+         */
         uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
-        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl);
+        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+        uint32_t maxsz = vlmax << sew;
+        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl)
+                           && (maxsz >= 8);
         flags = FIELD_DP32(flags, TB_FLAGS, VILL,
                     FIELD_EX64(env->vtype, VTYPE, VILL));
-        flags = FIELD_DP32(flags, TB_FLAGS, SEW,
-                    FIELD_EX64(env->vtype, VTYPE, VSEW));
+        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
         flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                     FIELD_EX64(env->vtype, VTYPE, VLMUL));
         flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc
index 367fb28186f..72d0bc109b0 100644
--- a/target/riscv/insn_trans/trans_rvv.c.inc
+++ b/target/riscv/insn_trans/trans_rvv.c.inc
@@ -1268,7 +1268,17 @@  GEN_VEXT_AMO_TRANS(vamomaxuei64_v, MO_64, 35, rwdvm, amo_op, amo_check)
 /*
  *** Vector Integer Arithmetic Instructions
  */
-#define MAXSZ(s) (s->vlen >> (3 - s->lmul))
+
+/*
+ * MAXSZ returns the maximum vector size that can be operated on, in bytes.
+ * It is used in GVEC IR when the vl_eq_vlmax flag is set to true,
+ * to accelerate vector operations.
+ */
+static inline uint32_t MAXSZ(DisasContext *s)
+{
+    int scale = s->lmul - 3;
+    return scale < 0 ? s->vlen >> -scale : s->vlen << scale;
+}
 
 static bool opivv_check(DisasContext *s, arg_rmrr *a)
 {
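
To make the fractional-LMUL behaviour of the new MAXSZ() helper concrete,
here is a minimal standalone sketch (again not QEMU code; sketch_maxsz()
and the hard-coded vlen are illustrative only) that applies the same
scale computation:

    /* Standalone illustration of the MAXSZ() scale computation. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t sketch_maxsz(uint32_t vlen, int lmul)
    {
        int scale = lmul - 3;   /* vlen is in bits; the -3 converts to bytes */
        return scale < 0 ? vlen >> -scale : vlen << scale;
    }

    int main(void)
    {
        /* vlen = 128 bits */
        printf("LMUL = 1   -> %u bytes\n", (unsigned)sketch_maxsz(128, 0));  /* 16  */
        printf("LMUL = 8   -> %u bytes\n", (unsigned)sketch_maxsz(128, 3));  /* 128 */
        printf("LMUL = 1/8 -> %u bytes\n", (unsigned)sketch_maxsz(128, -3)); /*  2  */
        return 0;
    }

The last case is why cpu_get_tb_cpu_state() only sets vl_eq_vlmax when
maxsz >= 8 bytes: with a small vlen and a fractional LMUL, the operable
vector size can drop below the 8-byte minimum supported by GVEC.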