@@ -430,3 +430,52 @@ VCVT_FS_2sh 1111 001 0 1 . 1 shift:5 .... 1111 0 . . 1 .... \
@2reg_shift size=0
VCVT_FU_2sh 1111 001 1 1 . 1 shift:5 .... 1111 0 . . 1 .... \
@2reg_shift size=0
+
+######################################################################
+# 1-reg-and-modified-immediate grouping:
+# 1111 001 i 1 D 000 imm:3 Vd:4 cmode:4 0 Q op 1 Vm:4
+######################################################################
+
+&1reg_imm vd q imm cmode op
+
+%asimd_imm_value 24:1 16:3 0:4
+
+@1reg_imm .... ... . . . ... ... .... .... . q:1 . . .... \
+ &1reg_imm imm=%asimd_imm_value vd=%vd_dp
+
+{
+  # Logic operations, i.e. not VMOV or VMVN: (cmode & 1) && cmode < 12
+ VORR_1r 1111 001 . 1 . 000 ... .... 0001 0 . 0 1 .... \
+ @1reg_imm cmode=1 op=0
+ VORR_1r 1111 001 . 1 . 000 ... .... 0011 0 . 0 1 .... \
+ @1reg_imm cmode=3 op=0
+ VORR_1r 1111 001 . 1 . 000 ... .... 0101 0 . 0 1 .... \
+ @1reg_imm cmode=5 op=0
+ VORR_1r 1111 001 . 1 . 000 ... .... 0111 0 . 0 1 .... \
+ @1reg_imm cmode=7 op=0
+ VORR_1r 1111 001 . 1 . 000 ... .... 1001 0 . 0 1 .... \
+ @1reg_imm cmode=9 op=0
+ VORR_1r 1111 001 . 1 . 000 ... .... 1011 0 . 0 1 .... \
+ @1reg_imm cmode=11 op=0
+
+ VBIC_1r 1111 001 . 1 . 000 ... .... 0001 0 . 1 1 .... \
+ @1reg_imm cmode=1 op=1
+ VBIC_1r 1111 001 . 1 . 000 ... .... 0011 0 . 1 1 .... \
+ @1reg_imm cmode=3 op=1
+ VBIC_1r 1111 001 . 1 . 000 ... .... 0101 0 . 1 1 .... \
+ @1reg_imm cmode=5 op=1
+ VBIC_1r 1111 001 . 1 . 000 ... .... 0111 0 . 1 1 .... \
+ @1reg_imm cmode=7 op=1
+ VBIC_1r 1111 001 . 1 . 000 ... .... 1001 0 . 1 1 .... \
+ @1reg_imm cmode=9 op=1
+ VBIC_1r 1111 001 . 1 . 000 ... .... 1011 0 . 1 1 .... \
+ @1reg_imm cmode=11 op=1
+
+ # A VMVN special case: cmode == 14 op == 1
+ VMVN_14_1r 1111 001 . 1 . 000 ... .... 1110 0 . 1 1 .... \
+ @1reg_imm cmode=14 op=1
+
+ # VMOV, VMVN: all other cmode/op combinations
+ VMOV_1r 1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... \
+ @1reg_imm
+}
@@ -1821,3 +1821,154 @@ DO_FP_2SH(VCVT_SF, gen_helper_vfp_sltos)
DO_FP_2SH(VCVT_UF, gen_helper_vfp_ultos)
DO_FP_2SH(VCVT_FS, gen_helper_vfp_tosls_round_to_zero)
DO_FP_2SH(VCVT_FU, gen_helper_vfp_touls_round_to_zero)
+
+static uint32_t asimd_imm_const(uint32_t imm, int cmode, int op)
+{
+ /*
+ * Expand the encoded constant.
+ * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
+ * We choose to not special-case this and will behave as if a
+ * valid constant encoding of 0 had been given.
+ * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
+ */
+ switch (cmode) {
+ case 0: case 1:
+ /* no-op */
+ break;
+ case 2: case 3:
+ imm <<= 8;
+ break;
+ case 4: case 5:
+ imm <<= 16;
+ break;
+ case 6: case 7:
+ imm <<= 24;
+ break;
+ case 8: case 9:
+ imm |= imm << 16;
+ break;
+ case 10: case 11:
+ imm = (imm << 8) | (imm << 24);
+ break;
+ case 12:
+ imm = (imm << 8) | 0xff;
+ break;
+ case 13:
+ imm = (imm << 16) | 0xffff;
+ break;
+ case 14:
+ imm |= (imm << 8) | (imm << 16) | (imm << 24);
+ if (op) {
+ imm = ~imm;
+ }
+ break;
+ case 15:
+ imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
+ | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
+ break;
+ }
+ if (op) {
+ imm = ~imm;
+ }
+ return imm;
+}
+
+static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
+ GVecGen2iFn *fn)
+{
+ uint32_t imm;
+ int reg_ofs, vec_size;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (a->vd & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ reg_ofs = neon_reg_offset(a->vd, 0);
+ vec_size = a->q ? 16 : 8;
+ imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+ fn(MO_32, reg_ofs, reg_ofs, imm, vec_size, vec_size);
+ return true;
+}
+
+static bool trans_VORR_1r(DisasContext *s, arg_1reg_imm *a)
+{
+ return do_1reg_imm(s, a, tcg_gen_gvec_ori);
+}
+
+static bool trans_VBIC_1r(DisasContext *s, arg_1reg_imm *a)
+{
+ /* The immediate value will be inverted, so BIC becomes AND. */
+ return do_1reg_imm(s, a, tcg_gen_gvec_andi);
+}
+
+static bool trans_VMVN_14_1r(DisasContext *s, arg_1reg_imm *a)
+{
+    /* The cmode == 14, op == 1 special case isn't vectorized: each set bit of the 8-bit immediate expands to a full 0xff byte of the result. */
+ uint32_t imm;
+ TCGv_i64 t64;
+ int pass;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
+ return false;
+ }
+
+ if (a->vd & a->q) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ imm = asimd_imm_const(a->imm, a->cmode, a->op);
+
+ t64 = tcg_temp_new_i64();
+ for (pass = 0; pass <= a->q; ++pass) {
+ uint64_t val = 0;
+ int n;
+
+ for (n = 0; n < 8; n++) {
+ if (imm & (1 << (n + pass * 8))) {
+ val |= 0xffull << (n * 8);
+ }
+ }
+ tcg_gen_movi_i64(t64, val);
+ neon_store_reg64(t64, a->vd + pass);
+ }
+ tcg_temp_free_i64(t64);
+ return true;
+}
+
+static void gen_VMOV_1r(unsigned vece, uint32_t dofs, uint32_t aofs,
+ int64_t c, uint32_t oprsz, uint32_t maxsz)
+{
+ tcg_gen_gvec_dup_imm(MO_32, dofs, oprsz, maxsz, c);
+}
+
+static bool trans_VMOV_1r(DisasContext *s, arg_1reg_imm *a)
+{
+    /* cmode == 15 with op == 1 is the only unallocated combination in this space */
+ if (a->cmode == 15 && a->op == 1) {
+ return false;
+ }
+ return do_1reg_imm(s, a, gen_VMOV_1r);
+}
@@ -5232,105 +5232,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
/* Three register same length: handled by decodetree */
return 1;
} else if (insn & (1 << 4)) {
- if ((insn & 0x00380080) != 0) {
- /* Two registers and shift: handled by decodetree */
- return 1;
- } else { /* (insn & 0x00380080) == 0 */
- int invert, reg_ofs, vec_size;
-
- if (q && (rd & 1)) {
- return 1;
- }
-
- op = (insn >> 8) & 0xf;
- /* One register and immediate. */
- imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
- invert = (insn & (1 << 5)) != 0;
- /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
- * We choose to not special-case this and will behave as if a
- * valid constant encoding of 0 had been given.
- */
- switch (op) {
- case 0: case 1:
- /* no-op */
- break;
- case 2: case 3:
- imm <<= 8;
- break;
- case 4: case 5:
- imm <<= 16;
- break;
- case 6: case 7:
- imm <<= 24;
- break;
- case 8: case 9:
- imm |= imm << 16;
- break;
- case 10: case 11:
- imm = (imm << 8) | (imm << 24);
- break;
- case 12:
- imm = (imm << 8) | 0xff;
- break;
- case 13:
- imm = (imm << 16) | 0xffff;
- break;
- case 14:
- imm |= (imm << 8) | (imm << 16) | (imm << 24);
- if (invert) {
- imm = ~imm;
- }
- break;
- case 15:
- if (invert) {
- return 1;
- }
- imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
- | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
- break;
- }
- if (invert) {
- imm = ~imm;
- }
-
- reg_ofs = neon_reg_offset(rd, 0);
- vec_size = q ? 16 : 8;
-
- if (op & 1 && op < 12) {
- if (invert) {
- /* The immediate value has already been inverted,
- * so BIC becomes AND.
- */
- tcg_gen_gvec_andi(MO_32, reg_ofs, reg_ofs, imm,
- vec_size, vec_size);
- } else {
- tcg_gen_gvec_ori(MO_32, reg_ofs, reg_ofs, imm,
- vec_size, vec_size);
- }
- } else {
- /* VMOV, VMVN. */
- if (op == 14 && invert) {
- TCGv_i64 t64 = tcg_temp_new_i64();
-
- for (pass = 0; pass <= q; ++pass) {
- uint64_t val = 0;
- int n;
-
- for (n = 0; n < 8; n++) {
- if (imm & (1 << (n + pass * 8))) {
- val |= 0xffull << (n * 8);
- }
- }
- tcg_gen_movi_i64(t64, val);
- neon_store_reg64(t64, rd + pass);
- }
- tcg_temp_free_i64(t64);
- } else {
- tcg_gen_gvec_dup_imm(MO_32, reg_ofs, vec_size,
- vec_size, imm);
- }
- }
- }
+ /* Two registers and shift or reg and imm: handled by decodetree */
+ return 1;
} else { /* (insn & 0x00800010 == 0x00800000) */
if (size != 3) {
op = (insn >> 8) & 0xf;
Convert the insns in the one-register-and-immediate group to decodetree. Signed-off-by: Peter Maydell <peter.maydell@linaro.org> --- target/arm/neon-dp.decode | 49 +++++++++++ target/arm/translate-neon.inc.c | 151 ++++++++++++++++++++++++++++++++ target/arm/translate.c | 101 +-------------------- 3 files changed, 202 insertions(+), 99 deletions(-)