@@ -419,6 +419,9 @@ Vimm_1r 1111 001 . 1 . 000 ... .... cmode:4 0 . op:1 1 .... @1reg_imm
##################################################################
VEXT 1111 001 0 1 . 11 .... .... imm:4 . q:1 . 0 .... \
vm=%vm_dp vn=%vn_dp vd=%vd_dp
+
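+ # len is the number of table registers minus one; op distinguishes VTBL (0) from VTBX (1)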
+ VTBL 1111 001 1 1 . 11 .... .... 10 len:2 . op:1 . 0 .... \
+ vm=%vm_dp vn=%vn_dp vd=%vd_dp
]
# Subgroup for size != 0b11
@@ -2888,3 +2888,59 @@ static bool trans_VEXT(DisasContext *s, arg_VEXT *a)
}
return true;
}
+
+static bool trans_VTBL(DisasContext *s, arg_VTBL *a)
+{
+ int n;
+ TCGv_i32 tmp, tmp2, tmp3, tmp4;
+ TCGv_ptr ptr1;
+
+ if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
+ return false;
+ }
+
+ /* UNDEF accesses to D16-D31 if they don't exist. */
+ if (!dc_isar_feature(aa32_simd_r32, s) &&
+ ((a->vd | a->vn | a->vm) & 0x10)) {
+ return false;
+ }
+
+ if (!vfp_access_check(s)) {
+ return true;
+ }
+
+ n = a->len + 1;
+ if ((a->vn + n) > 32) {
+ /*
+ * This is UNPREDICTABLE; we choose to UNDEF to avoid the
+ * helper function running off the end of the register file.
+ */
+ return false;
+ }
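+ /* Convert the register count into the table length in bytes (8 bytes per D register). */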
+ n <<= 3;
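+ /*
+ * VTBX (op set) leaves the destination byte unchanged for out-of-range
+ * indexes, so the old Vd value is passed to the helper as the default;
+ * VTBL uses zero instead.
+ */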
+ if (a->op) {
+ tmp = neon_load_reg(a->vd, 0);
+ } else {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ }
+ tmp2 = neon_load_reg(a->vm, 0);
+ ptr1 = vfp_reg_ptr(true, a->vn);
+ tmp4 = tcg_const_i32(n);
+ gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp4);
+ tcg_temp_free_i32(tmp);
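+ /* Second pass: the high 32 bits of the Vm indexes and of the result. */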
+ if (a->op) {
+ tmp = neon_load_reg(a->vd, 1);
+ } else {
+ tmp = tcg_temp_new_i32();
+ tcg_gen_movi_i32(tmp, 0);
+ }
+ tmp3 = neon_load_reg(a->vm, 1);
+ gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp4);
+ tcg_temp_free_i32(tmp4);
+ tcg_temp_free_ptr(ptr1);
+ neon_store_reg(a->vd, 0, tmp2);
+ neon_store_reg(a->vd, 1, tmp3);
+ tcg_temp_free_i32(tmp);
+ return true;
+}
@@ -5025,13 +5025,12 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
{
int op;
int q;
- int rd, rn, rm, rd_ofs, rm_ofs;
+ int rd, rm, rd_ofs, rm_ofs;
int size;
int pass;
int u;
int vec_size;
- TCGv_i32 tmp, tmp2, tmp3, tmp5;
- TCGv_ptr ptr1;
+ TCGv_i32 tmp, tmp2, tmp3;

if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
return 1;
@@ -5052,7 +5051,6 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
q = (insn & (1 << 6)) != 0;
u = (insn >> 24) & 1;
VFP_DREG_D(rd, insn);
- VFP_DREG_N(rn, insn);
VFP_DREG_M(rm, insn);
size = (insn >> 20) & 3;
vec_size = q ? 16 : 8;
@@ -5577,39 +5575,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
break;
}
} else if ((insn & (1 << 10)) == 0) {
- /* VTBL, VTBX. */
- int n = ((insn >> 8) & 3) + 1;
- if ((rn + n) > 32) {
- /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
- * helper function running off the end of the register file.
- */
- return 1;
- }
- n <<= 3;
- if (insn & (1 << 6)) {
- tmp = neon_load_reg(rd, 0);
- } else {
- tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, 0);
- }
- tmp2 = neon_load_reg(rm, 0);
- ptr1 = vfp_reg_ptr(true, rn);
- tmp5 = tcg_const_i32(n);
- gen_helper_neon_tbl(tmp2, tmp2, tmp, ptr1, tmp5);
- tcg_temp_free_i32(tmp);
- if (insn & (1 << 6)) {
- tmp = neon_load_reg(rd, 1);
- } else {
- tmp = tcg_temp_new_i32();
- tcg_gen_movi_i32(tmp, 0);
- }
- tmp3 = neon_load_reg(rm, 1);
- gen_helper_neon_tbl(tmp3, tmp3, tmp, ptr1, tmp5);
- tcg_temp_free_i32(tmp5);
- tcg_temp_free_ptr(ptr1);
- neon_store_reg(rd, 0, tmp2);
- neon_store_reg(rd, 1, tmp3);
- tcg_temp_free_i32(tmp);
+ /* VTBL, VTBX: handled by decodetree */
+ return 1;
} else if ((insn & 0x380) == 0) {
/* VDUP */
int element;
Convert the Neon VTBL, VTBX instructions to decodetree. The actual
implementation of the insn is copied across to the new trans function
essentially unchanged, apart from renaming 'tmp5' to 'tmp4', returning
false rather than 1 in the UNPREDICTABLE case, and a few added comments.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
 target/arm/neon-dp.decode       |  3 ++
 target/arm/translate-neon.inc.c | 56 +++++++++++++++++++++++++++++++++
 target/arm/translate.c          | 41 +++---------------------
 3 files changed, 63 insertions(+), 37 deletions(-)