[RFC,v4,29/75] target/i386: introduce H*, L*, V*, U*, W* (SSE/AVX) operands

Message ID 20190821172951.15333-30-jan.bobek@gmail.com (mailing list archive)
State New, archived
Series rewrite MMX/SSE*/AVX/AVX2 vector instruction translation

Commit Message

Jan Bobek Aug. 21, 2019, 5:29 p.m. UTC
These operands address the SSE/AVX-technology register file. The
offset of the entire corresponding register within CPUX86State is
passed as the operand value, regardless of the operand-size suffix.
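
For orientation (an illustrative note, not part of the patch itself):
the letters follow the Intel SDM operand-encoding convention, each one
naming the encoding field that selects the register. Taking
VBLENDVPS xmm1, xmm2, xmm3/m128, xmm4 as an example, since it uses all
four register selectors:

    Vdq  <- ModRM.reg         (xmm1, destination)
    Hdq  <- VEX.vvvv          (xmm2, first source)
    Wdq  <- ModRM.rm          (xmm3 or m128, second source)
    Ldq  <- imm8[7:4] ("is4") (xmm4, third source)

U is the register-direct-only form of W. For each register selector,
insnop_prepare() hands the translator
offsetof(CPUX86State, xmm_regs[N]) for the selected register N.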

Signed-off-by: Jan Bobek <jan.bobek@gmail.com>
---
 target/i386/translate.c | 220 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 220 insertions(+)

Patch

diff --git a/target/i386/translate.c b/target/i386/translate.c
index 815354f12b..23ba1d5edd 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -4941,6 +4941,7 @@ DEF_INSNOP_ALIAS(Mb, M)
 DEF_INSNOP_ALIAS(Mw, M)
 DEF_INSNOP_ALIAS(Md, M)
 DEF_INSNOP_ALIAS(Mq, M)
+DEF_INSNOP_ALIAS(Mhq, M)
 DEF_INSNOP_ALIAS(Mdq, M)
 DEF_INSNOP_ALIAS(Mqq, M)
 
@@ -5112,6 +5113,225 @@ INSNOP_LDST(mm, Mq)
     tcg_temp_free_i64(r64);
 }
 
+/*
+ * vex_v
+ *
+ * Operand whose value is the VVVV field of the VEX prefix.
+ */
+typedef int insnop_arg_t(vex_v);
+typedef struct {} insnop_ctxt_t(vex_v);
+
+INSNOP_INIT(vex_v)
+{
+    return s->prefix & PREFIX_VEX;
+}
+INSNOP_PREPARE(vex_v)
+{
+    return s->vex_v;
+}
+INSNOP_FINALIZE(vex_v)
+{
+}
+
+/*
+ * is4
+ *
+ * Upper 4 bits of the 8-bit immediate selector, used with the SSE/AVX
+ * register file in some instructions.
+ */
+typedef int insnop_arg_t(is4);
+typedef struct {
+    insnop_ctxt_t(Ib) ctxt_Ib;
+    insnop_arg_t(Ib) arg_Ib;
+} insnop_ctxt_t(is4);
+
+INSNOP_INIT(is4)
+{
+    return insnop_init(Ib)(&ctxt->ctxt_Ib, env, s, modrm, is_write);
+}
+INSNOP_PREPARE(is4)
+{
+    ctxt->arg_Ib = insnop_prepare(Ib)(&ctxt->ctxt_Ib, env, s, modrm, is_write);
+    return (ctxt->arg_Ib >> 4) & 0xf;
+}
+INSNOP_FINALIZE(is4)
+{
+    insnop_finalize(Ib)(&ctxt->ctxt_Ib, env, s, modrm, is_write, ctxt->arg_Ib);
+}
+
+/*
+ * SSE/AVX-technology registers
+ */
+typedef unsigned int insnop_arg_t(xmm);
+typedef struct {} insnop_ctxt_t(xmm);
+
+INSNOP_INIT(xmm)
+{
+    return true;
+}
+INSNOP_PREPARE(xmm)
+{
+    return offsetof(CPUX86State, xmm_t0);
+}
+INSNOP_FINALIZE(xmm)
+{
+}
+
+#define DEF_INSNOP_XMM(opT, opTxmmid)                                   \
+    typedef insnop_arg_t(xmm) insnop_arg_t(opT);                        \
+    typedef struct {                                                    \
+        insnop_ctxt_t(opTxmmid) xmmid;                                  \
+    } insnop_ctxt_t(opT);                                               \
+                                                                        \
+    INSNOP_INIT(opT)                                                    \
+    {                                                                   \
+        return insnop_init(opTxmmid)(&ctxt->xmmid, env, s, modrm, is_write); \
+    }                                                                   \
+    INSNOP_PREPARE(opT)                                                 \
+    {                                                                   \
+        const insnop_arg_t(opTxmmid) xmmid =                            \
+            insnop_prepare(opTxmmid)(&ctxt->xmmid, env, s, modrm, is_write); \
+        const insnop_arg_t(opT) arg =                                   \
+            offsetof(CPUX86State, xmm_regs[xmmid]);                     \
+        insnop_finalize(opTxmmid)(&ctxt->xmmid, env, s,                 \
+                                  modrm, is_write, xmmid);              \
+        return arg;                                                     \
+    }                                                                   \
+    INSNOP_FINALIZE(opT)                                                \
+    {                                                                   \
+    }
+
+DEF_INSNOP_XMM(V, modrm_reg)
+DEF_INSNOP_ALIAS(Vd, V)
+DEF_INSNOP_ALIAS(Vq, V)
+DEF_INSNOP_ALIAS(Vdq, V)
+DEF_INSNOP_ALIAS(Vqq, V)
+
+DEF_INSNOP_XMM(U, modrm_rm_direct)
+DEF_INSNOP_ALIAS(Ub, U)
+DEF_INSNOP_ALIAS(Uw, U)
+DEF_INSNOP_ALIAS(Ud, U)
+DEF_INSNOP_ALIAS(Uq, U)
+DEF_INSNOP_ALIAS(Udq, U)
+DEF_INSNOP_ALIAS(Uqq, U)
+
+DEF_INSNOP_XMM(H, vex_v)
+DEF_INSNOP_ALIAS(Hd, H)
+DEF_INSNOP_ALIAS(Hq, H)
+DEF_INSNOP_ALIAS(Hdq, H)
+DEF_INSNOP_ALIAS(Hqq, H)
+
+DEF_INSNOP_XMM(L, is4)
+DEF_INSNOP_ALIAS(Ldq, L)
+DEF_INSNOP_ALIAS(Lqq, L)
+
+DEF_INSNOP_LDST(MWb, xmm, Mb)
+DEF_INSNOP_LDST(MWw, xmm, Mw)
+DEF_INSNOP_LDST(MWd, xmm, Md)
+DEF_INSNOP_LDST(MWq, xmm, Mq)
+DEF_INSNOP_LDST(MWdq, xmm, Mdq)
+DEF_INSNOP_LDST(MWqq, xmm, Mqq)
+DEF_INSNOP_LDST(MUdqMhq, xmm, Mhq)
+DEF_INSNOP_EITHER(Wb, Ub, MWb)
+DEF_INSNOP_EITHER(Ww, Uw, MWw)
+DEF_INSNOP_EITHER(Wd, Ud, MWd)
+DEF_INSNOP_EITHER(Wq, Uq, MWq)
+DEF_INSNOP_EITHER(Wdq, Udq, MWdq)
+DEF_INSNOP_EITHER(Wqq, Uqq, MWqq)
+DEF_INSNOP_EITHER(UdqMq, Udq, MWq)
+DEF_INSNOP_EITHER(UdqMhq, Udq, MUdqMhq)
+
+INSNOP_LDST(xmm, Mb)
+{
+    const insnop_arg_t(xmm) ofs = offsetof(ZMMReg, ZMM_B(0));
+    const TCGv_i32 r32 = tcg_temp_new_i32();
+    if (is_write) {
+        tcg_gen_ld_i32(r32, cpu_env, arg + ofs);
+        tcg_gen_qemu_st_i32(r32, ptr, s->mem_index, MO_UB);
+    } else {
+        tcg_gen_qemu_ld_i32(r32, ptr, s->mem_index, MO_UB);
+        tcg_gen_st_i32(r32, cpu_env, arg + ofs);
+    }
+    tcg_temp_free_i32(r32);
+}
+INSNOP_LDST(xmm, Mw)
+{
+    const insnop_arg_t(xmm) ofs = offsetof(ZMMReg, ZMM_W(0));
+    const TCGv_i32 r32 = tcg_temp_new_i32();
+    if (is_write) {
+        tcg_gen_ld_i32(r32, cpu_env, arg + ofs);
+        tcg_gen_qemu_st_i32(r32, ptr, s->mem_index, MO_LEUW);
+    } else {
+        tcg_gen_qemu_ld_i32(r32, ptr, s->mem_index, MO_LEUW);
+        tcg_gen_st_i32(r32, cpu_env, arg + ofs);
+    }
+    tcg_temp_free_i32(r32);
+}
+INSNOP_LDST(xmm, Md)
+{
+    const insnop_arg_t(xmm) ofs = offsetof(ZMMReg, ZMM_L(0));
+    const TCGv_i32 r32 = tcg_temp_new_i32();
+    if (is_write) {
+        tcg_gen_ld_i32(r32, cpu_env, arg + ofs);
+        tcg_gen_qemu_st_i32(r32, ptr, s->mem_index, MO_LEUL);
+    } else {
+        tcg_gen_qemu_ld_i32(r32, ptr, s->mem_index, MO_LEUL);
+        tcg_gen_st_i32(r32, cpu_env, arg + ofs);
+    }
+    tcg_temp_free_i32(r32);
+}
+INSNOP_LDST(xmm, Mq)
+{
+    const insnop_arg_t(xmm) ofs = offsetof(ZMMReg, ZMM_Q(0));
+    const TCGv_i64 r64 = tcg_temp_new_i64();
+    if (is_write) {
+        tcg_gen_ld_i64(r64, cpu_env, arg + ofs);
+        tcg_gen_qemu_st_i64(r64, ptr, s->mem_index, MO_LEQ);
+    } else {
+        tcg_gen_qemu_ld_i64(r64, ptr, s->mem_index, MO_LEQ);
+        tcg_gen_st_i64(r64, cpu_env, arg + ofs);
+    }
+    tcg_temp_free_i64(r64);
+}
+INSNOP_LDST(xmm, Mdq)
+{
+    const insnop_arg_t(xmm) ofs = offsetof(ZMMReg, ZMM_Q(0));
+    const insnop_arg_t(xmm) ofs1 = offsetof(ZMMReg, ZMM_Q(1));
+    const TCGv_i64 r64 = tcg_temp_new_i64();
+    const TCGv ptr1 = tcg_temp_new();
+    tcg_gen_addi_tl(ptr1, ptr, sizeof(uint64_t));
+    if (is_write) {
+        tcg_gen_ld_i64(r64, cpu_env, arg + ofs);
+        tcg_gen_qemu_st_i64(r64, ptr, s->mem_index, MO_LEQ);
+        tcg_gen_ld_i64(r64, cpu_env, arg + ofs1);
+        tcg_gen_qemu_st_i64(r64, ptr1, s->mem_index, MO_LEQ);
+    } else {
+        tcg_gen_qemu_ld_i64(r64, ptr, s->mem_index, MO_LEQ);
+        tcg_gen_st_i64(r64, cpu_env, arg + ofs);
+        tcg_gen_qemu_ld_i64(r64, ptr1, s->mem_index, MO_LEQ);
+        tcg_gen_st_i64(r64, cpu_env, arg + ofs1);
+    }
+    tcg_temp_free_i64(r64);
+    tcg_temp_free(ptr1);
+}
+INSNOP_LDST(xmm, Mqq)
+{
+    insnop_ldst(xmm, Mdq)(env, s, is_write, arg, ptr);
+}
+INSNOP_LDST(xmm, Mhq)
+{
+    const insnop_arg_t(xmm) ofs = offsetof(ZMMReg, ZMM_Q(1));
+    const TCGv_i64 r64 = tcg_temp_new_i64();
+    if (is_write) {
+        tcg_gen_ld_i64(r64, cpu_env, arg + ofs);
+        tcg_gen_qemu_st_i64(r64, ptr, s->mem_index, MO_LEQ);
+    } else {
+        tcg_gen_qemu_ld_i64(r64, ptr, s->mem_index, MO_LEQ);
+        tcg_gen_st_i64(r64, cpu_env, arg + ofs);
+    }
+    tcg_temp_free_i64(r64);
+}
+
 static void gen_sse_ng(CPUX86State *env, DisasContext *s, int b)
 {
     enum {