
[01/17] x86emul: split instruction decoding from execution

Message ID 57D17E08020000780010D136@prv-mh.provo.novell.com (mailing list archive)
State New, archived

Commit Message

Jan Beulich Sept. 8, 2016, 1:04 p.m. UTC
This is only the mechanical part; a subsequent patch will make non-
mechanical adjustments to actually do all decoding in this new
function.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
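
For orientation, a condensed sketch of the split this patch introduces
(simplified from the diff below, with error handling and most decode
state omitted - not verbatim):

    static int x86_decode(struct x86_emulate_state *state,
                          struct x86_emulate_ctxt *ctxt,
                          const struct x86_emulate_ops *ops)
    {
        int rc = X86EMUL_OKAY;

        /* ... fetch prefixes, opcode, ModRM bytes and immediates,
         *     filling in state->opcode, state->desc, etc. ... */

        return rc;
    }

    int x86_emulate(struct x86_emulate_ctxt *ctxt,
                    const struct x86_emulate_ops *ops)
    {
        struct x86_emulate_state state;
        int rc = x86_decode(&state, ctxt, ops);

        if ( rc != X86EMUL_OKAY )
            return rc;

        /* ... execute, dispatching on state.opcode / state.desc
         *     just as the monolithic function did before ... */

        return rc;
    }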

--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -48,7 +48,9 @@
 /* All operands are implicit in the opcode. */
 #define ImplicitOps (DstImplicit|SrcImplicit)
 
-static uint8_t opcode_table[256] = {
+typedef uint8_t opcode_desc_t;
+
+static const opcode_desc_t opcode_table[256] = {
     /* 0x00 - 0x07 */
     ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
     ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
@@ -178,7 +180,7 @@ static uint8_t opcode_table[256] = {
     ImplicitOps, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM
 };
 
-static uint8_t twobyte_table[256] = {
+static const opcode_desc_t twobyte_table[256] = {
     /* 0x00 - 0x07 */
     SrcMem16|ModRM, ImplicitOps|ModRM, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
     /* 0x08 - 0x0F */
@@ -607,7 +609,7 @@ do{ asm volatile (
 })
 #define truncate_ea(ea) truncate_word((ea), ad_bytes)
 
-#define mode_64bit() (def_ad_bytes == 8)
+#define mode_64bit() (ctxt->addr_size == 64)
 
 #define fail_if(p)                                      \
 do {                                                    \
@@ -1558,32 +1560,63 @@ int x86emul_unhandleable_rw(
     return X86EMUL_UNHANDLEABLE;
 }
 
-int
-x86_emulate(
-    struct x86_emulate_ctxt *ctxt,
-    const struct x86_emulate_ops  *ops)
-{
-    /* Shadow copy of register state. Committed on successful emulation. */
-    struct cpu_user_regs _regs = *ctxt->regs;
+struct x86_emulate_state {
+    unsigned int op_bytes, ad_bytes;
+
+    enum { ext_none, ext_0f, ext_0f38 } ext;
+    uint8_t opcode;
+    uint8_t modrm, modrm_mod, modrm_reg, modrm_rm;
+    uint8_t rex_prefix;
+    bool lock_prefix;
+    opcode_desc_t desc;
+    union vex vex;
+    int override_seg;
 
-    uint8_t b, d, sib, sib_index, sib_base, rex_prefix = 0;
-    uint8_t modrm = 0, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
-    enum { ext_none, ext_0f, ext_0f38 } ext = ext_none;
-    union vex vex = {};
-    unsigned int op_bytes, def_op_bytes, ad_bytes, def_ad_bytes;
-    bool_t lock_prefix = 0;
-    int override_seg = -1, rc = X86EMUL_OKAY;
-    struct operand src = { .reg = REG_POISON };
-    struct operand dst = { .reg = REG_POISON };
-    enum x86_swint_type swint_type;
-    struct x86_emulate_stub stub = {};
-    DECLARE_ALIGNED(mmval_t, mmval);
     /*
      * Data operand effective address (usually computed from ModRM).
      * Default is a memory operand relative to segment DS.
      */
-    struct operand ea = { .type = OP_MEM, .reg = REG_POISON };
-    ea.mem.seg = x86_seg_ds; /* gcc may reject anon union initializer */
+    struct operand ea;
+
+    /* Immediate operand values, if any. Use otherwise unused fields. */
+#define imm1 ea.val
+#define imm2 ea.orig_val
+
+    /* Shadow copy of register state. Committed on successful emulation. */
+    struct cpu_user_regs regs;
+};
+
+/* Helper definitions. */
+#define op_bytes (state->op_bytes)
+#define ad_bytes (state->ad_bytes)
+#define ext (state->ext)
+#define modrm (state->modrm)
+#define modrm_mod (state->modrm_mod)
+#define modrm_reg (state->modrm_reg)
+#define modrm_rm (state->modrm_rm)
+#define rex_prefix (state->rex_prefix)
+#define lock_prefix (state->lock_prefix)
+#define vex (state->vex)
+#define override_seg (state->override_seg)
+#define ea (state->ea)
+#define _regs (state->regs)
+
+static int
+x86_decode(
+    struct x86_emulate_state *state,
+    struct x86_emulate_ctxt *ctxt,
+    const struct x86_emulate_ops  *ops)
+{
+    uint8_t b, d, sib, sib_index, sib_base;
+    unsigned int def_op_bytes, def_ad_bytes;
+    int rc = X86EMUL_OKAY;
+
+    memset(state, 0, sizeof(*state));
+    override_seg = -1;
+    ea.type = OP_MEM;
+    ea.mem.seg = x86_seg_ds;
+    ea.reg = REG_POISON;
+    _regs = *ctxt->regs;
 
     ctxt->retire.byte = 0;
 
@@ -1800,7 +1833,7 @@ x86_emulate(
                     d = (d & ~(DstMask | SrcMask)) | DstMem | SrcReg | Mov;
                 break;
             default: /* Until it is worth making this table based ... */
-                goto cannot_emulate;
+                return X86EMUL_UNHANDLEABLE;
             }
             break;
 
@@ -1932,6 +1965,61 @@ x86_emulate(
     if ( override_seg != -1 && ea.type == OP_MEM )
         ea.mem.seg = override_seg;
 
+    /* Fetch the immediate operand, if present. */
+    switch ( d & SrcMask )
+    {
+        unsigned int bytes;
+
+    case SrcImm:
+        if ( !(d & ByteOp) )
+            bytes = op_bytes != 8 ? op_bytes : 4;
+        else
+        {
+    case SrcImmByte:
+            bytes = 1;
+        }
+        /* NB. Immediates are sign-extended as necessary. */
+        switch ( bytes )
+        {
+        case 1: imm1 = insn_fetch_type(int8_t);  break;
+        case 2: imm1 = insn_fetch_type(int16_t); break;
+        case 4: imm1 = insn_fetch_type(int32_t); break;
+        }
+        break;
+    case SrcImm16:
+        imm1 = insn_fetch_type(uint16_t);
+        break;
+    }
+
+    state->opcode = b;
+    state->desc = d;
+
+ done:
+    return rc;
+}
+
+int
+x86_emulate(
+    struct x86_emulate_ctxt *ctxt,
+    const struct x86_emulate_ops *ops)
+{
+    struct x86_emulate_state state;
+    int rc;
+    uint8_t b, d;
+    struct operand src = { .reg = REG_POISON };
+    struct operand dst = { .reg = REG_POISON };
+    enum x86_swint_type swint_type;
+    struct x86_emulate_stub stub = {};
+    DECLARE_ALIGNED(mmval_t, mmval);
+
+    rc = x86_decode(&state, ctxt, ops);
+    if ( rc != X86EMUL_OKAY)
+        return rc;
+
+    b = state.opcode;
+    d = state.desc;
+#define state (&state)
+
     /* Decode and fetch the source operand: register, memory or immediate. */
     switch ( d & SrcMask )
     {
@@ -1987,18 +2075,12 @@ x86_emulate(
             src.bytes = 1;
         }
         src.type  = OP_IMM;
-        /* NB. Immediates are sign-extended as necessary. */
-        switch ( src.bytes )
-        {
-        case 1: src.val = insn_fetch_type(int8_t);  break;
-        case 2: src.val = insn_fetch_type(int16_t); break;
-        case 4: src.val = insn_fetch_type(int32_t); break;
-        }
+        src.val   = imm1;
         break;
     case SrcImm16:
         src.type  = OP_IMM;
         src.bytes = 2;
-        src.val   = insn_fetch_type(uint16_t);
+        src.val   = imm1;
         break;
     }
 
@@ -3892,8 +3974,8 @@ x86_emulate(
     /* Commit shadow register state. */
     _regs.eflags &= ~EFLG_RF;
 
-    /* Zero the upper 32 bits of %rip if not in long mode. */
-    if ( def_ad_bytes < sizeof(_regs.eip) )
+    /* Zero the upper 32 bits of %rip if not in 64-bit mode. */
+    if ( !mode_64bit() )
         _regs.eip = (uint32_t)_regs.eip;
 
     *ctxt->regs = _regs;
@@ -4876,4 +4958,19 @@ x86_emulate(
     _put_fpu();
     put_stub(stub);
     return X86EMUL_UNHANDLEABLE;
+#undef state
 }
+
+#undef op_bytes
+#undef ad_bytes
+#undef ext
+#undef modrm
+#undef modrm_mod
+#undef modrm_reg
+#undef modrm_rm
+#undef rex_prefix
+#undef lock_prefix
+#undef vex
+#undef override_seg
+#undef ea
+#undef _regs

Comments

Andrew Cooper Sept. 9, 2016, 6:35 p.m. UTC | #1
On 08/09/16 14:04, Jan Beulich wrote:
> This is only the mechanical part, a subsequent patch will make non-
> mechanical adjustments to actually do all decoding in this new
> function.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -48,7 +48,9 @@
>  /* All operands are implicit in the opcode. */
>  #define ImplicitOps (DstImplicit|SrcImplicit)
>  
> -static uint8_t opcode_table[256] = {
> +typedef uint8_t opcode_desc_t;
> +
> +static const opcode_desc_t opcode_table[256] = {
>      /* 0x00 - 0x07 */
>      ByteOp|DstMem|SrcReg|ModRM, DstMem|SrcReg|ModRM,
>      ByteOp|DstReg|SrcMem|ModRM, DstReg|SrcMem|ModRM,
> @@ -178,7 +180,7 @@ static uint8_t opcode_table[256] = {
>      ImplicitOps, ImplicitOps, ByteOp|DstMem|SrcNone|ModRM, DstMem|SrcNone|ModRM
>  };
>  
> -static uint8_t twobyte_table[256] = {
> +static const opcode_desc_t twobyte_table[256] = {
>      /* 0x00 - 0x07 */
>      SrcMem16|ModRM, ImplicitOps|ModRM, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
>      /* 0x08 - 0x0F */
> @@ -607,7 +609,7 @@ do{ asm volatile (
>  })
>  #define truncate_ea(ea) truncate_word((ea), ad_bytes)
>  
> -#define mode_64bit() (def_ad_bytes == 8)
> +#define mode_64bit() (ctxt->addr_size == 64)
>  
>  #define fail_if(p)                                      \
>  do {                                                    \
> @@ -1558,32 +1560,63 @@ int x86emul_unhandleable_rw(
>      return X86EMUL_UNHANDLEABLE;
>  }
>  
> -int
> -x86_emulate(
> -    struct x86_emulate_ctxt *ctxt,
> -    const struct x86_emulate_ops  *ops)
> -{
> -    /* Shadow copy of register state. Committed on successful emulation. */
> -    struct cpu_user_regs _regs = *ctxt->regs;
> +struct x86_emulate_state {
> +    unsigned int op_bytes, ad_bytes;
> +
> +    enum { ext_none, ext_0f, ext_0f38 } ext;
> +    uint8_t opcode;
> +    uint8_t modrm, modrm_mod, modrm_reg, modrm_rm;
> +    uint8_t rex_prefix;
> +    bool lock_prefix;
> +    opcode_desc_t desc;
> +    union vex vex;
> +    int override_seg;
>  
> -    uint8_t b, d, sib, sib_index, sib_base, rex_prefix = 0;
> -    uint8_t modrm = 0, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
> -    enum { ext_none, ext_0f, ext_0f38 } ext = ext_none;
> -    union vex vex = {};
> -    unsigned int op_bytes, def_op_bytes, ad_bytes, def_ad_bytes;
> -    bool_t lock_prefix = 0;
> -    int override_seg = -1, rc = X86EMUL_OKAY;
> -    struct operand src = { .reg = REG_POISON };
> -    struct operand dst = { .reg = REG_POISON };
> -    enum x86_swint_type swint_type;
> -    struct x86_emulate_stub stub = {};
> -    DECLARE_ALIGNED(mmval_t, mmval);
>      /*
>       * Data operand effective address (usually computed from ModRM).
>       * Default is a memory operand relative to segment DS.
>       */
> -    struct operand ea = { .type = OP_MEM, .reg = REG_POISON };
> -    ea.mem.seg = x86_seg_ds; /* gcc may reject anon union initializer */
> +    struct operand ea;
> +
> +    /* Immediate operand values, if any. Use otherwise unused fields. */
> +#define imm1 ea.val
> +#define imm2 ea.orig_val

Some instructions (e.g. bextr) have both immediate and memory operands. 
Reusing ea like this is unsafe.

Immediate data was previously stashed in src, separately from ea.  In
the light of the XSA-123 problems, I think it would be better to just
have "uint64_t imm1, imm2;" here.

> +
> +    /* Shadow copy of register state. Committed on successful emulation. */
> +    struct cpu_user_regs regs;
> +};
> +
> +/* Helper definitions. */
> +#define op_bytes (state->op_bytes)
> +#define ad_bytes (state->ad_bytes)
> +#define ext (state->ext)
> +#define modrm (state->modrm)
> +#define modrm_mod (state->modrm_mod)
> +#define modrm_reg (state->modrm_reg)
> +#define modrm_rm (state->modrm_rm)
> +#define rex_prefix (state->rex_prefix)
> +#define lock_prefix (state->lock_prefix)
> +#define vex (state->vex)
> +#define override_seg (state->override_seg)
> +#define ea (state->ea)
> +#define _regs (state->regs)
> +
> +static int
> +x86_decode(
> +    struct x86_emulate_state *state,
> +    struct x86_emulate_ctxt *ctxt,
> +    const struct x86_emulate_ops  *ops)
> +{
> +    uint8_t b, d, sib, sib_index, sib_base;
> +    unsigned int def_op_bytes, def_ad_bytes;
> +    int rc = X86EMUL_OKAY;
> +
> +    memset(state, 0, sizeof(*state));
> +    override_seg = -1;
> +    ea.type = OP_MEM;
> +    ea.mem.seg = x86_seg_ds;
> +    ea.reg = REG_POISON;
> +    _regs = *ctxt->regs;

The helper definitions are fine for the transition period, but I would
like to see them eventually removed to help reduce the quantity of
information hiding in this area.  Please don't introduce new uses.
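
(For illustration, the segment-override statement from the patch,
spelled out against the state pointer - a sketch of what eventually
dropping the helper #defines would mean:)

    /* Today, via the helper #defines: */
    if ( override_seg != -1 && ea.type == OP_MEM )
        ea.mem.seg = override_seg;

    /* With the helpers removed: */
    if ( state->override_seg != -1 && state->ea.type == OP_MEM )
        state->ea.mem.seg = state->override_seg;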

>  
>      ctxt->retire.byte = 0;
>  
> @@ -1800,7 +1833,7 @@ x86_emulate(
>                      d = (d & ~(DstMask | SrcMask)) | DstMem | SrcReg | Mov;
>                  break;
>              default: /* Until it is worth making this table based ... */
> -                goto cannot_emulate;
> +                return X86EMUL_UNHANDLEABLE;
>              }
>              break;
>  
> @@ -1932,6 +1965,61 @@ x86_emulate(
>      if ( override_seg != -1 && ea.type == OP_MEM )
>          ea.mem.seg = override_seg;
>  
> +    /* Fetch the immediate operand, if present. */
> +    switch ( d & SrcMask )
> +    {
> +        unsigned int bytes;
> +
> +    case SrcImm:
> +        if ( !(d & ByteOp) )
> +            bytes = op_bytes != 8 ? op_bytes : 4;
> +        else
> +        {
> +    case SrcImmByte:
> +            bytes = 1;
> +        }
> +        /* NB. Immediates are sign-extended as necessary. */
> +        switch ( bytes )
> +        {
> +        case 1: imm1 = insn_fetch_type(int8_t);  break;
> +        case 2: imm1 = insn_fetch_type(int16_t); break;
> +        case 4: imm1 = insn_fetch_type(int32_t); break;
> +        }
> +        break;
> +    case SrcImm16:
> +        imm1 = insn_fetch_type(uint16_t);
> +        break;
> +    }
> +
> +    state->opcode = b;
> +    state->desc = d;
> +
> + done:
> +    return rc;
> +}
> +
> +int
> +x86_emulate(
> +    struct x86_emulate_ctxt *ctxt,
> +    const struct x86_emulate_ops *ops)
> +{
> +    struct x86_emulate_state state;
> +    int rc;
> +    uint8_t b, d;
> +    struct operand src = { .reg = REG_POISON };
> +    struct operand dst = { .reg = REG_POISON };
> +    enum x86_swint_type swint_type;
> +    struct x86_emulate_stub stub = {};
> +    DECLARE_ALIGNED(mmval_t, mmval);
> +
> +    rc = x86_decode(&state, ctxt, ops);
> +    if ( rc != X86EMUL_OKAY)

Space before the bracket (although I guess these lines drop out before
the end of the series anyway?)

~Andrew

> +        return rc;
> +
> +    b = state.opcode;
> +    d = state.desc;
> +#define state (&state)
> +
>      /* Decode and fetch the source operand: register, memory or immediate. */
>      switch ( d & SrcMask )
>      {
> @@ -1987,18 +2075,12 @@ x86_emulate(
>              src.bytes = 1;
>          }
>          src.type  = OP_IMM;
> -        /* NB. Immediates are sign-extended as necessary. */
> -        switch ( src.bytes )
> -        {
> -        case 1: src.val = insn_fetch_type(int8_t);  break;
> -        case 2: src.val = insn_fetch_type(int16_t); break;
> -        case 4: src.val = insn_fetch_type(int32_t); break;
> -        }
> +        src.val   = imm1;
>          break;
>      case SrcImm16:
>          src.type  = OP_IMM;
>          src.bytes = 2;
> -        src.val   = insn_fetch_type(uint16_t);
> +        src.val   = imm1;
>          break;
>      }
>  
> @@ -3892,8 +3974,8 @@ x86_emulate(
>      /* Commit shadow register state. */
>      _regs.eflags &= ~EFLG_RF;
>  
> -    /* Zero the upper 32 bits of %rip if not in long mode. */
> -    if ( def_ad_bytes < sizeof(_regs.eip) )
> +    /* Zero the upper 32 bits of %rip if not in 64-bit mode. */
> +    if ( !mode_64bit() )
>          _regs.eip = (uint32_t)_regs.eip;
>  
>      *ctxt->regs = _regs;
> @@ -4876,4 +4958,19 @@ x86_emulate(
>      _put_fpu();
>      put_stub(stub);
>      return X86EMUL_UNHANDLEABLE;
> +#undef state
>  }
> +
> +#undef op_bytes
> +#undef ad_bytes
> +#undef ext
> +#undef modrm
> +#undef modrm_mod
> +#undef modrm_reg
> +#undef modrm_rm
> +#undef rex_prefix
> +#undef lock_prefix
> +#undef vex
> +#undef override_seg
> +#undef ea
> +#undef _regs
>
>
Jan Beulich Sept. 12, 2016, 7:20 a.m. UTC | #2
>>> On 09.09.16 at 20:35, <andrew.cooper3@citrix.com> wrote:
> On 08/09/16 14:04, Jan Beulich wrote:
>> @@ -607,7 +609,7 @@ do{ asm volatile (
>>  })
>>  #define truncate_ea(ea) truncate_word((ea), ad_bytes)
>>  
>> -#define mode_64bit() (def_ad_bytes == 8)
>> +#define mode_64bit() (ctxt->addr_size == 64)
>>  
>>  #define fail_if(p)                                      \
>>  do {                                                    \
>> @@ -1558,32 +1560,63 @@ int x86emul_unhandleable_rw(
>>      return X86EMUL_UNHANDLEABLE;
>>  }
>>  
>> -int
>> -x86_emulate(
>> -    struct x86_emulate_ctxt *ctxt,
>> -    const struct x86_emulate_ops  *ops)
>> -{
>> -    /* Shadow copy of register state. Committed on successful emulation. */
>> -    struct cpu_user_regs _regs = *ctxt->regs;
>> +struct x86_emulate_state {
>> +    unsigned int op_bytes, ad_bytes;
>> +
>> +    enum { ext_none, ext_0f, ext_0f38 } ext;
>> +    uint8_t opcode;
>> +    uint8_t modrm, modrm_mod, modrm_reg, modrm_rm;
>> +    uint8_t rex_prefix;
>> +    bool lock_prefix;
>> +    opcode_desc_t desc;
>> +    union vex vex;
>> +    int override_seg;
>>  
>> -    uint8_t b, d, sib, sib_index, sib_base, rex_prefix = 0;
>> -    uint8_t modrm = 0, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
>> -    enum { ext_none, ext_0f, ext_0f38 } ext = ext_none;
>> -    union vex vex = {};
>> -    unsigned int op_bytes, def_op_bytes, ad_bytes, def_ad_bytes;
>> -    bool_t lock_prefix = 0;
>> -    int override_seg = -1, rc = X86EMUL_OKAY;
>> -    struct operand src = { .reg = REG_POISON };
>> -    struct operand dst = { .reg = REG_POISON };
>> -    enum x86_swint_type swint_type;
>> -    struct x86_emulate_stub stub = {};
>> -    DECLARE_ALIGNED(mmval_t, mmval);
>>      /*
>>       * Data operand effective address (usually computed from ModRM).
>>       * Default is a memory operand relative to segment DS.
>>       */
>> -    struct operand ea = { .type = OP_MEM, .reg = REG_POISON };
>> -    ea.mem.seg = x86_seg_ds; /* gcc may reject anon union initializer */
>> +    struct operand ea;
>> +
>> +    /* Immediate operand values, if any. Use otherwise unused fields. */
>> +#define imm1 ea.val
>> +#define imm2 ea.orig_val
> 
> Some instructions (e.g. bextr) have both immediate and memory operands. 
> Reusing ea like this is unsafe.

I disagree: neither field has ever been used for anything, and more
than once I had thought about how to eliminate them from ea without
eliminating them from src and dst - until I finally found a use for
them here.

> Immediate data was previously stashed in src, separately from ea.  In
> the light of the XSA-123 problems, I think it would be better to just
> have "uint64_t imm1, imm2;" here.

The XSA-123 problem was completely different, and I specifically took
this into consideration. We're not aliasing scalars and pointers here,
so there's no risk of causing a new security issue. Just to repeat -
these two fields have been completely unused so far.

>> +
>> +    /* Shadow copy of register state. Committed on successful emulation. */
>> +    struct cpu_user_regs regs;
>> +};
>> +
>> +/* Helper definitions. */
>> +#define op_bytes (state->op_bytes)
>> +#define ad_bytes (state->ad_bytes)
>> +#define ext (state->ext)
>> +#define modrm (state->modrm)
>> +#define modrm_mod (state->modrm_mod)
>> +#define modrm_reg (state->modrm_reg)
>> +#define modrm_rm (state->modrm_rm)
>> +#define rex_prefix (state->rex_prefix)
>> +#define lock_prefix (state->lock_prefix)
>> +#define vex (state->vex)
>> +#define override_seg (state->override_seg)
>> +#define ea (state->ea)
>> +#define _regs (state->regs)
>> +
>> +static int
>> +x86_decode(
>> +    struct x86_emulate_state *state,
>> +    struct x86_emulate_ctxt *ctxt,
>> +    const struct x86_emulate_ops  *ops)
>> +{
>> +    uint8_t b, d, sib, sib_index, sib_base;
>> +    unsigned int def_op_bytes, def_ad_bytes;
>> +    int rc = X86EMUL_OKAY;
>> +
>> +    memset(state, 0, sizeof(*state));
>> +    override_seg = -1;
>> +    ea.type = OP_MEM;
>> +    ea.mem.seg = x86_seg_ds;
>> +    ea.reg = REG_POISON;
>> +    _regs = *ctxt->regs;
> 
> The helper definitions are fine for the transition period, but I would
> like to see them eventually removed to help reduce the quantity of
> information hiding in this area.  Please don't introduce new uses.

I simply can't write e.g. "state->ea.type" here, as that would get
macro-expanded to "state->(state->ea).type". I've carefully tried to
avoid introducing new uses where possible, and I'll be happy to
correct cases where I failed to, but here we simply can't. (And I hope
you agree that it would be odd not to use the helpers consistently
within a single block of code, i.e. I'd like to not replace _regs,
which goes away in patch 3 anyway.)
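
(Illustration of the expansion issue - not real code: with the helper
macro in scope, the preprocessor rewrites every 'ea' token, so)

    #define ea (state->ea)

    /* ea.type         expands to  (state->ea).type          - intended      */
    /* state->ea.type  expands to  state->(state->ea).type   - won't compile */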

>> +int
>> +x86_emulate(
>> +    struct x86_emulate_ctxt *ctxt,
>> +    const struct x86_emulate_ops *ops)
>> +{
>> +    struct x86_emulate_state state;
>> +    int rc;
>> +    uint8_t b, d;
>> +    struct operand src = { .reg = REG_POISON };
>> +    struct operand dst = { .reg = REG_POISON };
>> +    enum x86_swint_type swint_type;
>> +    struct x86_emulate_stub stub = {};
>> +    DECLARE_ALIGNED(mmval_t, mmval);
>> +
>> +    rc = x86_decode(&state, ctxt, ops);
>> +    if ( rc != X86EMUL_OKAY)
> 
> Space before the bracket (although I guess these lines drop out before
> the end of the series anyway?)

Oops. And to the question - no, why would they? Emulation obviously
requires decoding as the first step.

Jan