@@ -67,10 +67,14 @@ inttypes["x86_32"] = {
"xen_ulong_t" : "uint32_t",
};
header["x86_32"] = """
+#define __DECL_REG_LO8(which) uint32_t e ## which ## x
+#define __DECL_REG_LO16(name) uint32_t e ## name
#define __i386___X86_32 1
#pragma pack(4)
""";
footer["x86_32"] = """
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
#pragma pack()
""";
@@ -89,10 +93,18 @@ header["x86_64"] = """
# define __DECL_REG(name) uint64_t r ## name
# define __align8__ FIXME
#endif
+#define __DECL_REG_LOHI(name) __DECL_REG(name ## x)
+#define __DECL_REG_LO8 __DECL_REG
+#define __DECL_REG_LO16 __DECL_REG
+#define __DECL_REG_HI __DECL_REG
#define __x86_64___X86_64 1
""";
footer["x86_64"] = """
#undef __DECL_REG
+#undef __DECL_REG_LOHI
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+#undef __DECL_REG_HI
"""
###########################################################################
@@ -46,7 +46,7 @@ x86_emulate/x86_emulate.c x86_emulate/x8
HOSTCFLAGS += $(CFLAGS_xeninclude)
x86_emulate.o: x86_emulate.c x86_emulate/x86_emulate.c x86_emulate/x86_emulate.h
- $(HOSTCC) $(HOSTCFLAGS) -c -g -o $@ $<
+ $(HOSTCC) $(HOSTCFLAGS) -D__XEN_TOOLS__ -c -g -o $@ $<
test_x86_emulator.o: test_x86_emulator.c blowfish.h x86_emulate/x86_emulate.h
$(HOSTCC) $(HOSTCFLAGS) -c -g -o $@ $<
@@ -1530,8 +1530,8 @@ void arch_get_info_guest(struct vcpu *v,
}
/* IOPL privileges are virtualised: merge back into returned eflags. */
- BUG_ON((c(user_regs.eflags) & X86_EFLAGS_IOPL) != 0);
- c(user_regs.eflags |= v->arch.pv_vcpu.iopl);
+ BUG_ON((c(user_regs._eflags) & X86_EFLAGS_IOPL) != 0);
+ c(user_regs._eflags |= v->arch.pv_vcpu.iopl);
if ( !compat )
{
@@ -3093,7 +3093,7 @@ void hvm_task_switch(
if ( segr.attr.fields.db )
sp = regs->_esp -= opsz;
else
- sp = *(uint16_t *)&regs->esp -= opsz;
+ sp = regs->sp -= opsz;
if ( hvm_virtual_to_linear_addr(x86_seg_ss, &segr, sp, opsz,
hvm_access_write,
16 << segr.attr.fields.db,
@@ -66,30 +66,23 @@ static void realmode_deliver_exception(
}
}
- frame[0] = regs->eip + insn_len;
+ frame[0] = regs->ip + insn_len;
frame[1] = csr->sel;
- frame[2] = regs->eflags & ~X86_EFLAGS_RF;
+ frame[2] = regs->flags & ~X86_EFLAGS_RF;
/* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db )
- {
- regs->esp -= 6;
- pstk = regs->esp;
- }
+ pstk = regs->_esp -= 6;
else
- {
- pstk = (uint16_t)(regs->esp - 6);
- regs->esp &= ~0xffff;
- regs->esp |= pstk;
- }
+ pstk = regs->sp -= 6;
pstk += hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt)->base;
(void)hvm_copy_to_guest_phys(pstk, frame, sizeof(frame));
csr->sel = cs_eip >> 16;
csr->base = (uint32_t)csr->sel << 4;
- regs->eip = (uint16_t)cs_eip;
- regs->eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
+ regs->ip = (uint16_t)cs_eip;
+ regs->_eflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF | X86_EFLAGS_RF);
/* Exception delivery clears STI and MOV-SS blocking. */
if ( hvmemul_ctxt->intr_shadow &
@@ -12,8 +12,8 @@
static void ioemul_handle_proliant_quirk(
u8 opcode, char *io_emul_stub, struct cpu_user_regs *regs)
{
- uint16_t port = regs->edx;
- uint8_t value = regs->eax;
+ uint16_t port = regs->dx;
+ uint8_t value = regs->al;
if ( (opcode != 0xee) || (port != 0xcd4) || !(value & 0x80) )
return;
@@ -732,7 +732,7 @@ do {
} while (0)
#define register_address_adjust(reg, adj) \
_register_address_increment(reg, \
- _regs.eflags & EFLG_DF ? -(adj) : (adj), \
+ _regs._eflags & EFLG_DF ? -(adj) : (adj), \
ad_bytes)
#define sp_pre_dec(dec) ({ \
@@ -823,7 +823,7 @@ static int _get_fpu(
if ( type >= X86EMUL_FPU_ymm )
{
/* Should be unreachable if VEX decoding is working correctly. */
- ASSERT((cr0 & CR0_PE) && !(ctxt->regs->eflags & EFLG_VM));
+ ASSERT((cr0 & CR0_PE) && !(ctxt->regs->_eflags & EFLG_VM));
}
if ( cr0 & CR0_EM )
{
@@ -906,7 +906,7 @@ do {
asm volatile ( _PRE_EFLAGS("[eflags]", "[mask]", "[tmp]") \
"call *%[func];" \
_POST_EFLAGS("[eflags]", "[mask]", "[tmp]") \
- : [eflags] "+g" (_regs.eflags), \
+ : [eflags] "+g" (_regs._eflags), \
[tmp] "=&r" (tmp_) \
: [func] "rm" (stub.func), \
[mask] "i" (EFLG_ZF|EFLG_PF|EFLG_CF) ); \
@@ -917,9 +917,8 @@ static inline unsigned long get_loop_cou
const struct cpu_user_regs *regs,
int ad_bytes)
{
- return (ad_bytes == 2) ? (uint16_t)regs->ecx :
- (ad_bytes == 4) ? (uint32_t)regs->ecx :
- regs->ecx;
+ return (ad_bytes > 4) ? regs->r(cx)
+ : (ad_bytes < 4) ? regs->cx : regs->_ecx;
}
static inline void put_loop_count(
@@ -928,9 +927,9 @@ static inline void put_loop_count(
unsigned long count)
{
if ( ad_bytes == 2 )
- *(uint16_t *)&regs->ecx = count;
+ regs->cx = count;
else
- regs->ecx = ad_bytes == 4 ? (uint32_t)count : count;
+ regs->r(cx) = ad_bytes == 4 ? (uint32_t)count : count;
}
#define get_rep_prefix(using_si, using_di) ({ \
@@ -946,9 +945,9 @@ static inline void put_loop_count(
*/ \
if ( mode_64bit() && ad_bytes == 4 ) \
{ \
- _regs.ecx = 0; \
- if ( using_si ) _regs.esi = (uint32_t)_regs.esi; \
- if ( using_di ) _regs.edi = (uint32_t)_regs.edi; \
+ _regs.r(cx) = 0; \
+ if ( using_si ) _regs.r(si) = _regs._esi; \
+ if ( using_di ) _regs.r(di) = _regs._edi; \
} \
goto no_writeback; \
} \
@@ -983,7 +982,7 @@ static void __put_rep_prefix(
/* Clip maximum repetitions so that the index register at most just wraps. */
#define truncate_ea_and_reps(ea, reps, bytes_per_rep) ({ \
unsigned long todo__, ea__ = truncate_word(ea, ad_bytes); \
- if ( !(ctxt->regs->eflags & EFLG_DF) ) \
+ if ( !(_regs._eflags & EFLG_DF) ) \
todo__ = truncate_word(-(ea), ad_bytes) / (bytes_per_rep); \
else if ( truncate_word((ea) + (bytes_per_rep) - 1, ad_bytes) < ea__ )\
todo__ = 1; \
@@ -1140,7 +1139,7 @@ get_cpl(
{
struct segment_register reg;
- if ( ctxt->regs->eflags & EFLG_VM )
+ if ( ctxt->regs->_eflags & EFLG_VM )
return 3;
if ( (ops->read_segment == NULL) ||
@@ -1158,7 +1157,7 @@ _mode_iopl(
int cpl = get_cpl(ctxt, ops);
if ( cpl == -1 )
return -1;
- return (cpl <= ((ctxt->regs->eflags >> 12) & 3));
+ return (cpl <= ((ctxt->regs->_eflags >> 12) & 3));
}
#define mode_ring0() ({ \
@@ -1182,7 +1181,7 @@ static int ioport_access_check(
struct segment_register tr;
int rc = X86EMUL_OKAY;
- if ( !(ctxt->regs->eflags & EFLG_VM) && mode_iopl() )
+ if ( !(ctxt->regs->_eflags & EFLG_VM) && mode_iopl() )
return X86EMUL_OKAY;
fail_if(ops->read_segment == NULL);
@@ -1251,7 +1250,7 @@ in_protmode(
struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops)
{
- return !(in_realmode(ctxt, ops) || (ctxt->regs->eflags & EFLG_VM));
+ return !(in_realmode(ctxt, ops) || (ctxt->regs->_eflags & EFLG_VM));
}
#define EAX 0
@@ -1619,22 +1618,14 @@ decode_register(
switch ( modrm_reg )
{
- case 0: p = &regs->eax; break;
- case 1: p = &regs->ecx; break;
- case 2: p = &regs->edx; break;
- case 3: p = &regs->ebx; break;
- case 4: p = (highbyte_regs ?
- ((unsigned char *)&regs->eax + 1) :
- (unsigned char *)&regs->esp); break;
- case 5: p = (highbyte_regs ?
- ((unsigned char *)&regs->ecx + 1) :
- (unsigned char *)&regs->ebp); break;
- case 6: p = (highbyte_regs ?
- ((unsigned char *)&regs->edx + 1) :
- (unsigned char *)&regs->esi); break;
- case 7: p = (highbyte_regs ?
- ((unsigned char *)&regs->ebx + 1) :
- (unsigned char *)&regs->edi); break;
+ case 0: p = &regs->r(ax); break;
+ case 1: p = &regs->r(cx); break;
+ case 2: p = &regs->r(dx); break;
+ case 3: p = &regs->r(bx); break;
+ case 4: p = (highbyte_regs ? &regs->ah : (void *)&regs->r(sp)); break;
+ case 5: p = (highbyte_regs ? &regs->ch : (void *)&regs->r(bp)); break;
+ case 6: p = (highbyte_regs ? &regs->dh : (void *)&regs->r(si)); break;
+ case 7: p = (highbyte_regs ? &regs->bh : (void *)&regs->r(di)); break;
#if defined(__x86_64__)
case 8: p = &regs->r8; break;
case 9: p = &regs->r9; break;
@@ -1746,8 +1737,8 @@ static int inject_swint(enum x86_swint_t
* a 32bit OS. Someone with many TUITs can see about reading the
* TSS Software Interrupt Redirection bitmap.
*/
- if ( (ctxt->regs->eflags & EFLG_VM) &&
- ((ctxt->regs->eflags & EFLG_IOPL) != EFLG_IOPL) )
+ if ( (ctxt->regs->_eflags & EFLG_VM) &&
+ ((ctxt->regs->_eflags & EFLG_IOPL) != EFLG_IOPL) )
goto raise_exn;
/*
@@ -2167,7 +2158,7 @@ x86_decode(
default:
BUG(); /* Shouldn't be possible. */
case 2:
- if ( in_realmode(ctxt, ops) || (state->regs->eflags & EFLG_VM) )
+ if ( in_realmode(ctxt, ops) || (state->regs->_eflags & EFLG_VM) )
break;
/* fall through */
case 4:
@@ -2319,33 +2310,33 @@ x86_decode(
switch ( modrm_rm )
{
case 0:
- ea.mem.off = state->regs->ebx + state->regs->esi;
+ ea.mem.off = state->regs->bx + state->regs->si;
break;
case 1:
- ea.mem.off = state->regs->ebx + state->regs->edi;
+ ea.mem.off = state->regs->bx + state->regs->di;
break;
case 2:
ea.mem.seg = x86_seg_ss;
- ea.mem.off = state->regs->ebp + state->regs->esi;
+ ea.mem.off = state->regs->bp + state->regs->si;
break;
case 3:
ea.mem.seg = x86_seg_ss;
- ea.mem.off = state->regs->ebp + state->regs->edi;
+ ea.mem.off = state->regs->bp + state->regs->di;
break;
case 4:
- ea.mem.off = state->regs->esi;
+ ea.mem.off = state->regs->si;
break;
case 5:
- ea.mem.off = state->regs->edi;
+ ea.mem.off = state->regs->di;
break;
case 6:
if ( modrm_mod == 0 )
break;
ea.mem.seg = x86_seg_ss;
- ea.mem.off = state->regs->ebp;
+ ea.mem.off = state->regs->bp;
break;
case 7:
- ea.mem.off = state->regs->ebx;
+ ea.mem.off = state->regs->bx;
break;
}
switch ( modrm_mod )
@@ -2519,7 +2510,7 @@ x86_emulate(
struct x86_emulate_state state;
int rc;
uint8_t b, d;
- bool singlestep = ctxt->regs->eflags & EFLG_TF;
+ bool singlestep = _regs._eflags & EFLG_TF;
struct operand src = { .reg = PTR_POISON };
struct operand dst = { .reg = PTR_POISON };
enum x86_swint_type swint_type;
@@ -2728,36 +2719,36 @@ x86_emulate(
struct segment_register cs, sreg;
case 0x00 ... 0x05: add: /* add */
- emulate_2op_SrcV("add", src, dst, _regs.eflags);
+ emulate_2op_SrcV("add", src, dst, _regs._eflags);
break;
case 0x08 ... 0x0d: or: /* or */
- emulate_2op_SrcV("or", src, dst, _regs.eflags);
+ emulate_2op_SrcV("or", src, dst, _regs._eflags);
break;
case 0x10 ... 0x15: adc: /* adc */
- emulate_2op_SrcV("adc", src, dst, _regs.eflags);
+ emulate_2op_SrcV("adc", src, dst, _regs._eflags);
break;
case 0x18 ... 0x1d: sbb: /* sbb */
- emulate_2op_SrcV("sbb", src, dst, _regs.eflags);
+ emulate_2op_SrcV("sbb", src, dst, _regs._eflags);
break;
case 0x20 ... 0x25: and: /* and */
- emulate_2op_SrcV("and", src, dst, _regs.eflags);
+ emulate_2op_SrcV("and", src, dst, _regs._eflags);
break;
case 0x28 ... 0x2d: sub: /* sub */
- emulate_2op_SrcV("sub", src, dst, _regs.eflags);
+ emulate_2op_SrcV("sub", src, dst, _regs._eflags);
break;
case 0x30 ... 0x35: xor: /* xor */
- emulate_2op_SrcV("xor", src, dst, _regs.eflags);
+ emulate_2op_SrcV("xor", src, dst, _regs._eflags);
break;
case 0x38 ... 0x3d: cmp: /* cmp */
generate_exception_if(lock_prefix, EXC_UD);
- emulate_2op_SrcV("cmp", src, dst, _regs.eflags);
+ emulate_2op_SrcV("cmp", src, dst, _regs._eflags);
dst.type = OP_NONE;
break;
@@ -2798,40 +2789,40 @@ x86_emulate(
case 0x27: /* daa */
case 0x2f: /* das */ {
- uint8_t al = _regs.eax;
- unsigned long eflags = _regs.eflags;
+ uint8_t al = _regs.al;
+ unsigned int eflags = _regs._eflags;
generate_exception_if(mode_64bit(), EXC_UD);
- _regs.eflags &= ~(EFLG_CF|EFLG_AF|EFLG_SF|EFLG_ZF|EFLG_PF);
+ _regs._eflags &= ~(EFLG_CF|EFLG_AF|EFLG_SF|EFLG_ZF|EFLG_PF);
if ( ((al & 0x0f) > 9) || (eflags & EFLG_AF) )
{
- _regs.eflags |= EFLG_AF;
+ _regs._eflags |= EFLG_AF;
if ( b == 0x2f && (al < 6 || (eflags & EFLG_CF)) )
- _regs.eflags |= EFLG_CF;
- *(uint8_t *)&_regs.eax += (b == 0x27) ? 6 : -6;
+ _regs._eflags |= EFLG_CF;
+ _regs.al += (b == 0x27) ? 6 : -6;
}
if ( (al > 0x99) || (eflags & EFLG_CF) )
{
- *(uint8_t *)&_regs.eax += (b == 0x27) ? 0x60 : -0x60;
- _regs.eflags |= EFLG_CF;
+ _regs.al += (b == 0x27) ? 0x60 : -0x60;
+ _regs._eflags |= EFLG_CF;
}
- _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
- _regs.eflags |= (( int8_t)_regs.eax < 0) ? EFLG_SF : 0;
- _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0;
+ _regs._eflags |= !_regs.al ? EFLG_ZF : 0;
+ _regs._eflags |= ((int8_t)_regs.al < 0) ? EFLG_SF : 0;
+ _regs._eflags |= even_parity(_regs.al) ? EFLG_PF : 0;
break;
}
case 0x37: /* aaa */
case 0x3f: /* aas */
generate_exception_if(mode_64bit(), EXC_UD);
- _regs.eflags &= ~EFLG_CF;
- if ( ((uint8_t)_regs.eax > 9) || (_regs.eflags & EFLG_AF) )
+ _regs._eflags &= ~EFLG_CF;
+ if ( (_regs.al > 9) || (_regs._eflags & EFLG_AF) )
{
- ((uint8_t *)&_regs.eax)[0] += (b == 0x37) ? 6 : -6;
- ((uint8_t *)&_regs.eax)[1] += (b == 0x37) ? 1 : -1;
- _regs.eflags |= EFLG_CF | EFLG_AF;
+ _regs.al += (b == 0x37) ? 6 : -6;
+ _regs.ah += (b == 0x37) ? 1 : -1;
+ _regs._eflags |= EFLG_CF | EFLG_AF;
}
- ((uint8_t *)&_regs.eax)[0] &= 0x0f;
+ _regs.al &= 0x0f;
break;
case 0x40 ... 0x4f: /* inc/dec reg */
@@ -2840,9 +2831,9 @@ x86_emulate(
dst.bytes = op_bytes;
dst.val = *dst.reg;
if ( b & 8 )
- emulate_1op("dec", dst, _regs.eflags);
+ emulate_1op("dec", dst, _regs._eflags);
else
- emulate_1op("inc", dst, _regs.eflags);
+ emulate_1op("inc", dst, _regs._eflags);
break;
case 0x50 ... 0x57: /* push reg */
@@ -2864,9 +2855,9 @@ x86_emulate(
case 0x60: /* pusha */ {
int i;
- unsigned long regs[] = {
- _regs.eax, _regs.ecx, _regs.edx, _regs.ebx,
- _regs.esp, _regs.ebp, _regs.esi, _regs.edi };
+ unsigned int regs[] = {
+ _regs._eax, _regs._ecx, _regs._edx, _regs._ebx,
+ _regs._esp, _regs._ebp, _regs._esi, _regs._edi };
generate_exception_if(mode_64bit(), EXC_UD);
fail_if(!ops->write);
@@ -2879,11 +2870,10 @@ x86_emulate(
case 0x61: /* popa */ {
int i;
- unsigned long dummy_esp, *regs[] = {
- (unsigned long *)&_regs.edi, (unsigned long *)&_regs.esi,
- (unsigned long *)&_regs.ebp, (unsigned long *)&dummy_esp,
- (unsigned long *)&_regs.ebx, (unsigned long *)&_regs.edx,
- (unsigned long *)&_regs.ecx, (unsigned long *)&_regs.eax };
+ unsigned int dummy_esp, *regs[] = {
+ &_regs._edi, &_regs._esi, &_regs._ebp, &dummy_esp,
+ &_regs._ebx, &_regs._edx, &_regs._ecx, &_regs._eax };
+
generate_exception_if(mode_64bit(), EXC_UD);
for ( i = 0; i < 8; i++ )
{
@@ -2939,12 +2929,12 @@ x86_emulate(
goto done;
if ( src_rpl > (dst.val & 3) )
{
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
dst.val = (dst.val & ~3) | src_rpl;
}
else
{
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
dst.type = OP_NONE;
}
generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
@@ -2973,7 +2963,8 @@ x86_emulate(
case 0x6c ... 0x6d: /* ins %dx,%es:%edi */ {
unsigned long nr_reps = get_rep_prefix(false, true);
- unsigned int port = (uint16_t)_regs.edx;
+ unsigned int port = _regs.dx;
+
dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
dst.mem.seg = x86_seg_es;
dst.mem.off = truncate_ea_and_reps(_regs.r(di), nr_reps, dst.bytes);
@@ -3013,7 +3004,8 @@ x86_emulate(
case 0x6e ... 0x6f: /* outs %esi,%dx */ {
unsigned long nr_reps = get_rep_prefix(true, false);
- unsigned int port = (uint16_t)_regs.edx;
+ unsigned int port = _regs.dx;
+
dst.bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
ea.mem.off = truncate_ea_and_reps(_regs.r(si), nr_reps, dst.bytes);
if ( (rc = ioport_access_check(port, dst.bytes, ctxt, ops)) != 0 )
@@ -3054,7 +3046,7 @@ x86_emulate(
}
case 0x70 ... 0x7f: /* jcc (short) */
- if ( test_cc(b, _regs.eflags) )
+ if ( test_cc(b, _regs._eflags) )
jmp_rel((int32_t)src.val);
break;
@@ -3077,7 +3069,7 @@ x86_emulate(
case 0xa8 ... 0xa9: /* test imm,%%eax */
case 0x84 ... 0x85: test: /* test */
- emulate_2op_SrcV("test", src, dst, _regs.eflags);
+ emulate_2op_SrcV("test", src, dst, _regs._eflags);
dst.type = OP_NONE;
break;
@@ -3173,25 +3165,19 @@ x86_emulate(
case 0x98: /* cbw/cwde/cdqe */
switch ( op_bytes )
{
- case 2: *(int16_t *)&_regs.eax = (int8_t)_regs.eax; break; /* cbw */
- case 4: _regs.eax = (uint32_t)(int16_t)_regs.eax; break; /* cwde */
- case 8: _regs.eax = (int32_t)_regs.eax; break; /* cdqe */
+ case 2: _regs.ax = (int8_t)_regs.al; break; /* cbw */
+ case 4: _regs.r(ax) = (uint32_t)(int16_t)_regs.ax; break; /* cwde */
+ case 8: _regs.r(ax) = (int32_t)_regs._eax; break; /* cdqe */
}
break;
case 0x99: /* cwd/cdq/cqo */
switch ( op_bytes )
{
- case 2:
- *(int16_t *)&_regs.edx = ((int16_t)_regs.eax < 0) ? -1 : 0;
- break;
- case 4:
- _regs.edx = (uint32_t)(((int32_t)_regs.eax < 0) ? -1 : 0);
- break;
-#ifdef __x86_64__ /* compile warning with some versions of 32-bit gcc */
- case 8:
- _regs.rdx = ((int64_t)_regs.rax < 0) ? -1 : 0;
- break;
+ case 2: _regs.dx = -((int16_t)_regs.ax < 0); break;
+ case 4: _regs.r(dx) = (uint32_t)-((int32_t)_regs._eax < 0); break;
+#ifdef __x86_64__
+ case 8: _regs.rdx = -((int64_t)_regs.rax < 0); break;
#endif
}
break;
@@ -3224,8 +3210,8 @@ x86_emulate(
break;
case 0x9c: /* pushf */
- generate_exception_if((_regs.eflags & EFLG_VM) &&
- MASK_EXTR(_regs.eflags, EFLG_IOPL) != 3,
+ generate_exception_if((_regs._eflags & EFLG_VM) &&
+ MASK_EXTR(_regs._eflags, EFLG_IOPL) != 3,
EXC_GP, 0);
src.val = _regs.r(flags) & ~(EFLG_VM | EFLG_RF);
goto push;
@@ -3235,8 +3221,8 @@ x86_emulate(
if ( !mode_ring0() )
{
- generate_exception_if((_regs.eflags & EFLG_VM) &&
- MASK_EXTR(_regs.eflags, EFLG_IOPL) != 3,
+ generate_exception_if((_regs._eflags & EFLG_VM) &&
+ MASK_EXTR(_regs._eflags, EFLG_IOPL) != 3,
EXC_GP, 0);
mask |= EFLG_IOPL;
if ( !mode_iopl() )
@@ -3249,24 +3235,23 @@ x86_emulate(
&dst.val, op_bytes, ctxt, ops)) != 0 )
goto done;
if ( op_bytes == 2 )
- dst.val = (uint16_t)dst.val | (_regs.eflags & 0xffff0000u);
+ dst.val = (uint16_t)dst.val | (_regs._eflags & 0xffff0000u);
dst.val &= EFLAGS_MODIFIABLE;
- _regs.eflags &= mask;
- _regs.eflags |= (uint32_t)(dst.val & ~mask) | EFLG_MBS;
+ _regs._eflags &= mask;
+ _regs._eflags |= (dst.val & ~mask) | EFLG_MBS;
break;
}
case 0x9e: /* sahf */
if ( mode_64bit() )
vcpu_must_have(lahf_lm);
- *(uint8_t *)&_regs.eflags = (((uint8_t *)&_regs.eax)[1] &
- EFLAGS_MASK) | EFLG_MBS;
+ *(uint8_t *)&_regs._eflags = (_regs.ah & EFLAGS_MASK) | EFLG_MBS;
break;
case 0x9f: /* lahf */
if ( mode_64bit() )
vcpu_must_have(lahf_lm);
- ((uint8_t *)&_regs.eax)[1] = (_regs.eflags & EFLAGS_MASK) | EFLG_MBS;
+ _regs.ah = (_regs._eflags & EFLAGS_MASK) | EFLG_MBS;
break;
case 0xa4 ... 0xa5: /* movs */ {
@@ -3309,9 +3294,9 @@ x86_emulate(
register_address_adjust(_regs.r(di), src.bytes);
put_rep_prefix(1);
/* cmp: dst - src ==> src=*%%edi,dst=*%%esi ==> *%%esi - *%%edi */
- emulate_2op_SrcV("cmp", src, dst, _regs.eflags);
- if ( (repe_prefix() && !(_regs.eflags & EFLG_ZF)) ||
- (repne_prefix() && (_regs.eflags & EFLG_ZF)) )
+ emulate_2op_SrcV("cmp", src, dst, _regs._eflags);
+ if ( (repe_prefix() && !(_regs._eflags & EFLG_ZF)) ||
+ (repne_prefix() && (_regs._eflags & EFLG_ZF)) )
_regs.r(ip) = next_eip;
break;
}
@@ -3359,9 +3344,9 @@ x86_emulate(
put_rep_prefix(1);
/* cmp: %%eax - *%%edi ==> src=%%eax,dst=*%%edi ==> src - dst */
dst.bytes = src.bytes;
- emulate_2op_SrcV("cmp", dst, src, _regs.eflags);
- if ( (repe_prefix() && !(_regs.eflags & EFLG_ZF)) ||
- (repne_prefix() && (_regs.eflags & EFLG_ZF)) )
+ emulate_2op_SrcV("cmp", dst, src, _regs._eflags);
+ if ( (repe_prefix() && !(_regs._eflags & EFLG_ZF)) ||
+ (repne_prefix() && (_regs._eflags & EFLG_ZF)) )
_regs.r(ip) = next_eip;
break;
}
@@ -3382,26 +3367,26 @@ x86_emulate(
switch ( modrm_reg & 7 )
{
case 0: /* rol */
- emulate_2op_SrcB("rol", src, dst, _regs.eflags);
+ emulate_2op_SrcB("rol", src, dst, _regs._eflags);
break;
case 1: /* ror */
- emulate_2op_SrcB("ror", src, dst, _regs.eflags);
+ emulate_2op_SrcB("ror", src, dst, _regs._eflags);
break;
case 2: /* rcl */
- emulate_2op_SrcB("rcl", src, dst, _regs.eflags);
+ emulate_2op_SrcB("rcl", src, dst, _regs._eflags);
break;
case 3: /* rcr */
- emulate_2op_SrcB("rcr", src, dst, _regs.eflags);
+ emulate_2op_SrcB("rcr", src, dst, _regs._eflags);
break;
case 4: /* sal/shl */
case 6: /* sal/shl */
- emulate_2op_SrcB("sal", src, dst, _regs.eflags);
+ emulate_2op_SrcB("sal", src, dst, _regs._eflags);
break;
case 5: /* shr */
- emulate_2op_SrcB("shr", src, dst, _regs.eflags);
+ emulate_2op_SrcB("shr", src, dst, _regs._eflags);
break;
case 7: /* sar */
- emulate_2op_SrcB("sar", src, dst, _regs.eflags);
+ emulate_2op_SrcB("sar", src, dst, _regs._eflags);
break;
}
break;
@@ -3468,12 +3453,10 @@ x86_emulate(
case 0xc9: /* leave */
/* First writeback, to %%esp. */
dst.bytes = (mode_64bit() && (op_bytes == 4)) ? 8 : op_bytes;
- switch ( dst.bytes )
- {
- case 2: *(uint16_t *)&_regs.esp = (uint16_t)_regs.ebp; break;
- case 4: _regs.esp = (uint32_t)_regs.ebp; break; /* 64b: zero-ext */
- case 8: _regs.esp = _regs.ebp; break;
- }
+ if ( dst.bytes == 2 )
+ _regs.sp = _regs.bp;
+ else
+ _regs.r(sp) = dst.bytes == 4 ? _regs._ebp : _regs.r(bp);
/* Second writeback, to %%ebp. */
dst.type = OP_REG;
@@ -3509,7 +3492,7 @@ x86_emulate(
case 0xce: /* into */
generate_exception_if(mode_64bit(), EXC_UD);
- if ( !(_regs.eflags & EFLG_OF) )
+ if ( !(_regs._eflags & EFLG_OF) )
break;
src.val = EXC_OF;
swint_type = x86_swint_into;
@@ -3528,10 +3511,10 @@ x86_emulate(
&eflags, op_bytes, ctxt, ops)) )
goto done;
if ( op_bytes == 2 )
- eflags = (uint16_t)eflags | (_regs.eflags & 0xffff0000u);
+ eflags = (uint16_t)eflags | (_regs._eflags & 0xffff0000u);
eflags &= EFLAGS_MODIFIABLE;
- _regs.eflags &= mask;
- _regs.eflags |= (eflags & ~mask) | EFLG_MBS;
+ _regs._eflags &= mask;
+ _regs._eflags |= (eflags & ~mask) | EFLG_MBS;
if ( (rc = load_seg(x86_seg_cs, sel, 1, &cs, ctxt, ops)) ||
(rc = commit_far_branch(&cs, (uint32_t)eip)) )
goto done;
@@ -3543,7 +3526,7 @@ x86_emulate(
goto grp2;
case 0xd2 ... 0xd3: /* Grp2 */
- src.val = _regs.ecx;
+ src.val = _regs.cl;
goto grp2;
case 0xd4: /* aam */
@@ -3553,35 +3536,36 @@ x86_emulate(
generate_exception_if(mode_64bit(), EXC_UD);
if ( b & 0x01 )
{
- uint16_t ax = _regs.eax;
+ uint16_t ax = _regs.ax;
- *(uint16_t *)&_regs.eax = (uint8_t)(ax + ((ax >> 8) * base));
+ _regs.ax = (uint8_t)(ax + ((ax >> 8) * base));
}
else
{
- uint8_t al = _regs.eax;
+ uint8_t al = _regs.al;
generate_exception_if(!base, EXC_DE);
- *(uint16_t *)&_regs.eax = ((al / base) << 8) | (al % base);
+ _regs.ax = ((al / base) << 8) | (al % base);
}
- _regs.eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
- _regs.eflags |= ((uint8_t)_regs.eax == 0) ? EFLG_ZF : 0;
- _regs.eflags |= (( int8_t)_regs.eax < 0) ? EFLG_SF : 0;
- _regs.eflags |= even_parity(_regs.eax) ? EFLG_PF : 0;
+ _regs._eflags &= ~(EFLG_SF|EFLG_ZF|EFLG_PF);
+ _regs._eflags |= !_regs.al ? EFLG_ZF : 0;
+ _regs._eflags |= ((int8_t)_regs.al < 0) ? EFLG_SF : 0;
+ _regs._eflags |= even_parity(_regs.al) ? EFLG_PF : 0;
break;
}
case 0xd6: /* salc */
generate_exception_if(mode_64bit(), EXC_UD);
- *(uint8_t *)&_regs.eax = (_regs.eflags & EFLG_CF) ? 0xff : 0x00;
+ _regs.al = (_regs._eflags & EFLG_CF) ? 0xff : 0x00;
break;
case 0xd7: /* xlat */ {
- unsigned long al = (uint8_t)_regs.eax;
- if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.ebx + al),
+ unsigned long al;
+
+ if ( (rc = read_ulong(ea.mem.seg, truncate_ea(_regs.r(bx) + _regs.al),
&al, 1, ctxt, ops)) != 0 )
goto done;
- *(uint8_t *)&_regs.eax = al;
+ _regs.al = al;
break;
}
@@ -3998,7 +3982,7 @@ x86_emulate(
/* fnstsw %ax */
dst.bytes = 2;
dst.type = OP_REG;
- dst.reg = (unsigned long *)&_regs.eax;
+ dst.reg = (void *)&_regs.ax;
emulate_fpu_insn_memdst("fnstsw", dst.val);
break;
case 0xe8 ... 0xef: /* fucomip %stN */
@@ -4070,7 +4054,7 @@ x86_emulate(
case 0xe0 ... 0xe2: /* loop{,z,nz} */ {
unsigned long count = get_loop_count(&_regs, ad_bytes);
- int do_jmp = !(_regs.eflags & EFLG_ZF); /* loopnz */
+ int do_jmp = !(_regs._eflags & EFLG_ZF); /* loopnz */
if ( b == 0xe1 )
do_jmp = !do_jmp; /* loopz */
@@ -4095,8 +4079,7 @@ x86_emulate(
case 0xed: /* in %dx,%eax */
case 0xee: /* out %al,%dx */
case 0xef: /* out %eax,%dx */ {
- unsigned int port = ((b < 0xe8) ? (uint8_t)src.val
- : (uint16_t)_regs.edx);
+ unsigned int port = ((b < 0xe8) ? (uint8_t)src.val : _regs.dx);
op_bytes = !(b & 1) ? 1 : (op_bytes == 8) ? 4 : op_bytes;
if ( (rc = ioport_access_check(port, op_bytes, ctxt, ops)) != 0 )
@@ -4105,7 +4088,7 @@ x86_emulate(
{
/* out */
fail_if(ops->write_io == NULL);
- rc = ops->write_io(port, op_bytes, _regs.eax, ctxt);
+ rc = ops->write_io(port, op_bytes, _regs._eax, ctxt);
}
else
{
@@ -4156,7 +4139,7 @@ x86_emulate(
break;
case 0xf5: /* cmc */
- _regs.eflags ^= EFLG_CF;
+ _regs._eflags ^= EFLG_CF;
break;
case 0xf6 ... 0xf7: /* Grp3 */
@@ -4173,25 +4156,25 @@ x86_emulate(
dst.val = ~dst.val;
break;
case 3: /* neg */
- emulate_1op("neg", dst, _regs.eflags);
+ emulate_1op("neg", dst, _regs._eflags);
break;
case 4: /* mul */
- _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+ _regs._eflags &= ~(EFLG_OF|EFLG_CF);
switch ( dst.bytes )
{
case 1:
- dst.val = (uint8_t)_regs.eax;
+ dst.val = _regs.al;
dst.val *= src.val;
if ( (uint8_t)dst.val != (uint16_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
dst.bytes = 2;
break;
case 2:
- dst.val = (uint16_t)_regs.eax;
+ dst.val = _regs.ax;
dst.val *= src.val;
if ( (uint16_t)dst.val != (uint32_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
- *(uint16_t *)&_regs.edx = dst.val >> 16;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
+ _regs.dx = dst.val >> 16;
break;
#ifdef __x86_64__
case 4:
@@ -4206,7 +4189,7 @@ x86_emulate(
u[0] = src.val;
u[1] = _regs.r(ax);
if ( mul_dbl(u) )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
_regs.r(dx) = u[1];
dst.val = u[0];
break;
@@ -4214,23 +4197,23 @@ x86_emulate(
break;
case 5: /* imul */
imul:
- _regs.eflags &= ~(EFLG_OF|EFLG_CF);
+ _regs._eflags &= ~(EFLG_OF|EFLG_CF);
switch ( dst.bytes )
{
case 1:
- dst.val = (int8_t)src.val * (int8_t)_regs.eax;
+ dst.val = (int8_t)src.val * (int8_t)_regs.al;
if ( (int8_t)dst.val != (int16_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
ASSERT(b > 0x6b);
dst.bytes = 2;
break;
case 2:
dst.val = ((uint32_t)(int16_t)src.val *
- (uint32_t)(int16_t)_regs.eax);
+ (uint32_t)(int16_t)_regs.ax);
if ( (int16_t)dst.val != (int32_t)dst.val )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
if ( b > 0x6b )
- *(uint16_t *)&_regs.edx = dst.val >> 16;
+ _regs.dx = dst.val >> 16;
break;
#ifdef __x86_64__
case 4:
@@ -4246,7 +4229,7 @@ x86_emulate(
u[0] = src.val;
u[1] = _regs.r(ax);
if ( imul_dbl(u) )
- _regs.eflags |= EFLG_OF|EFLG_CF;
+ _regs._eflags |= EFLG_OF|EFLG_CF;
if ( b > 0x6b )
_regs.r(dx) = u[1];
dst.val = u[0];
@@ -4257,24 +4240,24 @@ x86_emulate(
switch ( src.bytes )
{
case 1:
- u[0] = (uint16_t)_regs.eax;
+ u[0] = _regs.ax;
u[1] = 0;
v = (uint8_t)src.val;
generate_exception_if(
div_dbl(u, v) || ((uint8_t)u[0] != (uint16_t)u[0]),
EXC_DE);
dst.val = (uint8_t)u[0];
- ((uint8_t *)&_regs.eax)[1] = u[1];
+ _regs.ah = u[1];
break;
case 2:
- u[0] = ((uint32_t)_regs.edx << 16) | (uint16_t)_regs.eax;
+ u[0] = (_regs._edx << 16) | _regs.ax;
u[1] = 0;
v = (uint16_t)src.val;
generate_exception_if(
div_dbl(u, v) || ((uint16_t)u[0] != (uint32_t)u[0]),
EXC_DE);
dst.val = (uint16_t)u[0];
- *(uint16_t *)&_regs.edx = u[1];
+ _regs.dx = u[1];
break;
#ifdef __x86_64__
case 4:
@@ -4302,24 +4285,24 @@ x86_emulate(
switch ( src.bytes )
{
case 1:
- u[0] = (int16_t)_regs.eax;
+ u[0] = (int16_t)_regs.ax;
u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
v = (int8_t)src.val;
generate_exception_if(
idiv_dbl(u, v) || ((int8_t)u[0] != (int16_t)u[0]),
EXC_DE);
dst.val = (int8_t)u[0];
- ((int8_t *)&_regs.eax)[1] = u[1];
+ _regs.ah = u[1];
break;
case 2:
- u[0] = (int32_t)((_regs.edx << 16) | (uint16_t)_regs.eax);
+ u[0] = (int32_t)((_regs._edx << 16) | _regs.ax);
u[1] = ((long)u[0] < 0) ? ~0UL : 0UL;
v = (int16_t)src.val;
generate_exception_if(
idiv_dbl(u, v) || ((int16_t)u[0] != (int32_t)u[0]),
EXC_DE);
dst.val = (int16_t)u[0];
- *(int16_t *)&_regs.edx = u[1];
+ _regs.dx = u[1];
break;
#ifdef __x86_64__
case 4:
@@ -4347,33 +4330,33 @@ x86_emulate(
break;
case 0xf8: /* clc */
- _regs.eflags &= ~EFLG_CF;
+ _regs._eflags &= ~EFLG_CF;
break;
case 0xf9: /* stc */
- _regs.eflags |= EFLG_CF;
+ _regs._eflags |= EFLG_CF;
break;
case 0xfa: /* cli */
generate_exception_if(!mode_iopl(), EXC_GP, 0);
- _regs.eflags &= ~EFLG_IF;
+ _regs._eflags &= ~EFLG_IF;
break;
case 0xfb: /* sti */
generate_exception_if(!mode_iopl(), EXC_GP, 0);
- if ( !(_regs.eflags & EFLG_IF) )
+ if ( !(_regs._eflags & EFLG_IF) )
{
- _regs.eflags |= EFLG_IF;
+ _regs._eflags |= EFLG_IF;
ctxt->retire.sti = true;
}
break;
case 0xfc: /* cld */
- _regs.eflags &= ~EFLG_DF;
+ _regs._eflags &= ~EFLG_DF;
break;
case 0xfd: /* std */
- _regs.eflags |= EFLG_DF;
+ _regs._eflags |= EFLG_DF;
break;
case 0xfe: /* Grp4 */
@@ -4383,10 +4366,10 @@ x86_emulate(
switch ( modrm_reg & 7 )
{
case 0: /* inc */
- emulate_1op("inc", dst, _regs.eflags);
+ emulate_1op("inc", dst, _regs._eflags);
break;
case 1: /* dec */
- emulate_1op("dec", dst, _regs.eflags);
+ emulate_1op("dec", dst, _regs._eflags);
break;
case 2: /* call (near) */
dst.val = _regs.r(ip);
@@ -4433,7 +4416,7 @@ x86_emulate(
goto done;
break;
case 4: /* verr / verw */
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
switch ( rc = protmode_load_seg(x86_seg_none, src.val, false,
&sreg, ctxt, ops) )
{
@@ -4441,7 +4424,7 @@ x86_emulate(
if ( sreg.attr.fields.s &&
((modrm_reg & 1) ? ((sreg.attr.fields.type & 0xa) == 0x2)
: ((sreg.attr.fields.type & 0xa) != 0x8)) )
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
case X86EMUL_EXCEPTION:
if ( ctxt->event_pending )
@@ -4472,9 +4455,9 @@ x86_emulate(
generate_exception_if(lock_prefix || vex.pfx || !mode_ring0(),
EXC_UD);
- _regs.eflags &= ~EFLG_AC;
+ _regs._eflags &= ~EFLG_AC;
if ( modrm == 0xcb )
- _regs.eflags |= EFLG_AC;
+ _regs._eflags |= EFLG_AC;
goto no_writeback;
#ifdef __XEN__
@@ -4513,7 +4496,7 @@ x86_emulate(
generate_exception_if(!vcpu_has_rtm() && !vcpu_has_hle(),
EXC_UD);
/* Neither HLE nor RTM can be active when we get here. */
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
goto no_writeback;
case 0xdf: /* invlpga */
@@ -4540,8 +4523,8 @@ x86_emulate(
unsigned int eax = 1, ebx = 0, dummy = 0;
unsigned long zero = 0;
- base = ad_bytes == 8 ? _regs.eax :
- ad_bytes == 4 ? (uint32_t)_regs.eax : (uint16_t)_regs.eax;
+ base = ad_bytes == 8 ? _regs.r(ax) :
+ ad_bytes == 4 ? _regs._eax : _regs.ax;
limit = 0;
if ( vcpu_has_clflush() &&
ops->cpuid(&eax, &ebx, &dummy, &dummy, ctxt) == X86EMUL_OKAY )
@@ -4665,7 +4648,7 @@ x86_emulate(
case X86EMUL_OPC(0x0f, 0x02): /* lar */
generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
ctxt, ops) )
{
@@ -4685,12 +4668,12 @@ x86_emulate(
case 0x09: /* available 32/64-bit TSS */
case 0x0b: /* busy 32/64-bit TSS */
case 0x0c: /* 32/64-bit call gate */
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
}
}
else
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
case X86EMUL_EXCEPTION:
if ( ctxt->event_pending )
@@ -4703,7 +4686,7 @@ x86_emulate(
rc = X86EMUL_OKAY;
break;
}
- if ( _regs.eflags & EFLG_ZF )
+ if ( _regs._eflags & EFLG_ZF )
dst.val = ((sreg.attr.bytes & 0xff) << 8) |
((sreg.limit >> (sreg.attr.fields.g ? 12 : 0)) &
0xf0000) |
@@ -4714,7 +4697,7 @@ x86_emulate(
case X86EMUL_OPC(0x0f, 0x03): /* lsl */
generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
switch ( rc = protmode_load_seg(x86_seg_none, src.val, false, &sreg,
ctxt, ops) )
{
@@ -4731,12 +4714,12 @@ x86_emulate(
case 0x02: /* LDT */
case 0x09: /* available 32/64-bit TSS */
case 0x0b: /* busy 32/64-bit TSS */
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
}
}
else
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
break;
case X86EMUL_EXCEPTION:
if ( ctxt->event_pending )
@@ -4749,7 +4732,7 @@ x86_emulate(
rc = X86EMUL_OKAY;
break;
}
- if ( _regs.eflags & EFLG_ZF )
+ if ( _regs._eflags & EFLG_ZF )
dst.val = sreg.limit;
else
dst.type = OP_NONE;
@@ -4785,7 +4768,7 @@ x86_emulate(
cs.attr.bytes = 0xa9b; /* L+DB+P+S+Code */
_regs.rcx = _regs.rip;
- _regs.r11 = _regs.eflags & ~EFLG_RF;
+ _regs.r11 = _regs._eflags & ~EFLG_RF;
if ( (rc = ops->read_msr(mode_64bit() ? MSR_LSTAR : MSR_CSTAR,
&msr_content, ctxt)) != 0 )
@@ -4794,16 +4777,16 @@ x86_emulate(
if ( (rc = ops->read_msr(MSR_FMASK, &msr_content, ctxt)) != 0 )
goto done;
- _regs.eflags &= ~(msr_content | EFLG_RF);
+ _regs._eflags &= ~(msr_content | EFLG_RF);
}
else
#endif
{
cs.attr.bytes = 0xc9b; /* G+DB+P+S+Code */
- _regs.ecx = (uint32_t)_regs.eip;
- _regs.eip = (uint32_t)msr_content;
- _regs.eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+ _regs.r(cx) = _regs._eip;
+ _regs._eip = msr_content;
+ _regs._eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
}
fail_if(ops->write_segment == NULL);
@@ -4826,7 +4809,7 @@ x86_emulate(
* mitigation is to use a task gate for handling #DB (or to not use
* enable EFER.SCE to start with).
*/
- singlestep = _regs.eflags & EFLG_TF;
+ singlestep = _regs._eflags & EFLG_TF;
break;
}
@@ -5009,14 +4992,14 @@ x86_emulate(
goto done;
break;
- case X86EMUL_OPC(0x0f, 0x30): /* wrmsr */ {
- uint64_t val = ((uint64_t)_regs.edx << 32) | (uint32_t)_regs.eax;
+ case X86EMUL_OPC(0x0f, 0x30): /* wrmsr */
generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->write_msr == NULL);
- if ( (rc = ops->write_msr((uint32_t)_regs.ecx, val, ctxt)) != 0 )
+ if ( (rc = ops->write_msr(_regs._ecx,
+ ((uint64_t)_regs.r(dx) << 32) | _regs._eax,
+ ctxt)) != 0 )
goto done;
break;
- }
case X86EMUL_OPC(0x0f, 0x31): rdtsc: /* rdtsc */ {
unsigned long cr4;
@@ -5040,7 +5023,7 @@ x86_emulate(
uint64_t val;
generate_exception_if(!mode_ring0(), EXC_GP, 0);
fail_if(ops->read_msr == NULL);
- if ( (rc = ops->read_msr((uint32_t)_regs.ecx, &val, ctxt)) != 0 )
+ if ( (rc = ops->read_msr(_regs._ecx, &val, ctxt)) != 0 )
goto done;
_regs.r(dx) = val >> 32;
_regs.r(ax) = (uint32_t)val;
@@ -5049,7 +5032,7 @@ x86_emulate(
case X86EMUL_OPC(0x0f, 0x40) ... X86EMUL_OPC(0x0f, 0x4f): /* cmovcc */
vcpu_must_have(cmov);
- if ( test_cc(b, _regs.eflags) )
+ if ( test_cc(b, _regs._eflags) )
dst.val = src.val;
break;
@@ -5070,7 +5053,7 @@ x86_emulate(
if ( lm < 0 )
goto cannot_emulate;
- _regs.eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+ _regs._eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
cs.sel = msr_content & ~3; /* SELECTOR_RPL_MASK */
cs.base = 0; /* flat segment */
@@ -5134,8 +5117,8 @@ x86_emulate(
(rc = ops->write_segment(x86_seg_ss, &sreg, ctxt)) != 0 )
goto done;
- _regs.eip = op_bytes == 8 ? _regs.edx : (uint32_t)_regs.edx;
- _regs.esp = op_bytes == 8 ? _regs.ecx : (uint32_t)_regs.ecx;
+ _regs.r(ip) = op_bytes == 8 ? _regs.r(dx) : _regs._edx;
+ _regs.r(sp) = op_bytes == 8 ? _regs.r(cx) : _regs._ecx;
break;
}
@@ -5269,17 +5252,18 @@ x86_emulate(
}
case X86EMUL_OPC(0x0f, 0x80) ... X86EMUL_OPC(0x0f, 0x8f): /* jcc (near) */
- if ( test_cc(b, _regs.eflags) )
+ if ( test_cc(b, _regs._eflags) )
jmp_rel((int32_t)src.val);
break;
case X86EMUL_OPC(0x0f, 0x90) ... X86EMUL_OPC(0x0f, 0x9f): /* setcc */
- dst.val = test_cc(b, _regs.eflags);
+ dst.val = test_cc(b, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xa2): /* cpuid */ {
- unsigned int eax = _regs.eax, ebx = _regs.ebx;
- unsigned int ecx = _regs.ecx, edx = _regs.edx;
+ unsigned int eax = _regs._eax, ebx = _regs._ebx;
+ unsigned int ecx = _regs._ecx, edx = _regs._edx;
+
fail_if(ops->cpuid == NULL);
rc = ops->cpuid(&eax, &ebx, &ecx, &edx, ctxt);
generate_exception_if(rc == X86EMUL_EXCEPTION,
@@ -5294,7 +5278,7 @@ x86_emulate(
}
case X86EMUL_OPC(0x0f, 0xa3): bt: /* bt */
- emulate_2op_SrcV_nobyte("bt", src, dst, _regs.eflags);
+ emulate_2op_SrcV_nobyte("bt", src, dst, _regs._eflags);
dst.type = OP_NONE;
break;
@@ -5306,7 +5290,7 @@ x86_emulate(
generate_exception_if(lock_prefix, EXC_UD);
if ( b & 1 )
- shift = _regs.ecx;
+ shift = _regs.cl;
else
{
shift = src.val;
@@ -5325,19 +5309,19 @@ x86_emulate(
((dst.orig_val << shift) |
((src.val >> (width - shift)) & ((1ull << shift) - 1))));
dst.val = truncate_word(dst.val, dst.bytes);
- _regs.eflags &= ~(EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_PF|EFLG_CF);
+ _regs._eflags &= ~(EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_PF|EFLG_CF);
if ( (dst.val >> ((b & 8) ? (shift - 1) : (width - shift))) & 1 )
- _regs.eflags |= EFLG_CF;
+ _regs._eflags |= EFLG_CF;
if ( ((dst.val ^ dst.orig_val) >> (width - 1)) & 1 )
- _regs.eflags |= EFLG_OF;
- _regs.eflags |= ((dst.val >> (width - 1)) & 1) ? EFLG_SF : 0;
- _regs.eflags |= (dst.val == 0) ? EFLG_ZF : 0;
- _regs.eflags |= even_parity(dst.val) ? EFLG_PF : 0;
+ _regs._eflags |= EFLG_OF;
+ _regs._eflags |= ((dst.val >> (width - 1)) & 1) ? EFLG_SF : 0;
+ _regs._eflags |= (dst.val == 0) ? EFLG_ZF : 0;
+ _regs._eflags |= even_parity(dst.val) ? EFLG_PF : 0;
break;
}
case X86EMUL_OPC(0x0f, 0xab): bts: /* bts */
- emulate_2op_SrcV_nobyte("bts", src, dst, _regs.eflags);
+ emulate_2op_SrcV_nobyte("bts", src, dst, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xae): case X86EMUL_OPC_66(0x0f, 0xae): /* Grp15 */
@@ -5399,7 +5383,7 @@ x86_emulate(
}
case X86EMUL_OPC(0x0f, 0xaf): /* imul */
- emulate_2op_SrcV_srcmem("imul", src, dst, _regs.eflags);
+ emulate_2op_SrcV_srcmem("imul", src, dst, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xb0): case X86EMUL_OPC(0x0f, 0xb1): /* cmpxchg */
@@ -5407,8 +5391,8 @@ x86_emulate(
src.orig_val = src.val;
src.val = _regs.r(ax);
/* cmp: %%eax - dst ==> dst and src swapped for macro invocation */
- emulate_2op_SrcV("cmp", dst, src, _regs.eflags);
- if ( _regs.eflags & EFLG_ZF )
+ emulate_2op_SrcV("cmp", dst, src, _regs._eflags);
+ if ( _regs._eflags & EFLG_ZF )
{
/* Success: write back to memory. */
dst.val = src.orig_val;
@@ -5428,7 +5412,7 @@ x86_emulate(
goto les;
case X86EMUL_OPC(0x0f, 0xb3): btr: /* btr */
- emulate_2op_SrcV_nobyte("btr", src, dst, _regs.eflags);
+ emulate_2op_SrcV_nobyte("btr", src, dst, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xb6): /* movzx rm8,r{16,32,64} */
@@ -5454,7 +5438,7 @@ x86_emulate(
break;
case X86EMUL_OPC(0x0f, 0xbb): btc: /* btc */
- emulate_2op_SrcV_nobyte("btc", src, dst, _regs.eflags);
+ emulate_2op_SrcV_nobyte("btc", src, dst, _regs._eflags);
break;
case X86EMUL_OPC(0x0f, 0xbc): /* bsf or tzcnt */
@@ -5464,21 +5448,21 @@ x86_emulate(
asm ( "bsf %2,%0" ASM_FLAG_OUT(, "; setz %1")
: "=r" (dst.val), ASM_FLAG_OUT("=@ccz", "=qm") (zf)
: "rm" (src.val) );
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
if ( (vex.pfx == vex_f3) && vcpu_has_bmi1() )
{
- _regs.eflags &= ~EFLG_CF;
+ _regs._eflags &= ~EFLG_CF;
if ( zf )
{
- _regs.eflags |= EFLG_CF;
+ _regs._eflags |= EFLG_CF;
dst.val = op_bytes * 8;
}
else if ( !dst.val )
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
}
else if ( zf )
{
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
dst.type = OP_NONE;
}
break;
@@ -5491,25 +5475,25 @@ x86_emulate(
asm ( "bsr %2,%0" ASM_FLAG_OUT(, "; setz %1")
: "=r" (dst.val), ASM_FLAG_OUT("=@ccz", "=qm") (zf)
: "rm" (src.val) );
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
if ( (vex.pfx == vex_f3) && vcpu_has_lzcnt() )
{
- _regs.eflags &= ~EFLG_CF;
+ _regs._eflags &= ~EFLG_CF;
if ( zf )
{
- _regs.eflags |= EFLG_CF;
+ _regs._eflags |= EFLG_CF;
dst.val = op_bytes * 8;
}
else
{
dst.val = op_bytes * 8 - 1 - dst.val;
if ( !dst.val )
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
}
}
else if ( zf )
{
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
dst.type = OP_NONE;
}
break;
@@ -5577,8 +5561,8 @@ x86_emulate(
/* Get expected value. */
if ( !(rex_prefix & REX_W) )
{
- aux->u32[0] = _regs.eax;
- aux->u32[1] = _regs.edx;
+ aux->u32[0] = _regs._eax;
+ aux->u32[1] = _regs._edx;
}
else
{
@@ -5591,7 +5575,7 @@ x86_emulate(
/* Expected != actual: store actual to rDX:rAX and clear ZF. */
_regs.r(ax) = !(rex_prefix & REX_W) ? old->u32[0] : old->u64[0];
_regs.r(dx) = !(rex_prefix & REX_W) ? old->u32[1] : old->u64[1];
- _regs.eflags &= ~EFLG_ZF;
+ _regs._eflags &= ~EFLG_ZF;
}
else
{
@@ -5601,8 +5585,8 @@ x86_emulate(
*/
if ( !(rex_prefix & REX_W) )
{
- aux->u32[0] = _regs.ebx;
- aux->u32[1] = _regs.ecx;
+ aux->u32[0] = _regs._ebx;
+ aux->u32[1] = _regs._ecx;
}
else
{
@@ -5613,7 +5597,7 @@ x86_emulate(
if ( (rc = ops->cmpxchg(ea.mem.seg, ea.mem.off, old, aux,
op_bytes, ctxt)) != X86EMUL_OKAY )
goto done;
- _regs.eflags |= EFLG_ZF;
+ _regs._eflags |= EFLG_ZF;
}
break;
}
@@ -5733,7 +5717,7 @@ x86_emulate(
/* Zero the upper 32 bits of %rip if not in 64-bit mode. */
if ( !mode_64bit() )
- _regs.eip = (uint32_t)_regs.eip;
+ _regs.r(ip) = _regs._eip;
/* Should a singlestep #DB be raised? */
if ( rc == X86EMUL_OKAY )
@@ -5747,7 +5731,7 @@ x86_emulate(
rc = X86EMUL_OKAY;
}
- ctxt->regs->eflags &= ~EFLG_RF;
+ ctxt->regs->_eflags &= ~EFLG_RF;
done:
_put_fpu();
@@ -30,7 +30,7 @@ headers-$(CONFIG_X86) += compat/hvm/
headers-y += compat/arch-$(compat-arch-y).h compat/pmu.h compat/xlat.h
headers-$(CONFIG_FLASK) += compat/xsm/flask_op.h
-cppflags-y := -include public/xen-compat.h
+cppflags-y := -include public/xen-compat.h -DXEN_GENERATING_COMPAT_HEADERS
cppflags-$(CONFIG_X86) += -m32
# 8-byte types are 4-byte aligned on x86_32 ...
@@ -109,22 +109,44 @@
#ifndef __ASSEMBLY__
+#if defined(XEN_GENERATING_COMPAT_HEADERS)
+/* nothing */
+#elif defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax). */
+#define __DECL_REG_LO8(which) union { \
+ uint32_t e ## which ## x, _e ## which ## x; \
+ uint16_t which ## x; \
+ struct { \
+ uint8_t which ## l; \
+ uint8_t which ## h; \
+ }; \
+}
+#define __DECL_REG_LO16(name) union { \
+ uint32_t e ## name, _e ## name; \
+ uint16_t name; \
+}
+#else
+/* Other sources must always use the proper 32-bit name (e.g., eax). */
+#define __DECL_REG_LO8(which) uint32_t e ## which ## x
+#define __DECL_REG_LO16(name) uint32_t e ## name
+#endif
+
struct cpu_user_regs {
- uint32_t ebx;
- uint32_t ecx;
- uint32_t edx;
- uint32_t esi;
- uint32_t edi;
- uint32_t ebp;
- uint32_t eax;
+ __DECL_REG_LO8(b);
+ __DECL_REG_LO8(c);
+ __DECL_REG_LO8(d);
+ __DECL_REG_LO16(si);
+ __DECL_REG_LO16(di);
+ __DECL_REG_LO16(bp);
+ __DECL_REG_LO8(a);
uint16_t error_code; /* private */
uint16_t entry_vector; /* private */
- uint32_t eip;
+ __DECL_REG_LO16(ip);
uint16_t cs;
uint8_t saved_upcall_mask;
uint8_t _pad0;
- uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
- uint32_t esp;
+ __DECL_REG_LO16(flags); /* eflags.IF == !saved_upcall_mask */
+ __DECL_REG_LO16(sp);
uint16_t ss, _pad1;
uint16_t es, _pad2;
uint16_t ds, _pad3;
@@ -134,6 +156,9 @@ struct cpu_user_regs {
typedef struct cpu_user_regs cpu_user_regs_t;
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+
/*
* Page-directory addresses above 4GB do not fit into architectural %cr3.
* When accessing %cr3, or equivalent field in vcpu_guest_context, guests
@@ -130,7 +130,35 @@ struct iret_context {
/* Bottom of iret stack frame. */
};
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Anonymous unions include all permissible names (e.g., al/ah/ax/eax/rax). */
+#define __DECL_REG_LOHI(which) union { \
+ uint64_t r ## which ## x; \
+ uint32_t _e ## which ## x; \
+ uint16_t which ## x; \
+ struct { \
+ uint8_t which ## l; \
+ uint8_t which ## h; \
+ }; \
+}
+#define __DECL_REG_LO8(name) union { \
+ uint64_t r ## name; \
+ uint32_t _e ## name; \
+ uint16_t name; \
+ uint8_t name ## l; \
+}
+#define __DECL_REG_LO16(name) union { \
+ uint64_t r ## name; \
+ uint32_t _e ## name; \
+ uint16_t name; \
+}
+#define __DECL_REG_HI(num) union { \
+ uint64_t r ## num; \
+ uint32_t r ## num ## d; \
+ uint16_t r ## num ## w; \
+ uint8_t r ## num ## b; \
+}
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
#define __DECL_REG(name) union { \
uint64_t r ## name, e ## name; \
@@ -141,30 +169,37 @@ struct iret_context {
#define __DECL_REG(name) uint64_t r ## name
#endif
+#ifndef __DECL_REG_LOHI
+#define __DECL_REG_LOHI(name) __DECL_REG(name ## x)
+#define __DECL_REG_LO8 __DECL_REG
+#define __DECL_REG_LO16 __DECL_REG
+#define __DECL_REG_HI __DECL_REG
+#endif
+
struct cpu_user_regs {
- uint64_t r15;
- uint64_t r14;
- uint64_t r13;
- uint64_t r12;
- __DECL_REG(bp);
- __DECL_REG(bx);
- uint64_t r11;
- uint64_t r10;
- uint64_t r9;
- uint64_t r8;
- __DECL_REG(ax);
- __DECL_REG(cx);
- __DECL_REG(dx);
- __DECL_REG(si);
- __DECL_REG(di);
+ __DECL_REG_HI(15);
+ __DECL_REG_HI(14);
+ __DECL_REG_HI(13);
+ __DECL_REG_HI(12);
+ __DECL_REG_LO8(bp);
+ __DECL_REG_LOHI(b);
+ __DECL_REG_HI(11);
+ __DECL_REG_HI(10);
+ __DECL_REG_HI(9);
+ __DECL_REG_HI(8);
+ __DECL_REG_LOHI(a);
+ __DECL_REG_LOHI(c);
+ __DECL_REG_LOHI(d);
+ __DECL_REG_LO8(si);
+ __DECL_REG_LO8(di);
uint32_t error_code; /* private */
uint32_t entry_vector; /* private */
- __DECL_REG(ip);
+ __DECL_REG_LO16(ip);
uint16_t cs, _pad0[1];
uint8_t saved_upcall_mask;
uint8_t _pad1[3];
- __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
- __DECL_REG(sp);
+ __DECL_REG_LO16(flags); /* rflags.IF == !saved_upcall_mask */
+ __DECL_REG_LO8(sp);
uint16_t ss, _pad2[3];
uint16_t es, _pad3[3];
uint16_t ds, _pad4[3];
@@ -175,6 +210,10 @@ typedef struct cpu_user_regs cpu_user_re
DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
#undef __DECL_REG
+#undef __DECL_REG_LOHI
+#undef __DECL_REG_LO8
+#undef __DECL_REG_LO16
+#undef __DECL_REG_HI
#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
@@ -57,7 +57,17 @@
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
#if defined(__i386__)
+# ifdef __XEN__
+__DeFiNe__ __DECL_REG_LO8(which) uint32_t _e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) union { uint32_t e ## name, _e ## name; }
+# endif
#include "xen-x86_32.h"
+# ifdef __XEN__
+__UnDeF__ __DECL_REG_LO8
+__UnDeF__ __DECL_REG_LO16
+__DeFiNe__ __DECL_REG_LO8(which) _e ## which ## x
+__DeFiNe__ __DECL_REG_LO16(name) _e ## name
+# endif
#elif defined(__x86_64__)
#include "xen-x86_64.h"
#endif
@@ -7,6 +7,8 @@ pats = [
[ r"__IfDeF__ (XEN_HAVE.*)", r"#ifdef \1" ],
[ r"__ElSe__", r"#else" ],
[ r"__EnDif__", r"#endif" ],
+ [ r"__DeFiNe__", r"#define" ],
+ [ r"__UnDeF__", r"#undef" ],
[ r"\"xen-compat.h\"", r"<public/xen-compat.h>" ],
[ r"(struct|union|enum)\s+(xen_?)?(\w)", r"\1 compat_\3" ],
[ r"@KeeP@", r"" ],
Eliminate the mis-naming of 64-bit fields with 32-bit register names (e.g., eflags instead of rflags). To ensure no piece of code was missed, transiently use the underscore-prefixed names only for 32-bit register accesses. This will be cleaned up subsequently. Signed-off-by: Jan Beulich <jbeulich@suse.com> --- This is part II of the register renaming project. My intention is for things to stay this way for a couple of weeks, so people with patches in progress can update accordingly before we switch from the _e* names to the proper e* ones.