Message ID | 58B58B06020000780013E3EB@prv-mh.provo.novell.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
> -----Original Message----- > From: Jan Beulich [mailto:JBeulich@suse.com] > Sent: 28 February 2017 13:37 > To: xen-devel <xen-devel@lists.xenproject.org> > Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>; Paul Durrant > <Paul.Durrant@citrix.com>; George Dunlap <George.Dunlap@citrix.com> > Subject: [PATCH 4/8] x86/HVMemul: switch away from temporary 32-bit > register names > > Signed-off-by: Jan Beulich <jbeulich@suse.com> > Reviewed-by: Paul Durrant <paul.durrant@citrix.com> > --- a/xen/arch/x86/hvm/emulate.c > +++ b/xen/arch/x86/hvm/emulate.c > @@ -442,7 +442,7 @@ static int hvmemul_linear_to_phys( > } > > /* Reverse mode if this is a backwards multi-iteration string operation. */ > - reverse = (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && > (*reps > 1); > + reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && > (*reps > 1); > > if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) ) > { > @@ -539,7 +539,7 @@ static int hvmemul_virtual_to_linear( > if ( IS_ERR(reg) ) > return -PTR_ERR(reg); > > - if ( (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1) > ) > + if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) ) > { > /* > * x86_emulate() clips the repetition count to ensure we don't wrap > @@ -1085,7 +1085,7 @@ static int hvmemul_rep_ins( > return X86EMUL_UNHANDLEABLE; > > return hvmemul_do_pio_addr(src_port, reps, bytes_per_rep, > IOREQ_READ, > - !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa); > + !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa); > } > > static int hvmemul_rep_outs_set_context( > @@ -1154,7 +1154,7 @@ static int hvmemul_rep_outs( > return X86EMUL_UNHANDLEABLE; > > return hvmemul_do_pio_addr(dst_port, reps, bytes_per_rep, > IOREQ_WRITE, > - !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa); > + !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa); > } > > static int hvmemul_rep_movs( > @@ -1173,7 +1173,7 @@ static int hvmemul_rep_movs( > paddr_t sgpa, dgpa; > uint32_t pfec = PFEC_page_present; > p2m_type_t 
sp2mt, dp2mt; > - int rc, df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF); > + int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF); > char *buf; > > rc = hvmemul_virtual_to_linear( > @@ -1327,7 +1327,7 @@ static int hvmemul_rep_stos( > unsigned long addr, bytes; > paddr_t gpa; > p2m_type_t p2mt; > - bool_t df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF); > + bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF); > int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps, > hvm_access_write, hvmemul_ctxt, &addr); > > @@ -1775,7 +1775,7 @@ static int _hvm_emulate_one(struct hvm_e > if ( hvmemul_ctxt->ctxt.retire.hlt && > !hvm_local_events_need_delivery(curr) ) > { > - hvm_hlt(regs->_eflags); > + hvm_hlt(regs->eflags); > } > > return X86EMUL_OKAY; > --- a/xen/arch/x86/hvm/io.c > +++ b/xen/arch/x86/hvm/io.c > @@ -136,7 +136,7 @@ bool handle_pio(uint16_t port, unsigned > ASSERT((size - 1) < 4 && size != 3); > > if ( dir == IOREQ_WRITE ) > - data = guest_cpu_user_regs()->_eax; > + data = guest_cpu_user_regs()->eax; > > rc = hvmemul_do_pio_buffer(port, size, dir, &data); > > >
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -442,7 +442,7 @@ static int hvmemul_linear_to_phys(
     }
 
     /* Reverse mode if this is a backwards multi-iteration string operation. */
-    reverse = (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1);
+    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
 
     if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
     {
@@ -539,7 +539,7 @@ static int hvmemul_virtual_to_linear(
     if ( IS_ERR(reg) )
         return -PTR_ERR(reg);
 
-    if ( (hvmemul_ctxt->ctxt.regs->_eflags & X86_EFLAGS_DF) && (*reps > 1) )
+    if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
     {
         /*
          * x86_emulate() clips the repetition count to ensure we don't wrap
@@ -1085,7 +1085,7 @@ static int hvmemul_rep_ins(
         return X86EMUL_UNHANDLEABLE;
 
     return hvmemul_do_pio_addr(src_port, reps, bytes_per_rep, IOREQ_READ,
-                               !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
+                               !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
 }
 
 static int hvmemul_rep_outs_set_context(
@@ -1154,7 +1154,7 @@ static int hvmemul_rep_outs(
         return X86EMUL_UNHANDLEABLE;
 
     return hvmemul_do_pio_addr(dst_port, reps, bytes_per_rep, IOREQ_WRITE,
-                               !!(ctxt->regs->_eflags & X86_EFLAGS_DF), gpa);
+                               !!(ctxt->regs->eflags & X86_EFLAGS_DF), gpa);
 }
 
 static int hvmemul_rep_movs(
@@ -1173,7 +1173,7 @@ static int hvmemul_rep_movs(
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
     p2m_type_t sp2mt, dp2mt;
-    int rc, df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
+    int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
     char *buf;
 
     rc = hvmemul_virtual_to_linear(
@@ -1327,7 +1327,7 @@ static int hvmemul_rep_stos(
     unsigned long addr, bytes;
     paddr_t gpa;
     p2m_type_t p2mt;
-    bool_t df = !!(ctxt->regs->_eflags & X86_EFLAGS_DF);
+    bool_t df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
     int rc = hvmemul_virtual_to_linear(seg, offset, bytes_per_rep, reps,
                                        hvm_access_write, hvmemul_ctxt, &addr);
 
@@ -1775,7 +1775,7 @@ static int _hvm_emulate_one(struct hvm_e
     if ( hvmemul_ctxt->ctxt.retire.hlt &&
         !hvm_local_events_need_delivery(curr) )
     {
-        hvm_hlt(regs->_eflags);
+        hvm_hlt(regs->eflags);
     }
 
     return X86EMUL_OKAY;
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -136,7 +136,7 @@ bool handle_pio(uint16_t port, unsigned
     ASSERT((size - 1) < 4 && size != 3);
 
     if ( dir == IOREQ_WRITE )
-        data = guest_cpu_user_regs()->_eax;
+        data = guest_cpu_user_regs()->eax;
 
     rc = hvmemul_do_pio_buffer(port, size, dir, &data);