
[06/15] x86/emul: Rework emulator event injection

Message ID 1479915538-15282-7-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived

Commit Message

Andrew Cooper Nov. 23, 2016, 3:38 p.m. UTC
The emulator needs to gain an understanding of interrupts and exceptions
generated by its actions.

Move hvm_emulate_ctxt.{exn_pending,trap} into struct x86_emulate_ctxt so they
are visible to the emulator.  This removes the need for the
inject_{hw,sw}_interrupt() hooks, which are dropped and replaced with
x86_emul_{hw_exception,software_event}() instead.

The shadow pagetable and PV uses of x86_emulate() previously failed with
X86EMUL_UNHANDLEABLE due to the lack of inject_*() hooks, but this behaviour
has subtly changed.  Adjust the return value checking to cause a pending event
to fall back into the previous codepath.

No overall functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Paul Durrant <paul.durrant@citrix.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
---
 tools/tests/x86_emulator/test_x86_emulator.c |  1 +
 xen/arch/x86/hvm/emulate.c                   | 81 ++++------------------------
 xen/arch/x86/hvm/hvm.c                       |  4 +-
 xen/arch/x86/hvm/io.c                        |  4 +-
 xen/arch/x86/hvm/vmx/realmode.c              | 16 +++---
 xen/arch/x86/mm.c                            |  5 +-
 xen/arch/x86/mm/shadow/multi.c               |  4 +-
 xen/arch/x86/x86_emulate/x86_emulate.c       | 12 ++---
 xen/arch/x86/x86_emulate/x86_emulate.h       | 67 ++++++++++++++++++-----
 xen/include/asm-x86/hvm/emulate.h            |  3 --
 10 files changed, 86 insertions(+), 111 deletions(-)
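
For illustration, a minimal sketch (not part of the patch) of the caller-side
pattern this introduces, modelled on the handle_mmio() hunk below: the
emulator latches any event it raises in the context structure, and the caller
decides whether to inject it.

    /* Sketch only; assumes Xen's asm-x86/hvm/emulate.h definitions. */
    static void sketch_emulate_and_inject(struct hvm_emulate_ctxt *ctxt)
    {
        switch ( hvm_emulate_one(ctxt) )
        {
        case X86EMUL_UNHANDLEABLE:
            hvm_dump_emulation_state(XENLOG_G_WARNING "SKETCH", ctxt);
            break;

        case X86EMUL_EXCEPTION:
            /* Replaces the old hvm_emulate_ctxt.{exn_pending,trap} pair. */
            if ( ctxt->ctxt.event_pending )
                hvm_inject_event(&ctxt->ctxt.event);
            break;

        default:
            break;
        }

        hvm_emulate_writeback(ctxt);
    }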

Comments

Tim Deegan Nov. 23, 2016, 4:19 p.m. UTC | #1
Hi,

At 15:38 +0000 on 23 Nov (1479915529), Andrew Cooper wrote:
> The emulator needs to gain an understanding of interrupts and exceptions
> generated by its actions.
> 
> Move hvm_emulate_ctxt.{exn_pending,trap} into struct x86_emulate_ctxt so they
> are visible to the emulator.  This removes the need for the
> inject_{hw,sw}_interrupt() hooks, which are dropped and replaced with
> x86_emul_{hw_exception,software_event}() instead.
> 
> The shadow pagetable and PV uses of x86_emulate() previously failed with
> X86EMUL_UNHANDLEABLE due to the lack of inject_*() hooks, but this behaviour
> has subtly changed.  Adjust the return value checking to cause a pending event
> to fall back into the previous codepath.
> 
> No overall functional change.

AIUI this does have a change in the shadow callers in the case where
the emulated instruction would inject an event.  Previously we would
have failed the emulation, perhaps unshadowed something, and returned
to the guest to retry.

Now the emulator records the event in the context struct, updates the
register state and returns success, so we'll return on the *next*
instruction.  I think that's OK, though.

Also, handle_mmio() and other callers of the emulator check for that
pending event and pass it to the hardware but you haven't added
anything in the shadow code to do that.  Does the event get dropped?

Tim.
Jan Beulich Nov. 23, 2016, 4:33 p.m. UTC | #2
>>> On 23.11.16 at 17:19, <tim@xen.org> wrote:
> Hi,
> 
> At 15:38 +0000 on 23 Nov (1479915529), Andrew Cooper wrote:
>> The emulator needs to gain an understanding of interrupts and exceptions
>> generated by its actions.
>> 
>> Move hvm_emulate_ctxt.{exn_pending,trap} into struct x86_emulate_ctxt so 
> they
>> are visible to the emulator.  This removes the need for the
>> inject_{hw,sw}_interrupt() hooks, which are dropped and replaced with
>> x86_emul_{hw_exception,software_event}() instead.
>> 
>> The shadow pagetable and PV uses of x86_emulate() previously failed with
>> X86EMUL_UNHANDLEABLE due to the lack of inject_*() hooks, but this behaviour
>> has subtly changed.  Adjust the return value checking to cause a pending 
> event
>> to fall back into the previous codepath.
>> 
>> No overall functional change.
> 
> AIUI this does have a change in the shadow callers in the case where
> the emulated instruction would inject an event.  Previously we would
> have failed the emulation, perhaps unshadowed something, and returned
> to the guest to retry.
> 
> Now the emulator records the event in the context struct, updates the
> register state and returns success, so we'll return on the *next*
> instruction.  I think that's OK, though.

Not exactly - instead of success, X86EMUL_EXCEPTION is being
returned, which would suppress register updates. Also I don't
think continuing on the next instruction would be okay, as we'd
then basically have skipped the one having caused the (not
delivered) exception.

Jan
Tim Deegan Nov. 23, 2016, 4:43 p.m. UTC | #3
At 09:33 -0700 on 23 Nov (1479893609), Jan Beulich wrote:
> >>> On 23.11.16 at 17:19, <tim@xen.org> wrote:
> > Hi,
> > 
> > At 15:38 +0000 on 23 Nov (1479915529), Andrew Cooper wrote:
> >> The emulator needs to gain an understanding of interrupts and exceptions
> >> generated by its actions.
> >> 
> >> Move hvm_emulate_ctxt.{exn_pending,trap} into struct x86_emulate_ctxt so 
> > they
> >> are visible to the emulator.  This removes the need for the
> >> inject_{hw,sw}_interrupt() hooks, which are dropped and replaced with
> >> x86_emul_{hw_exception,software_event}() instead.
> >> 
> >> The shadow pagetable and PV uses of x86_emulate() previously failed with
> >> X86EMUL_UNHANDLEABLE due to the lack of inject_*() hooks, but this behaviour
> >> has subtly changed.  Adjust the return value checking to cause a pending 
> > event
> >> to fall back into the previous codepath.
> >> 
> >> No overall functional change.
> > 
> > AIUI this does have a change in the shadow callers in the case where
> > the emulated instruction would inject an event.  Previously we would
> > have failed the emulation, perhaps unshadowed something, and returned
> > to the guest to retry.
> > 
> > Now the emulator records the event in the context struct, updates the
> > register state and returns success, so we'll return on the *next*
> > instruction.  I think that's OK, though.
> 
> Not exactly - instead of success, X86EMUL_EXCEPTION is being
> returned, which would suppress register updates.

Oh right.  In that case AFAICS neither invocation of x86_emulate() in
shadow/multi.c needs any adjustment.

Tim.
Boris Ostrovsky Nov. 23, 2016, 5:56 p.m. UTC | #4
On 11/23/2016 10:38 AM, Andrew Cooper wrote:
> The emulator needs to gain an understanding of interrupts and exceptions
> generated by its actions.
>
> Move hvm_emulate_ctxt.{exn_pending,trap} into struct x86_emulate_ctxt so they
> are visible to the emulator.  This removes the need for the
> inject_{hw,sw}_interrupt() hooks, which are dropped and replaced with
> x86_emul_{hw_exception,software_event}() instead.
>
> The shadow pagetable and PV uses of x86_emulate() previously failed with
> X86EMUL_UNHANDLEABLE due to the lack of inject_*() hooks, but this behaviour
> has subtly changed.  Adjust the return value checking to cause a pending event
> to fall back into the previous codepath.
>
> No overall functional change.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>


Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Tian, Kevin Nov. 24, 2016, 6:20 a.m. UTC | #5
> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: Wednesday, November 23, 2016 11:39 PM
> 
> The emulator needs to gain an understanding of interrupts and exceptions
> generated by its actions.
> 
> Move hvm_emulate_ctxt.{exn_pending,trap} into struct x86_emulate_ctxt so they
> are visible to the emulator.  This removes the need for the
> inject_{hw,sw}_interrupt() hooks, which are dropped and replaced with
> x86_emul_{hw_exception,software_event}() instead.
> 
> The shadow pagetable and PV uses of x86_emulate() previously failed with
> X86EMUL_UNHANDLEABLE due to the lack of inject_*() hooks, but this behaviour
> has subtly changed.  Adjust the return value checking to cause a pending event
> to fall back into the previous codepath.
> 
> No overall functional change.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Jan Beulich Nov. 24, 2016, 2:53 p.m. UTC | #6
>>> On 23.11.16 at 16:38, <andrew.cooper3@citrix.com> wrote:
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -5377,7 +5377,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
>      page_unlock(page);
>      put_page(page);
>  
> -    if ( rc == X86EMUL_UNHANDLEABLE )
> +    if ( rc == X86EMUL_UNHANDLEABLE || ptwr_ctxt.ctxt.event_pending )
>          goto bail;
>  
>      perfc_incr(ptwr_emulations);
> @@ -5501,7 +5501,8 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
>      else
>          rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
>  
> -    return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
> +    return ((rc != X86EMUL_UNHANDLEABLE && !ctxt.event_pending)
> +            ? EXCRET_fault_fixed : 0);
>  }

Wouldn't these two better be adjusted to check for OKAY and RETRY,
the more so since, IIRC, we had settled on it not (yet) being guaranteed
to see event_pending set whenever getting back EXCEPTION?

> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -3378,7 +3378,7 @@ static int sh_page_fault(struct vcpu *v,
>       * would be a good unshadow hint. If we *do* decide to unshadow-on-fault
>       * then it must be 'failable': we cannot require the unshadow to succeed.
>       */
> -    if ( r == X86EMUL_UNHANDLEABLE )
> +    if ( r == X86EMUL_UNHANDLEABLE || emul_ctxt.ctxt.event_pending )

Same here then.

> @@ -3433,7 +3433,7 @@ static int sh_page_fault(struct vcpu *v,
>              shadow_continue_emulation(&emul_ctxt, regs);
>              v->arch.paging.last_write_was_pt = 0;
>              r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
> -            if ( r == X86EMUL_OKAY )
> +            if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.event_pending )

Aiui you need this for the swint case. But wouldn't you then need to
add similar checks in OKAY paths elsewhere? Or alternatively,
wouldn't it be better to have x86_emulate() return EXCEPTION also
for successful swint emulation (albeit that would likely require other
not very nice adjustments)?

Jan
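
For reference, a hypothetical form of the check being suggested here for
mmio_ro_do_page_fault() (sketch only; the patch as posted keeps the
UNHANDLEABLE test) might be:

    /* Accept only the success/retry outcomes, and still treat a latched
     * event as "not handled here". */
    return ((rc == X86EMUL_OKAY || rc == X86EMUL_RETRY) &&
            !ctxt.event_pending)
           ? EXCRET_fault_fixed : 0;
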
Andrew Cooper Nov. 24, 2016, 5 p.m. UTC | #7
On 24/11/16 14:53, Jan Beulich wrote:
>>>> On 23.11.16 at 16:38, <andrew.cooper3@citrix.com> wrote:
>> --- a/xen/arch/x86/mm.c
>> +++ b/xen/arch/x86/mm.c
>> @@ -5377,7 +5377,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
>>      page_unlock(page);
>>      put_page(page);
>>  
>> -    if ( rc == X86EMUL_UNHANDLEABLE )
>> +    if ( rc == X86EMUL_UNHANDLEABLE || ptwr_ctxt.ctxt.event_pending )
>>          goto bail;
>>  
>>      perfc_incr(ptwr_emulations);
>> @@ -5501,7 +5501,8 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
>>      else
>>          rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
>>  
>> -    return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
>> +    return ((rc != X86EMUL_UNHANDLEABLE && !ctxt.event_pending)
>> +            ? EXCRET_fault_fixed : 0);
>>  }
> Wouldn't these two better be adjusted to check for OKAY and RETRY,
> the more that iirc we had settled on it not (yet) being guaranteed to
> see event_pending set whenever getting back EXCEPTION?

In this patch, the key point I am guarding against is that, without the
->inject_*() hooks, some actions which previously took a fail_if() path
now succeed and latch an event.

From that point of view, it doesn't matter how the event became pending,
but the fact that one is means that it was a codepath which would
previously have returned UNHANDLEABLE.


Later patches, which stop raising faults behind the back of the emulator,
are the ones where new consideration is needed towards the handling of
EXCEPTION/event_pending.  Following Tim's feedback, I have more work to
do in patch 9, as propagate_page_fault() raises #PF behind the back of
the emulator for PV guests.

In other words, I think this patch wants to stay like this, and a later
one should change to be more accommodating.

>> @@ -3433,7 +3433,7 @@ static int sh_page_fault(struct vcpu *v,
>>              shadow_continue_emulation(&emul_ctxt, regs);
>>              v->arch.paging.last_write_was_pt = 0;
>>              r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
>> -            if ( r == X86EMUL_OKAY )
>> +            if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.event_pending )
> Aiui you need this for the swint case.

Why?  software interrupts were never previously tolerated in shadow
emulation.

> But wouldn't you then need to add similar checks in OKAY paths elsewhere?

I don't see why I would.  Does my explanation above resolve your concern?

> Or alternatively, wouldn't it be better to have x86_emulate() return EXCEPTION also
> for successful swint emulation (albeit that would likely require other
> not very nice adjustments)?

That would indeed be nasty.  If we were to go down that route, it would
be better to swap X86EMUL_EXCEPTION for X86EMUL_EVENT which explicitly
also includes traps where a register writeback has been completed.

~Andrew
Jan Beulich Nov. 24, 2016, 5:08 p.m. UTC | #8
>>> On 24.11.16 at 18:00, <andrew.cooper3@citrix.com> wrote:
> On 24/11/16 14:53, Jan Beulich wrote:
>>>>> On 23.11.16 at 16:38, <andrew.cooper3@citrix.com> wrote:
>>> --- a/xen/arch/x86/mm.c
>>> +++ b/xen/arch/x86/mm.c
>>> @@ -5377,7 +5377,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
>>>      page_unlock(page);
>>>      put_page(page);
>>>  
>>> -    if ( rc == X86EMUL_UNHANDLEABLE )
>>> +    if ( rc == X86EMUL_UNHANDLEABLE || ptwr_ctxt.ctxt.event_pending )
>>>          goto bail;
>>>  
>>>      perfc_incr(ptwr_emulations);
>>> @@ -5501,7 +5501,8 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
>>>      else
>>>          rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
>>>  
>>> -    return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
>>> +    return ((rc != X86EMUL_UNHANDLEABLE && !ctxt.event_pending)
>>> +            ? EXCRET_fault_fixed : 0);
>>>  }
>> Wouldn't these two better be adjusted to check for OKAY and RETRY,
>> the more that iirc we had settled on it not (yet) being guaranteed to
>> see event_pending set whenever getting back EXCEPTION?
> 
> In this patch, the key point I am guarding against is that, without the
> ->inject_*() hooks, some actions which previously took a fail_if() path
> now succeed and latch an event.
> 
> From that point of view, it doesn't matter how the event became pending,
> but the fact that one is means that it was a codepath which would
> previously have returned UNHANDLEABLE.
> 
> 
> Later patches, which stop raising faults behind the back of emulator,
> are the ones where new consideration is needed towards the handling of
> EXCEPTION/event_pending.  Following Tim's feedback, I have more work to
> do in patch 9, as propagate_page_fault() raises #PF behind the back of
> the emulator for PV guests.
> 
> In other words, I think this patch wants to stay like this, and a later
> one change to be better accommodating.

Okay.

>>> @@ -3433,7 +3433,7 @@ static int sh_page_fault(struct vcpu *v,
>>>              shadow_continue_emulation(&emul_ctxt, regs);
>>>              v->arch.paging.last_write_was_pt = 0;
>>>              r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
>>> -            if ( r == X86EMUL_OKAY )
>>> +            if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.event_pending )
>> Aiui you need this for the swint case.
> 
> Why?  software interrupts were never previously tolerated in shadow
> emulation.

Then why would you expect OKAY together with event_pending set?
I'm not saying swint handling needs to succeed here, but I can't see
anything else to cause that particular state to occur.

>> But wouldn't you then need to add similar checks in OKAY paths elsewhere?
> 
> I don't see why I would.  Does my explanation above resolve your concern?

I'm afraid not: On the same basis as above, code not expecting to
handle swint may now see OKAY together with event_pending set,
and would need to indicate failure to their callers just like you do in
sh_page_fault().

Jan
Andrew Cooper Nov. 24, 2016, 5:19 p.m. UTC | #9
On 24/11/16 17:08, Jan Beulich wrote:
>>>> On 24.11.16 at 18:00, <andrew.cooper3@citrix.com> wrote:
>> On 24/11/16 14:53, Jan Beulich wrote:
>>>>>> On 23.11.16 at 16:38, <andrew.cooper3@citrix.com> wrote:
>>>> --- a/xen/arch/x86/mm.c
>>>> +++ b/xen/arch/x86/mm.c
>>>> @@ -5377,7 +5377,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
>>>>      page_unlock(page);
>>>>      put_page(page);
>>>>  
>>>> -    if ( rc == X86EMUL_UNHANDLEABLE )
>>>> +    if ( rc == X86EMUL_UNHANDLEABLE || ptwr_ctxt.ctxt.event_pending )
>>>>          goto bail;
>>>>  
>>>>      perfc_incr(ptwr_emulations);
>>>> @@ -5501,7 +5501,8 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
>>>>      else
>>>>          rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
>>>>  
>>>> -    return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
>>>> +    return ((rc != X86EMUL_UNHANDLEABLE && !ctxt.event_pending)
>>>> +            ? EXCRET_fault_fixed : 0);
>>>>  }
>>> Wouldn't these two better be adjusted to check for OKAY and RETRY,
>>> the more that iirc we had settled on it not (yet) being guaranteed to
>>> see event_pending set whenever getting back EXCEPTION?
>> In this patch, the key point I am guarding against is that, without the
>> ->inject_*() hooks, some actions which previously took a fail_if() path
>> now succeed and latch an event.
>>
>> From that point of view, it doesn't matter how the event became pending,
>> but the fact that one is means that it was a codepath which would
>> previously have returned UNHANDLEABLE.
>>
>>
>> Later patches, which stop raising faults behind the back of emulator,
>> are the ones where new consideration is needed towards the handling of
>> EXCEPTION/event_pending.  Following Tim's feedback, I have more work to
>> do in patch 9, as propagate_page_fault() raises #PF behind the back of
>> the emulator for PV guests.
>>
>> In other words, I think this patch wants to stay like this, and a later
>> one change to be better accommodating.
> Okay.
>
>>>> @@ -3433,7 +3433,7 @@ static int sh_page_fault(struct vcpu *v,
>>>>              shadow_continue_emulation(&emul_ctxt, regs);
>>>>              v->arch.paging.last_write_was_pt = 0;
>>>>              r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
>>>> -            if ( r == X86EMUL_OKAY )
>>>> +            if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.event_pending )
>>> Aiui you need this for the swint case.
>> Why?  software interrupts were never previously tolerated in shadow
>> emulation.
> Then why would you expect OKAY together with event_pending set?
> I'm not saying swint handling needs to succeed here, but I can't see
> anything else to cause that particular state to occur.

Before this patch, a VM playing race conditions with the emulator could
cause this case to emulate 0xcc, which would fail because of the lack of
the ->inject_sw_interrupt() hook, and return X86EMUL_UNHANDLEABLE.

The changes in this patch now mean that the same case would properly
latch #BP, returning OKAY because it's a trap, not an exception.

By not explicitly failing the OKAY case with an event pending, we are
suddenly opening up rather more functionality than previously existed.

>
>>> But wouldn't you then need to add similar checks in OKAY paths elsewhere?
>> I don't see why I would.  Does my explanation above resolve your concern?
> I'm afraid not: On the same basis as above, code not expecting to
> handle swint may now see OKAY together with event_pending set,
> and would need to indicate failure to their callers just like you do in
> sh_page_fault().

That is my intent with the current code.  I have double checked it, and
it still looks correct.

~Andrew
Tim Deegan Nov. 24, 2016, 5:30 p.m. UTC | #10
At 17:19 +0000 on 24 Nov (1480007992), Andrew Cooper wrote:
> On 24/11/16 17:08, Jan Beulich wrote:
> >>>> On 24.11.16 at 18:00, <andrew.cooper3@citrix.com> wrote:
> >> On 24/11/16 14:53, Jan Beulich wrote:
> >>>>>> On 23.11.16 at 16:38, <andrew.cooper3@citrix.com> wrote:
> >>>> --- a/xen/arch/x86/mm.c
> >>>> +++ b/xen/arch/x86/mm.c
> >>>> @@ -5377,7 +5377,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
> >>>>      page_unlock(page);
> >>>>      put_page(page);
> >>>>  
> >>>> -    if ( rc == X86EMUL_UNHANDLEABLE )
> >>>> +    if ( rc == X86EMUL_UNHANDLEABLE || ptwr_ctxt.ctxt.event_pending )
> >>>>          goto bail;
> >>>>  
> >>>>      perfc_incr(ptwr_emulations);
> >>>> @@ -5501,7 +5501,8 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
> >>>>      else
> >>>>          rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
> >>>>  
> >>>> -    return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
> >>>> +    return ((rc != X86EMUL_UNHANDLEABLE && !ctxt.event_pending)
> >>>> +            ? EXCRET_fault_fixed : 0);
> >>>>  }
> >>> Wouldn't these two better be adjusted to check for OKAY and RETRY,
> >>> the more that iirc we had settled on it not (yet) being guaranteed to
> >>> see event_pending set whenever getting back EXCEPTION?
> >> In this patch, the key point I am guarding against is that, without the
> >> ->inject_*() hooks, some actions which previously took a fail_if() path
> >> now succeed and latch an event.
> >>
> >> From that point of view, it doesn't matter how the event became pending,
> >> but the fact that one is means that it was a codepath which would
> >> previously have returned UNHANDLEABLE.
> >>
> >>
> >> Later patches, which stop raising faults behind the back of emulator,
> >> are the ones where new consideration is needed towards the handling of
> >> EXCEPTION/event_pending.  Following Tim's feedback, I have more work to
> >> do in patch 9, as propagate_page_fault() raises #PF behind the back of
> >> the emulator for PV guests.
> >>
> >> In other words, I think this patch wants to stay like this, and a later
> >> one change to be better accommodating.
> > Okay.
> >
> >>>> @@ -3433,7 +3433,7 @@ static int sh_page_fault(struct vcpu *v,
> >>>>              shadow_continue_emulation(&emul_ctxt, regs);
> >>>>              v->arch.paging.last_write_was_pt = 0;
> >>>>              r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
> >>>> -            if ( r == X86EMUL_OKAY )
> >>>> +            if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.event_pending )
> >>> Aiui you need this for the swint case.
> >> Why?  software interrupts were never previously tolerated in shadow
> >> emulation.
> > Then why would you expect OKAY together with event_pending set?
> > I'm not saying swint handling needs to succeed here, but I can't see
> > anything else to cause that particular state to occur.
> 
> Before this patch, a VM playing race conditions with the emulator could
> cause this case to emulate 0xcc, which would fail because of the lack of
> ->inject_sw_interrupt() hook, and return X86_UNHANDLEABLE.
> 
> The changes in this patch now mean that the same case would properly
> latch #BP, returning OKAY because its a trap not an exception.
> 
> By not explicitly failing the OKAY case with an event pending, we are
> suddenly opening up rather more functionality than previously existed.
> 
> >
> >>> But wouldn't you then need to add similar checks in OKAY paths elsewhere?
> >> I don't see why I would.  Does my explanation above resolve your concern?
> > I'm afraid not: On the same basis as above, code not expecting to
> > handle swint may now see OKAY together with event_pending set,
> > and would need to indicate failure to their callers just like you do in
> > sh_page_fault().
> 
> That is my intent with the current code.  I have double checked it, and
> it still looks correct.

So is that not the case I was worried about, where the emulator
updates register state but we then drop the expected event on the
floor?

Tim.
Andrew Cooper Nov. 24, 2016, 5:37 p.m. UTC | #11
On 24/11/16 17:30, Tim Deegan wrote:
> At 17:19 +0000 on 24 Nov (1480007992), Andrew Cooper wrote:
>> On 24/11/16 17:08, Jan Beulich wrote:
>>>>>> On 24.11.16 at 18:00, <andrew.cooper3@citrix.com> wrote:
>>>> On 24/11/16 14:53, Jan Beulich wrote:
>>>>>>>> On 23.11.16 at 16:38, <andrew.cooper3@citrix.com> wrote:
>>>>>> --- a/xen/arch/x86/mm.c
>>>>>> +++ b/xen/arch/x86/mm.c
>>>>>> @@ -5377,7 +5377,7 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
>>>>>>      page_unlock(page);
>>>>>>      put_page(page);
>>>>>>  
>>>>>> -    if ( rc == X86EMUL_UNHANDLEABLE )
>>>>>> +    if ( rc == X86EMUL_UNHANDLEABLE || ptwr_ctxt.ctxt.event_pending )
>>>>>>          goto bail;
>>>>>>  
>>>>>>      perfc_incr(ptwr_emulations);
>>>>>> @@ -5501,7 +5501,8 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
>>>>>>      else
>>>>>>          rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
>>>>>>  
>>>>>> -    return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
>>>>>> +    return ((rc != X86EMUL_UNHANDLEABLE && !ctxt.event_pending)
>>>>>> +            ? EXCRET_fault_fixed : 0);
>>>>>>  }
>>>>> Wouldn't these two better be adjusted to check for OKAY and RETRY,
>>>>> the more that iirc we had settled on it not (yet) being guaranteed to
>>>>> see event_pending set whenever getting back EXCEPTION?
>>>> In this patch, the key point I am guarding against is that, without the
>>>> ->inject_*() hooks, some actions which previously took a fail_if() path
>>>> now succeed and latch an event.
>>>>
>>>> From that point of view, it doesn't matter how the event became pending,
>>>> but the fact that one is means that it was a codepath which would
>>>> previously have returned UNHANDLEABLE.
>>>>
>>>>
>>>> Later patches, which stop raising faults behind the back of emulator,
>>>> are the ones where new consideration is needed towards the handling of
>>>> EXCEPTION/event_pending.  Following Tim's feedback, I have more work to
>>>> do in patch 9, as propagate_page_fault() raises #PF behind the back of
>>>> the emulator for PV guests.
>>>>
>>>> In other words, I think this patch wants to stay like this, and a later
>>>> one change to be better accommodating.
>>> Okay.
>>>
>>>>>> @@ -3433,7 +3433,7 @@ static int sh_page_fault(struct vcpu *v,
>>>>>>              shadow_continue_emulation(&emul_ctxt, regs);
>>>>>>              v->arch.paging.last_write_was_pt = 0;
>>>>>>              r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
>>>>>> -            if ( r == X86EMUL_OKAY )
>>>>>> +            if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.event_pending )
>>>>> Aiui you need this for the swint case.
>>>> Why?  software interrupts were never previously tolerated in shadow
>>>> emulation.
>>> Then why would you expect OKAY together with event_pending set?
>>> I'm not saying swint handling needs to succeed here, but I can't see
>>> anything else to cause that particular state to occur.
>> Before this patch, a VM playing race conditions with the emulator could
>> cause this case to emulate 0xcc, which would fail because of the lack of
>> ->inject_sw_interrupt() hook, and return X86_UNHANDLEABLE.
>>
>> The changes in this patch now mean that the same case would properly
>> latch #BP, returning OKAY because its a trap not an exception.
>>
>> By not explicitly failing the OKAY case with an event pending, we are
>> suddenly opening up rather more functionality than previously existed.
>>
>>>>> But wouldn't you then need to add similar checks in OKAY paths elsewhere?
>>>> I don't see why I would.  Does my explanation above resolve your concern?
>>> I'm afraid not: On the same basis as above, code not expecting to
>>> handle swint may now see OKAY together with event_pending set,
>>> and would need to indicate failure to their callers just like you do in
>>> sh_page_fault().
>> That is my intent with the current code.  I have double checked it, and
>> it still looks correct.
> So is that not the case I was worried about, where the emulator
> updates register state but we then drop the expected event on the
> floor?

Oh right, yes.  Sorry for being dense.

As an interim between now and getting a proper audit hook, would a bool
permit_traps in x86_emulate_ctxt suffice?

~Andrew
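
Purely for illustration (nothing like this exists in the posted series), such
an interim flag might look roughly like:

    struct x86_emulate_ctxt
    {
        /* ... existing fields ... */

        /* Hypothetical: may OKAY be paired with a latched trap-style event?
         * Callers able to deliver the event opt in; everyone else keeps the
         * current failure behaviour. */
        bool permit_traps;
    };

    /* inject_swint() would then begin with something like:
     *     fail_if(!ctxt->permit_traps);
     */
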
Jan Beulich Nov. 25, 2016, 7:25 a.m. UTC | #12
>>> On 24.11.16 at 18:37, <andrew.cooper3@citrix.com> wrote:
> As an interim between now and getting a proper audit hook, would a bool
> permit_traps in x86_emulate_ctxt suffice?

That's one option; the other would be to do away with only the
exception injection hook for now, and keep the swint one. That's
effectively the same as the boolean flag, just with less overall
code churn (and keeping the hook would not undermine this
series' goal of eliminating event injection behind the back of the
emulator).

Jan
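
In other words (hypothetical sketch, not part of the posted series), the
interim x86_emulate_ops would retain only the software-interrupt hook, with
the same shape it has before this patch:

    struct x86_emulate_ops
    {
        /* ... other hooks; inject_hw_exception is gone ... */

        /* Retained for now. */
        int (*inject_sw_interrupt)(
            enum x86_swint_type type,
            uint8_t vector,
            uint8_t insn_len,
            struct x86_emulate_ctxt *ctxt);
    };
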
Jan Beulich Nov. 25, 2016, 7:42 a.m. UTC | #13
>>> On 24.11.16 at 18:19, <andrew.cooper3@citrix.com> wrote:
> On 24/11/16 17:08, Jan Beulich wrote:
>>>>> On 24.11.16 at 18:00, <andrew.cooper3@citrix.com> wrote:
>>>> But wouldn't you then need to add similar checks in OKAY paths elsewhere?
>>> I don't see why I would.  Does my explanation above resolve your concern?
>> I'm afraid not: On the same basis as above, code not expecting to
>> handle swint may now see OKAY together with event_pending set,
>> and would need to indicate failure to their callers just like you do in
>> sh_page_fault().
> 
> That is my intent with the current code.  I have double checked it, and
> it still looks correct.

Then what about the handling immediately after the x86_emulate()
invocation in _hvm_emulate_one()? Apart from that I've now also
convinced myself that you handle all existing callers.

Jan
Tim Deegan Nov. 25, 2016, 9:41 a.m. UTC | #14
At 00:25 -0700 on 25 Nov (1480033543), Jan Beulich wrote:
> >>> On 24.11.16 at 18:37, <andrew.cooper3@citrix.com> wrote:
> > As an interim between now and getting a proper audit hook, would a bool
> > permit_traps in x86_emulate_ctxt suffice?
> 
> That's one option; the other would be to do away with only the
> exception injection hook for now, and keep the swint one. That's
> effectively the same as the boolean flag, just with less overall
> code churn (and keeping the hook would not undermine this
> series' goal of eliminating event injection behind the back of the
> emulator).

I'd be OK with either of those.  Or indeed with
 - adjusting the emulator so it always returns a non-OK value when it
   needs the caller to do something to make the state consistent; or
 - keeping the injection hook but moving it to the end of the
   emulation, as a sort of write-back of the emulator's internal event
   state to the guest.  That would let the emulator DTRT about
   tracking events internally but still avoid any risk that the caller
   forgets to inject the event.

Tim.
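
A rough sketch of that second bullet, as a hypothetical write-back step at the
tail of x86_emulate() (the hook name and shape are illustrative; the series
defines no such hook):

    /* Hand any latched event back to the caller before returning, so that
     * it cannot be dropped on the floor. */
    if ( ctxt->event_pending && ops->inject_event )
    {
        rc = ops->inject_event(&ctxt->event, ctxt);
        if ( rc == X86EMUL_OKAY )
            ctxt->event_pending = false;
    }
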

Patch

diff --git a/tools/tests/x86_emulator/test_x86_emulator.c b/tools/tests/x86_emulator/test_x86_emulator.c
index 948ee8d..146e15e 100644
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -1,3 +1,4 @@ 
+#include <assert.h>
 #include <errno.h>
 #include <limits.h>
 #include <stdbool.h>
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 790e9c1..c0fbde1 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -568,12 +568,9 @@  static int hvmemul_virtual_to_linear(
         return X86EMUL_UNHANDLEABLE;
 
     /* This is a singleton operation: fail it with an exception. */
-    hvmemul_ctxt->exn_pending = 1;
-    hvmemul_ctxt->trap.vector =
-        (seg == x86_seg_ss) ? TRAP_stack_error : TRAP_gp_fault;
-    hvmemul_ctxt->trap.type = X86_EVENTTYPE_HW_EXCEPTION;
-    hvmemul_ctxt->trap.error_code = 0;
-    hvmemul_ctxt->trap.insn_len = 0;
+    x86_emul_hw_exception((seg == x86_seg_ss)
+                          ? TRAP_stack_error
+                          : TRAP_gp_fault, 0, &hvmemul_ctxt->ctxt);
     return X86EMUL_EXCEPTION;
 }
 
@@ -1562,59 +1559,6 @@  int hvmemul_cpuid(
     return X86EMUL_OKAY;
 }
 
-static int hvmemul_inject_hw_exception(
-    uint8_t vector,
-    int32_t error_code,
-    struct x86_emulate_ctxt *ctxt)
-{
-    struct hvm_emulate_ctxt *hvmemul_ctxt =
-        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-
-    hvmemul_ctxt->exn_pending = 1;
-    hvmemul_ctxt->trap.vector = vector;
-    hvmemul_ctxt->trap.type = X86_EVENTTYPE_HW_EXCEPTION;
-    hvmemul_ctxt->trap.error_code = error_code;
-    hvmemul_ctxt->trap.insn_len = 0;
-
-    return X86EMUL_OKAY;
-}
-
-static int hvmemul_inject_sw_interrupt(
-    enum x86_swint_type type,
-    uint8_t vector,
-    uint8_t insn_len,
-    struct x86_emulate_ctxt *ctxt)
-{
-    struct hvm_emulate_ctxt *hvmemul_ctxt =
-        container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-
-    switch ( type )
-    {
-    case x86_swint_icebp:
-        hvmemul_ctxt->trap.type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
-        break;
-
-    case x86_swint_int3:
-    case x86_swint_into:
-        hvmemul_ctxt->trap.type = X86_EVENTTYPE_SW_EXCEPTION;
-        break;
-
-    case x86_swint_int:
-        hvmemul_ctxt->trap.type = X86_EVENTTYPE_SW_INTERRUPT;
-        break;
-
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    hvmemul_ctxt->exn_pending = 1;
-    hvmemul_ctxt->trap.vector = vector;
-    hvmemul_ctxt->trap.error_code = X86_EVENT_NO_EC;
-    hvmemul_ctxt->trap.insn_len = insn_len;
-
-    return X86EMUL_OKAY;
-}
-
 static int hvmemul_get_fpu(
     void (*exception_callback)(void *, struct cpu_user_regs *),
     void *exception_callback_arg,
@@ -1678,8 +1622,7 @@  static int hvmemul_invlpg(
          * hvmemul_virtual_to_linear() raises exceptions for type/limit
          * violations, so squash them.
          */
-        hvmemul_ctxt->exn_pending = 0;
-        hvmemul_ctxt->trap = (struct x86_event){};
+        x86_emul_reset_event(ctxt);
         rc = X86EMUL_OKAY;
     }
 
@@ -1696,7 +1639,7 @@  static int hvmemul_vmfunc(
 
     rc = hvm_funcs.altp2m_vcpu_emulate_vmfunc(ctxt->regs);
     if ( rc != X86EMUL_OKAY )
-        hvmemul_inject_hw_exception(TRAP_invalid_op, 0, ctxt);
+        x86_emul_hw_exception(TRAP_invalid_op, 0, ctxt);
 
     return rc;
 }
@@ -1720,8 +1663,6 @@  static const struct x86_emulate_ops hvm_emulate_ops = {
     .write_msr     = hvmemul_write_msr,
     .wbinvd        = hvmemul_wbinvd,
     .cpuid         = hvmemul_cpuid,
-    .inject_hw_exception = hvmemul_inject_hw_exception,
-    .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
     .get_fpu       = hvmemul_get_fpu,
     .put_fpu       = hvmemul_put_fpu,
     .invlpg        = hvmemul_invlpg,
@@ -1747,8 +1688,6 @@  static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
     .write_msr     = hvmemul_write_msr_discard,
     .wbinvd        = hvmemul_wbinvd_discard,
     .cpuid         = hvmemul_cpuid,
-    .inject_hw_exception = hvmemul_inject_hw_exception,
-    .inject_sw_interrupt = hvmemul_inject_sw_interrupt,
     .get_fpu       = hvmemul_get_fpu,
     .put_fpu       = hvmemul_put_fpu,
     .invlpg        = hvmemul_invlpg,
@@ -1867,8 +1806,8 @@  int hvm_emulate_one_mmio(unsigned long mfn, unsigned long gla)
         hvm_dump_emulation_state(XENLOG_G_WARNING "MMCFG", &ctxt);
         break;
     case X86EMUL_EXCEPTION:
-        if ( ctxt.exn_pending )
-            hvm_inject_event(&ctxt.trap);
+        if ( ctxt.ctxt.event_pending )
+            hvm_inject_event(&ctxt.ctxt.event);
         /* fallthrough */
     default:
         hvm_emulate_writeback(&ctxt);
@@ -1927,8 +1866,8 @@  void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
         hvm_inject_hw_exception(trapnr, errcode);
         break;
     case X86EMUL_EXCEPTION:
-        if ( ctx.exn_pending )
-            hvm_inject_event(&ctx.trap);
+        if ( ctx.ctxt.event_pending )
+            hvm_inject_event(&ctx.ctxt.event);
         break;
     }
 
@@ -2005,8 +1944,6 @@  void hvm_emulate_init_per_insn(
         hvmemul_ctxt->insn_buf_bytes = insn_bytes;
         memcpy(hvmemul_ctxt->insn_buf, insn_buf, insn_bytes);
     }
-
-    hvmemul_ctxt->exn_pending = 0;
 }
 
 void hvm_emulate_writeback(
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b950842..ef83100 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4076,8 +4076,8 @@  void hvm_ud_intercept(struct cpu_user_regs *regs)
         hvm_inject_hw_exception(TRAP_invalid_op, X86_EVENT_NO_EC);
         break;
     case X86EMUL_EXCEPTION:
-        if ( ctxt.exn_pending )
-            hvm_inject_event(&ctxt.trap);
+        if ( ctxt.ctxt.event_pending )
+            hvm_inject_event(&ctxt.ctxt.event);
         /* fall through */
     default:
         hvm_emulate_writeback(&ctxt);
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 1279f68..abb9d51 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -102,8 +102,8 @@  int handle_mmio(void)
         hvm_dump_emulation_state(XENLOG_G_WARNING "MMIO", &ctxt);
         return 0;
     case X86EMUL_EXCEPTION:
-        if ( ctxt.exn_pending )
-            hvm_inject_event(&ctxt.trap);
+        if ( ctxt.ctxt.event_pending )
+            hvm_inject_event(&ctxt.ctxt.event);
         break;
     default:
         break;
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 9002638..dc3ab44 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -122,7 +122,7 @@  void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
 
     if ( rc == X86EMUL_EXCEPTION )
     {
-        if ( !hvmemul_ctxt->exn_pending )
+        if ( !hvmemul_ctxt->ctxt.event_pending )
         {
             unsigned long intr_info;
 
@@ -133,27 +133,27 @@  void vmx_realmode_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt)
                 gdprintk(XENLOG_ERR, "Exception pending but no info.\n");
                 goto fail;
             }
-            hvmemul_ctxt->trap.vector = (uint8_t)intr_info;
-            hvmemul_ctxt->trap.insn_len = 0;
+            hvmemul_ctxt->ctxt.event.vector = (uint8_t)intr_info;
+            hvmemul_ctxt->ctxt.event.insn_len = 0;
         }
 
         if ( unlikely(curr->domain->debugger_attached) &&
-             ((hvmemul_ctxt->trap.vector == TRAP_debug) ||
-              (hvmemul_ctxt->trap.vector == TRAP_int3)) )
+             ((hvmemul_ctxt->ctxt.event.vector == TRAP_debug) ||
+              (hvmemul_ctxt->ctxt.event.vector == TRAP_int3)) )
         {
             domain_pause_for_debugger();
         }
         else if ( curr->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE )
         {
             gdprintk(XENLOG_ERR, "Exception %02x in protected mode.\n",
-                     hvmemul_ctxt->trap.vector);
+                     hvmemul_ctxt->ctxt.event.vector);
             goto fail;
         }
         else
         {
             realmode_deliver_exception(
-                hvmemul_ctxt->trap.vector,
-                hvmemul_ctxt->trap.insn_len,
+                hvmemul_ctxt->ctxt.event.vector,
+                hvmemul_ctxt->ctxt.event.insn_len,
                 hvmemul_ctxt);
         }
     }
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9901f6f..66ecdab 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5377,7 +5377,7 @@  int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
     page_unlock(page);
     put_page(page);
 
-    if ( rc == X86EMUL_UNHANDLEABLE )
+    if ( rc == X86EMUL_UNHANDLEABLE || ptwr_ctxt.ctxt.event_pending )
         goto bail;
 
     perfc_incr(ptwr_emulations);
@@ -5501,7 +5501,8 @@  int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
     else
         rc = x86_emulate(&ctxt, &mmio_ro_emulate_ops);
 
-    return rc != X86EMUL_UNHANDLEABLE ? EXCRET_fault_fixed : 0;
+    return ((rc != X86EMUL_UNHANDLEABLE && !ctxt.event_pending)
+            ? EXCRET_fault_fixed : 0);
 }
 
 void *alloc_xen_pagetable(void)
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index d70b1c6..84cb6b6 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -3378,7 +3378,7 @@  static int sh_page_fault(struct vcpu *v,
      * would be a good unshadow hint. If we *do* decide to unshadow-on-fault
      * then it must be 'failable': we cannot require the unshadow to succeed.
      */
-    if ( r == X86EMUL_UNHANDLEABLE )
+    if ( r == X86EMUL_UNHANDLEABLE || emul_ctxt.ctxt.event_pending )
     {
         perfc_incr(shadow_fault_emulate_failed);
 #if SHADOW_OPTIMIZATIONS & SHOPT_FAST_EMULATION
@@ -3433,7 +3433,7 @@  static int sh_page_fault(struct vcpu *v,
             shadow_continue_emulation(&emul_ctxt, regs);
             v->arch.paging.last_write_was_pt = 0;
             r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
-            if ( r == X86EMUL_OKAY )
+            if ( r == X86EMUL_OKAY && !emul_ctxt.ctxt.event_pending )
             {
                 emulation_count++;
                 if ( v->arch.paging.last_write_was_pt )
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index f8271b3..768a436 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -680,9 +680,8 @@  static inline int mkec(uint8_t e, int32_t ec, ...)
 
 #define generate_exception_if(p, e, ec...)                                \
 ({  if ( (p) ) {                                                          \
-        fail_if(ops->inject_hw_exception == NULL);                        \
-        rc = ops->inject_hw_exception(e, mkec(e, ##ec, 0), ctxt)          \
-            ? : X86EMUL_EXCEPTION;                                        \
+        x86_emul_hw_exception(e, mkec(e, ##ec, 0), ctxt);                 \
+        rc = X86EMUL_EXCEPTION;                                           \
         goto done;                                                        \
     }                                                                     \
 })
@@ -1604,9 +1603,6 @@  static int inject_swint(enum x86_swint_type type,
 {
     int rc, error_code, fault_type = EXC_GP;
 
-    fail_if(ops->inject_sw_interrupt == NULL);
-    fail_if(ops->inject_hw_exception == NULL);
-
     /*
      * Without hardware support, injecting software interrupts/exceptions is
      * problematic.
@@ -1703,7 +1699,8 @@  static int inject_swint(enum x86_swint_type type,
         ctxt->regs->eip += insn_len;
     }
 
-    rc = ops->inject_sw_interrupt(type, vector, insn_len, ctxt);
+    x86_emul_software_event(type, vector, insn_len, ctxt);
+    rc = X86EMUL_OKAY;
 
  done:
     return rc;
@@ -1912,6 +1909,7 @@  x86_decode(
     /* Initialise output state in x86_emulate_ctxt */
     ctxt->opcode = ~0u;
     ctxt->retire.byte = 0;
+    x86_emul_reset_event(ctxt);
 
     op_bytes = def_op_bytes = ad_bytes = def_ad_bytes = ctxt->addr_size/8;
     if ( op_bytes == 8 )
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 9df083e..ddcd93c 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -388,19 +388,6 @@  struct x86_emulate_ops
         unsigned int *edx,
         struct x86_emulate_ctxt *ctxt);
 
-    /* inject_hw_exception */
-    int (*inject_hw_exception)(
-        uint8_t vector,
-        int32_t error_code,
-        struct x86_emulate_ctxt *ctxt);
-
-    /* inject_sw_interrupt */
-    int (*inject_sw_interrupt)(
-        enum x86_swint_type type,
-        uint8_t vector,
-        uint8_t insn_len,
-        struct x86_emulate_ctxt *ctxt);
-
     /*
      * get_fpu: Load emulated environment's FPU state onto processor.
      *  @exn_callback: On any FPU or SIMD exception, pass control to
@@ -473,6 +460,9 @@  struct x86_emulate_ctxt
         } flags;
         uint8_t byte;
     } retire;
+
+    bool event_pending;
+    struct x86_event event;
 };
 
 /*
@@ -594,4 +584,55 @@  void x86_emulate_free_state(struct x86_emulate_state *state);
 
 #endif
 
+#ifndef ASSERT
+#define ASSERT assert
+#endif
+
+static inline void x86_emul_hw_exception(
+    unsigned int vector, unsigned int error_code, struct x86_emulate_ctxt *ctxt)
+{
+    ASSERT(!ctxt->event_pending);
+
+    ctxt->event.vector = vector;
+    ctxt->event.type = X86_EVENTTYPE_HW_EXCEPTION;
+    ctxt->event.error_code = error_code;
+
+    ctxt->event_pending = true;
+}
+
+static inline void x86_emul_software_event(
+    enum x86_swint_type type, uint8_t vector, uint8_t insn_len,
+    struct x86_emulate_ctxt *ctxt)
+{
+    ASSERT(!ctxt->event_pending);
+
+    switch ( type )
+    {
+    case x86_swint_icebp:
+        ctxt->event.type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
+        break;
+
+    case x86_swint_int3:
+    case x86_swint_into:
+        ctxt->event.type = X86_EVENTTYPE_SW_EXCEPTION;
+        break;
+
+    case x86_swint_int:
+        ctxt->event.type = X86_EVENTTYPE_SW_INTERRUPT;
+        break;
+    }
+
+    ctxt->event.vector = vector;
+    ctxt->event.error_code = X86_EVENT_NO_EC;
+    ctxt->event.insn_len = insn_len;
+
+    ctxt->event_pending = true;
+}
+
+static inline void x86_emul_reset_event(struct x86_emulate_ctxt *ctxt)
+{
+    ctxt->event_pending = false;
+    ctxt->event = (struct x86_event){};
+}
+
 #endif /* __X86_EMULATE_H__ */
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 3b7ec33..d64d834 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -29,9 +29,6 @@  struct hvm_emulate_ctxt {
     unsigned long seg_reg_accessed;
     unsigned long seg_reg_dirty;
 
-    bool_t exn_pending;
-    struct x86_event trap;
-
     uint32_t intr_shadow;
 
     bool_t set_context;