diff mbox

[RFC] x86/vm_event: Allow returning i-cache for emulation

Message ID 20160909154125.15976-1-tamas.lengyel@zentific.com (mailing list archive)
State New, archived
Headers show

Commit Message

Tamas Lengyel Sept. 9, 2016, 3:41 p.m. UTC
When emulating instructions the emulator maintains a small i-cache fetched
from the guest memory. Under certain scenarios this memory region may contain
instructions that a monitor subscriber would prefer to hide, namely INT3, and
instead would prefer to emulate a different instruction in-place.

This patch extends the vm_event interface to allow returning this i-cache via
the vm_event response.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
---
Cc: Paul Durrant <paul.durrant@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Kevin Tian <kevin.tian@intel.com>
Cc: George Dunlap <george.dunlap@eu.citrix.com>
Cc: Razvan Cojocaru <rcojocaru@bitdefender.com>
Cc: Tamas K Lengyel <tamas@tklengyel.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Julien Grall <julien.grall@arm.com>
---
 xen/arch/x86/hvm/emulate.c        | 47 +++++++++++++++++++++++++--------------
 xen/arch/x86/hvm/hvm.c            |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c        |  1 +
 xen/arch/x86/mm/p2m.c             |  7 ++++--
 xen/arch/x86/vm_event.c           | 10 +++++++++
 xen/common/vm_event.c             |  5 ++++-
 xen/include/asm-arm/vm_event.h    |  6 +++++
 xen/include/asm-x86/hvm/emulate.h |  6 +++--
 xen/include/asm-x86/vm_event.h    |  4 +++-
 xen/include/public/vm_event.h     | 11 +++++++--
 10 files changed, 73 insertions(+), 26 deletions(-)

Comments

Razvan Cojocaru Sept. 9, 2016, 3:56 p.m. UTC | #1
On 09/09/16 18:41, Tamas K Lengyel wrote:
> When emulating instructions the emulator maintains a small i-cache fetched
> from the guest memory. Under certain scenarios this memory region may contain
> instructions that a monitor subscriber would prefer to hide, namely INT3, and
> instead would prefer to emulate a different instruction in-place.
> 
> This patch extends the vm_event interface to allow returning this i-cache via
> the vm_event response.
> 
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
> ---
> Cc: Paul Durrant <paul.durrant@citrix.com>
> Cc: Jan Beulich <jbeulich@suse.com>
> Cc: Andrew Cooper <andrew.cooper3@citrix.com>
> Cc: Jun Nakajima <jun.nakajima@intel.com>
> Cc: Kevin Tian <kevin.tian@intel.com>
> Cc: George Dunlap <george.dunlap@eu.citrix.com>
> Cc: Razvan Cojocaru <rcojocaru@bitdefender.com>
> Cc: Tamas K Lengyel <tamas@tklengyel.com>
> Cc: Stefano Stabellini <sstabellini@kernel.org>
> Cc: Julien Grall <julien.grall@arm.com>
> ---
>  xen/arch/x86/hvm/emulate.c        | 47 +++++++++++++++++++++++++--------------
>  xen/arch/x86/hvm/hvm.c            |  2 +-
>  xen/arch/x86/hvm/vmx/vmx.c        |  1 +
>  xen/arch/x86/mm/p2m.c             |  7 ++++--
>  xen/arch/x86/vm_event.c           | 10 +++++++++
>  xen/common/vm_event.c             |  5 ++++-
>  xen/include/asm-arm/vm_event.h    |  6 +++++
>  xen/include/asm-x86/hvm/emulate.h |  6 +++--
>  xen/include/asm-x86/vm_event.h    |  4 +++-
>  xen/include/public/vm_event.h     | 11 +++++++--
>  10 files changed, 73 insertions(+), 26 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index c55ad7b..968fb7b 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -76,9 +76,12 @@ static int set_context_data(void *buffer, unsigned int size)
>      if ( curr->arch.vm_event )
>      {
>          unsigned int safe_size =
> -            min(size, curr->arch.vm_event->emul_read_data.size);
> +            min(size, curr->arch.vm_event->emul_data.size);
>  
> -        memcpy(buffer, curr->arch.vm_event->emul_read_data.data, safe_size);
> +        gdprintk(XENLOG_WARNING, "Got buffer of size: %u. Request is for %u. Safe size: %u\n",
> +                 curr->arch.vm_event->emul_data.size, size, safe_size);
> +
> +        memcpy(buffer, curr->arch.vm_event->emul_data.data, safe_size);
>          memset(buffer + safe_size, 0, size - safe_size);
>          return X86EMUL_OKAY;
>      }
> @@ -825,7 +828,7 @@ static int hvmemul_read(
>      struct hvm_emulate_ctxt *hvmemul_ctxt =
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>  
> -    if ( unlikely(hvmemul_ctxt->set_context) )
> +    if ( unlikely(hvmemul_ctxt->set_context_data) )
>          return set_context_data(p_data, bytes);
>  
>      return __hvmemul_read(
> @@ -1027,7 +1030,7 @@ static int hvmemul_cmpxchg(
>      struct hvm_emulate_ctxt *hvmemul_ctxt =
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>  
> -    if ( unlikely(hvmemul_ctxt->set_context) )
> +    if ( unlikely(hvmemul_ctxt->set_context_data) )
>      {
>          int rc = set_context_data(p_new, bytes);
>  
> @@ -1120,7 +1123,7 @@ static int hvmemul_rep_outs(
>      p2m_type_t p2mt;
>      int rc;
>  
> -    if ( unlikely(hvmemul_ctxt->set_context) )
> +    if ( unlikely(hvmemul_ctxt->set_context_data) )
>          return hvmemul_rep_outs_set_context(src_seg, src_offset, dst_port,
>                                              bytes_per_rep, reps, ctxt);
>  
> @@ -1262,7 +1265,7 @@ static int hvmemul_rep_movs(
>      if ( buf == NULL )
>          return X86EMUL_UNHANDLEABLE;
>  
> -    if ( unlikely(hvmemul_ctxt->set_context) )
> +    if ( unlikely(hvmemul_ctxt->set_context_data) )
>      {
>          rc = set_context_data(buf, bytes);
>  
> @@ -1460,7 +1463,7 @@ static int hvmemul_read_io(
>  
>      *val = 0;
>  
> -    if ( unlikely(hvmemul_ctxt->set_context) )
> +    if ( unlikely(hvmemul_ctxt->set_context_data) )
>          return set_context_data(val, bytes);
>  
>      return hvmemul_do_pio_buffer(port, bytes, IOREQ_READ, val);
> @@ -1783,7 +1786,14 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
>          pfec |= PFEC_user_mode;
>  
>      hvmemul_ctxt->insn_buf_eip = regs->eip;
> -    if ( !vio->mmio_insn_bytes )
> +
> +    if ( unlikely(hvmemul_ctxt->set_context_insn) )
> +    {
> +        memcpy(hvmemul_ctxt->insn_buf, curr->arch.vm_event->emul_data.data,
> +               curr->arch.vm_event->emul_data.size);
> +        hvmemul_ctxt->insn_buf_bytes = curr->arch.vm_event->emul_data.size;
> +    }

All these places where we're working with curr->arch.vm_event should
check that it's not NULL. Other than that, I like the concept.


Thanks,
Razvan
Stefano Stabellini Sept. 9, 2016, 11:11 p.m. UTC | #2
On Fri, 9 Sep 2016, Tamas K Lengyel wrote:
> When emulating instructions the emulator maintains a small i-cache fetched
> from the guest memory. Under certain scenarios this memory region may contain
> instructions that a monitor subscriber would prefer to hide, namely INT3, and
> instead would prefer to emulate a different instruction in-place.
> 
> This patch extends the vm_event interface to allow returning this i-cache via
> the vm_event response.
> 
> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
>
> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
> index 8398af7..161d149 100644
> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -407,8 +407,11 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
>              vm_event_register_write_resume(v, &rsp);
>              break;
>  
> +        case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
> +            vm_event_interrupt_emulate_check(v, &rsp);
> +            break;
> +
>  #ifdef CONFIG_HAS_MEM_ACCESS
> -        case VM_EVENT_REASON_MEM_ACCESS:
>              mem_access_resume(v, &rsp);
>              break;
>  #endif
> diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
> index ccc4b60..e56bc78 100644
> --- a/xen/include/asm-arm/vm_event.h
> +++ b/xen/include/asm-arm/vm_event.h
> @@ -40,6 +40,12 @@ static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
>  }
>  
>  static inline
> +void vm_event_interrupt_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
> +{
> +    /* Not supported on ARM. */
> +}
> +
> +static inline
>  void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
>  {
>      /* Not supported on ARM. */

Doesn't it make sense to return some sort of error?
Tamas Lengyel Sept. 9, 2016, 11:21 p.m. UTC | #3
On Fri, Sep 9, 2016 at 5:11 PM, Stefano Stabellini
<sstabellini@kernel.org> wrote:
> On Fri, 9 Sep 2016, Tamas K Lengyel wrote:
>> When emulating instructions the emulator maintains a small i-cache fetched
>> from the guest memory. Under certain scenarios this memory region may contain
>> instructions that a monitor subscriber would prefer to hide, namely INT3, and
>> instead would prefer to emulate a different instruction in-place.
>>
>> This patch extends the vm_event interface to allow returning this i-cache via
>> the vm_event response.
>>
>> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
>>
>> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
>> index 8398af7..161d149 100644
>> --- a/xen/common/vm_event.c
>> +++ b/xen/common/vm_event.c
>> @@ -407,8 +407,11 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
>>              vm_event_register_write_resume(v, &rsp);
>>              break;
>>
>> +        case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
>> +            vm_event_interrupt_emulate_check(v, &rsp);
>> +            break;
>> +
>>  #ifdef CONFIG_HAS_MEM_ACCESS
>> -        case VM_EVENT_REASON_MEM_ACCESS:
>>              mem_access_resume(v, &rsp);
>>              break;
>>  #endif
>> diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
>> index ccc4b60..e56bc78 100644
>> --- a/xen/include/asm-arm/vm_event.h
>> +++ b/xen/include/asm-arm/vm_event.h
>> @@ -40,6 +40,12 @@ static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
>>  }
>>
>>  static inline
>> +void vm_event_interrupt_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
>> +{
>> +    /* Not supported on ARM. */
>> +}
>> +
>> +static inline
>>  void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
>>  {
>>      /* Not supported on ARM. */
>
> Doesn't it make sense to return some sort of error?

Not really, there is no path for that error to reach the user who
triggered this via the vm_event response. Usually if there is an error
in the operation specified by the response flag we just print a
message to the Xen console. But since these ops are not supported on
ARM at all - there is no emulator - it would be kind of redundant.

Tamas
Stefano Stabellini Sept. 10, 2016, 1:03 a.m. UTC | #4
On Fri, 9 Sep 2016, Tamas Lengyel wrote:
> On Fri, Sep 9, 2016 at 5:11 PM, Stefano Stabellini
> <sstabellini@kernel.org> wrote:
> > On Fri, 9 Sep 2016, Tamas K Lengyel wrote:
> >> When emulating instructions the emulator maintains a small i-cache fetched
> >> from the guest memory. Under certain scenarios this memory region may contain
> >> instructions that a monitor subscriber would prefer to hide, namely INT3, and
> >> instead would prefer to emulate a different instruction in-place.
> >>
> >> This patch extends the vm_event interface to allow returning this i-cache via
> >> the vm_event response.
> >>
> >> Signed-off-by: Tamas K Lengyel <tamas.lengyel@zentific.com>
> >>
> >> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
> >> index 8398af7..161d149 100644
> >> --- a/xen/common/vm_event.c
> >> +++ b/xen/common/vm_event.c
> >> @@ -407,8 +407,11 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
> >>              vm_event_register_write_resume(v, &rsp);
> >>              break;
> >>
> >> +        case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
> >> +            vm_event_interrupt_emulate_check(v, &rsp);
> >> +            break;
> >> +
> >>  #ifdef CONFIG_HAS_MEM_ACCESS
> >> -        case VM_EVENT_REASON_MEM_ACCESS:
> >>              mem_access_resume(v, &rsp);
> >>              break;
> >>  #endif
> >> diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
> >> index ccc4b60..e56bc78 100644
> >> --- a/xen/include/asm-arm/vm_event.h
> >> +++ b/xen/include/asm-arm/vm_event.h
> >> @@ -40,6 +40,12 @@ static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
> >>  }
> >>
> >>  static inline
> >> +void vm_event_interrupt_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
> >> +{
> >> +    /* Not supported on ARM. */
> >> +}
> >> +
> >> +static inline
> >>  void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
> >>  {
> >>      /* Not supported on ARM. */
> >
> > Doesn't it make sense to return some sort of error?
> 
> Not really, there is no path for that error to reach the user who
> triggered this via the vm_event response. Usually if there is an error
> in the operation specified by the response flag we just print a
> message to the Xen console. But since these ops are not supported on
> ARM at all - there is no emulator - it would be kind-of be redundant.

All right then
George Dunlap Sept. 12, 2016, 2:16 p.m. UTC | #5
On 09/09/16 16:41, Tamas K Lengyel wrote:
> When emulating instructions the emulator maintains a small i-cache fetched
> from the guest memory. Under certain scenarios this memory region may contain
> instructions that a monitor subscriber would prefer to hide, namely INT3, and
> instead would prefer to emulate a different instruction in-place.
> 
> This patch extends the vm_event interface to allow returning this i-cache via
> the vm_event response.

So do you have a problem right now with stale caches (i.e., you modify
an INT3 back to something else in guest RAM but the emulator still
emulates the INT3)?  Or is the idea here that instead of doing the
replace-singlestep-replace loop, you just tell the emulator, "Here,
emulate this instead" (without removing the INT3 from guest memory at all)?

(Or am I completely missing the point here?)

 -George
Tamas Lengyel Sept. 12, 2016, 2:31 p.m. UTC | #6
On Sep 12, 2016 08:17, "George Dunlap" <george.dunlap@citrix.com> wrote:
>
> On 09/09/16 16:41, Tamas K Lengyel wrote:
> > When emulating instructions the emulator maintains a small i-cache
fetched
> > from the guest memory. Under certain scenarios this memory region may
contain
> > instructions that a monitor subscriber would prefer to hide, namely
INT3, and
> > instead would prefer to emulate a different instruction in-place.
> >
> > This patch extends the vm_event interface to allow returning this
i-cache via
> > the vm_event response.
>
> So do you have a problem right now with stale caches (i.e., you modify
> an INT3 back to something else in guest RAM but the emulator still
> emulates the INT3)?  Or is the idea here that instead of doing the
> replace-singlestep-replace loop, you just tell the emulator, "Here,
> emulate this instead" (without removing the INT3 from guest memory at
all)?
>
> (Or am I completely missing the point here?)
>

Hi George,
it's the latter! This would make tracing with int3s a bit more flexible on
multi-vcpu guests as there would be no race condition. I use altp2m right
now to get around this problem but it's always good to have a backup in
case altp2m is disabled.

Tamas
George Dunlap Sept. 12, 2016, 2:34 p.m. UTC | #7
On 12/09/16 15:31, Tamas Lengyel wrote:
> On Sep 12, 2016 08:17, "George Dunlap" <george.dunlap@citrix.com> wrote:
>>
>> On 09/09/16 16:41, Tamas K Lengyel wrote:
>>> When emulating instructions the emulator maintains a small i-cache
> fetched
>>> from the guest memory. Under certain scenarios this memory region may
> contain
>>> instructions that a monitor subscriber would prefer to hide, namely
> INT3, and
>>> instead would prefer to emulate a different instruction in-place.
>>>
>>> This patch extends the vm_event interface to allow returning this
> i-cache via
>>> the vm_event response.
>>
>> So do you have a problem right now with stale caches (i.e., you modify
>> an INT3 back to something else in guest RAM but the emulator still
>> emulates the INT3)?  Or is the idea here that instead of doing the
>> replace-singlestep-replace loop, you just tell the emulator, "Here,
>> emulate this instead" (without removing the INT3 from guest memory at
> all)?
>>
>> (Or am I completely missing the point here?)
>>
> 
> Hi George,
> it's the latter! This would make tracing with int3s a bit more flexible on
> multi-vcpu guests as there would be no racecondition. I use altp2m right
> now to get around this problem but it's always good to have a backup in
> case altp2m is disabled.

OK -- in that case, it sounds like a good idea (particularly since
there's a race I hadn't considered).  :-)

 -George
Jan Beulich Sept. 12, 2016, 2:56 p.m. UTC | #8
>>> On 09.09.16 at 17:41, <tamas.lengyel@zentific.com> wrote:
> When emulating instructions the emulator maintains a small i-cache fetched
> from the guest memory. Under certain scenarios this memory region may contain
> instructions that a monitor subscriber would prefer to hide, namely INT3, and
> instead would prefer to emulate a different instruction in-place.
> 
> This patch extends the vm_event interface to allow returning this i-cache via
> the vm_event response.

Please try to explain better what this change is about, perhaps along
the lines of what George used as description, which you then
confirmed.

> @@ -1783,7 +1786,14 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
>          pfec |= PFEC_user_mode;
>  
>      hvmemul_ctxt->insn_buf_eip = regs->eip;
> -    if ( !vio->mmio_insn_bytes )
> +
> +    if ( unlikely(hvmemul_ctxt->set_context_insn) )
> +    {
> +        memcpy(hvmemul_ctxt->insn_buf, curr->arch.vm_event->emul_data.data,
> +               curr->arch.vm_event->emul_data.size);
> +        hvmemul_ctxt->insn_buf_bytes = curr->arch.vm_event->emul_data.size;
> +    }
> +    else if ( !vio->mmio_insn_bytes )

I'm not sure about this ordering: Do you really mean to allow an
external entity to override insn bytes e.g. in an MMIO retry, i.e.
allowing the retried insn to be different from the original one?

And additionally I think you need to deal with the case of the
supplied data not being a full insn. There shouldn't be any
fetching from guest memory in that case imo, emulation should
just fail.

> @@ -1927,17 +1937,19 @@ void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
>      struct hvm_emulate_ctxt ctx = {{ 0 }};
>      int rc;
>  
> +    gdprintk(XENLOG_WARNING, "memaccess emulate one called\n");
> +
>      hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
>  
> -    switch ( kind )
> -    {
> -    case EMUL_KIND_NOWRITE:
> +    if ( kind == EMUL_KIND_NOWRITE )
>          rc = hvm_emulate_one_no_write(&ctx);
> -        break;
> -    case EMUL_KIND_SET_CONTEXT:
> -        ctx.set_context = 1;
> -        /* Intentional fall-through. */
> -    default:
> +    else
> +    {
> +         if ( kind == EMUL_KIND_SET_CONTEXT_DATA )
> +            ctx.set_context_data = 1;
> +         else if ( kind == EMUL_KIND_SET_CONTEXT_INSN )
> +            ctx.set_context_insn = 1;
> +
>          rc = hvm_emulate_one(&ctx);
>      }

Could you try to retain the switch() statement?

> --- a/xen/arch/x86/vm_event.c
> +++ b/xen/arch/x86/vm_event.c
> @@ -66,6 +66,16 @@ void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
>      hvm_toggle_singlestep(v);
>  }
>  
> +void vm_event_interrupt_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
> +{
> +    if ( !!(rsp->flags & VM_EVENT_FLAG_EMULATE) &&
> +         !!(rsp->flags & VM_EVENT_FLAG_SET_EMUL_INSN_DATA) )

Please avoid !! when not really needed.

> +    {
> +        v->arch.vm_event->emulate_flags = rsp->flags;
> +        v->arch.vm_event->emul_data = rsp->data.emul_data;
> +    }
> +}

Overall I'm having difficulty associating the function's name with
what it does.

> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -407,8 +407,11 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
>              vm_event_register_write_resume(v, &rsp);
>              break;
>  
> +        case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
> +            vm_event_interrupt_emulate_check(v, &rsp);
> +            break;
> +
>  #ifdef CONFIG_HAS_MEM_ACCESS
> -        case VM_EVENT_REASON_MEM_ACCESS:
>              mem_access_resume(v, &rsp);
>              break;

I don't think you meant to remove that line?

Jan
George Dunlap Sept. 12, 2016, 2:59 p.m. UTC | #9
On 12/09/16 15:56, Jan Beulich wrote:
>>>> On 09.09.16 at 17:41, <tamas.lengyel@zentific.com> wrote:
>> When emulating instructions the emulator maintains a small i-cache fetched
>> from the guest memory. Under certain scenarios this memory region may contain
>> instructions that a monitor subscriber would prefer to hide, namely INT3, and
>> instead would prefer to emulate a different instruction in-place.
>>
>> This patch extends the vm_event interface to allow returning this i-cache via
>> the vm_event response.
> 
> Please try to explain better what this change is about, perhaps along
> the lines of what George used as description, which you then
> confirmed.
> 
>> @@ -1783,7 +1786,14 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
>>          pfec |= PFEC_user_mode;
>>  
>>      hvmemul_ctxt->insn_buf_eip = regs->eip;
>> -    if ( !vio->mmio_insn_bytes )
>> +
>> +    if ( unlikely(hvmemul_ctxt->set_context_insn) )
>> +    {
>> +        memcpy(hvmemul_ctxt->insn_buf, curr->arch.vm_event->emul_data.data,
>> +               curr->arch.vm_event->emul_data.size);
>> +        hvmemul_ctxt->insn_buf_bytes = curr->arch.vm_event->emul_data.size;
>> +    }
>> +    else if ( !vio->mmio_insn_bytes )
> 
> I'm not sure about this ordering: Do you really mean to allow an
> external entity to override insn bytes e.g. in an MMIO retry, i.e.
> allowing the retried insn to be different from the original one?
> 
> And additionally I think you need to deal with the case of the
> supplied data not being a full insn. There shouldn't be any
> fetching from guest memory in that case imo, emulation should
> just fail.

This patch was marked "RFC", which to me means, "This isn't ready for a
full review, but what do you think of the basic idea"?

I would wait until Tamas submits a non-RFC patch before doing a detailed
review, as it's quite possible this was just a prototype he didn't want
to bother fixing up until he knew it would be accepted.

 -George
Tamas Lengyel Sept. 12, 2016, 3:04 p.m. UTC | #10
On Sep 12, 2016 08:59, "George Dunlap" <george.dunlap@citrix.com> wrote:
>
> On 12/09/16 15:56, Jan Beulich wrote:
> >>>> On 09.09.16 at 17:41, <tamas.lengyel@zentific.com> wrote:
> >> When emulating instructions the emulator maintains a small i-cache
fetched
> >> from the guest memory. Under certain scenarios this memory region may
contain
> >> instructions that a monitor subscriber would prefer to hide, namely
INT3, and
> >> instead would prefer to emulate a different instruction in-place.
> >>
> >> This patch extends the vm_event interface to allow returning this
i-cache via
> >> the vm_event response.
> >
> > Please try to explain better what this change is about, perhaps along
> > the lines of what George used as description, which you then
> > confirmed.
> >
> >> @@ -1783,7 +1786,14 @@ static int _hvm_emulate_one(struct
hvm_emulate_ctxt *hvmemul_ctxt,
> >>          pfec |= PFEC_user_mode;
> >>
> >>      hvmemul_ctxt->insn_buf_eip = regs->eip;
> >> -    if ( !vio->mmio_insn_bytes )
> >> +
> >> +    if ( unlikely(hvmemul_ctxt->set_context_insn) )
> >> +    {
> >> +        memcpy(hvmemul_ctxt->insn_buf,
curr->arch.vm_event->emul_data.data,
> >> +               curr->arch.vm_event->emul_data.size);
> >> +        hvmemul_ctxt->insn_buf_bytes =
curr->arch.vm_event->emul_data.size;
> >> +    }
> >> +    else if ( !vio->mmio_insn_bytes )
> >
> > I'm not sure about this ordering: Do you really mean to allow an
> > external entity to override insn bytes e.g. in an MMIO retry, i.e.
> > allowing the retried insn to be different from the original one?
> >
> > And additionally I think you need to deal with the case of the
> > supplied data not being a full insn. There shouldn't be any
> > fetching from guest memory in that case imo, emulation should
> > just fail.
>
> This patch was marked "RFC", which to me means, "This isn't ready for a
> full review, but what do you think of the basic idea"?
>
> I would wait until Tamas submits a non-RFC patch before doing a detailed
> review, as it's quite possible this was just a prototype he didn't want
> to bother fixing up until he knew it would be accepted.
>
>  -George

Or that it even makes sense for others too :) I still appreciate the review
though.

Tamas
Tamas Lengyel Sept. 12, 2016, 3:48 p.m. UTC | #11
On Mon, Sep 12, 2016 at 8:56 AM, Jan Beulich <JBeulich@suse.com> wrote:

> >>> On 09.09.16 at 17:41, <tamas.lengyel@zentific.com> wrote:
> > When emulating instructions the emulator maintains a small i-cache
> fetched
> > from the guest memory. Under certain scenarios this memory region may
> contain
> > instructions that a monitor subscriber would prefer to hide, namely
> INT3, and
> > instead would prefer to emulate a different instruction in-place.
> >
> > This patch extends the vm_event interface to allow returning this
> i-cache via
> > the vm_event response.
>
> Please try to explain better what this change is about, perhaps along
> the lines of what George used as description, which you then
> confirmed.
>

Sure.


>
> > @@ -1783,7 +1786,14 @@ static int _hvm_emulate_one(struct
> hvm_emulate_ctxt *hvmemul_ctxt,
> >          pfec |= PFEC_user_mode;
> >
> >      hvmemul_ctxt->insn_buf_eip = regs->eip;
> > -    if ( !vio->mmio_insn_bytes )
> > +
> > +    if ( unlikely(hvmemul_ctxt->set_context_insn) )
> > +    {
> > +        memcpy(hvmemul_ctxt->insn_buf, curr->arch.vm_event->emul_
> data.data,
> > +               curr->arch.vm_event->emul_data.size);
> > +        hvmemul_ctxt->insn_buf_bytes = curr->arch.vm_event->emul_
> data.size;
> > +    }
> > +    else if ( !vio->mmio_insn_bytes )
>
> I'm not sure about this ordering: Do you really mean to allow an
> external entity to override insn bytes e.g. in an MMIO retry, i.e.
> allowing the retried insn to be different from the original one?
>

I'm not sure but there shouldn't be INT3's coming from MMIO, right? So we
would not have a response-opportunity to override it.


>
> And additionally I think you need to deal with the case of the
> supplied data not being a full insn. There shouldn't be any
> fetching from guest memory in that case imo, emulation should
> just fail.
>

So the idea was to allow partial "masking" of the i-cache. For example, if
all I want to hide is the 0xCC then it's enough to return 1 byte in
vm_event, the rest can be (and already is) grabbed from memory.


> > @@ -1927,17 +1937,19 @@ void hvm_mem_access_emulate_one(enum emul_kind
> kind, unsigned int trapnr,
> >      struct hvm_emulate_ctxt ctx = {{ 0 }};
> >      int rc;
> >
> > +    gdprintk(XENLOG_WARNING, "memaccess emulate one called\n");
> > +
> >      hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
> >
> > -    switch ( kind )
> > -    {
> > -    case EMUL_KIND_NOWRITE:
> > +    if ( kind == EMUL_KIND_NOWRITE )
> >          rc = hvm_emulate_one_no_write(&ctx);
> > -        break;
> > -    case EMUL_KIND_SET_CONTEXT:
> > -        ctx.set_context = 1;
> > -        /* Intentional fall-through. */
> > -    default:
> > +    else
> > +    {
> > +         if ( kind == EMUL_KIND_SET_CONTEXT_DATA )
> > +            ctx.set_context_data = 1;
> > +         else if ( kind == EMUL_KIND_SET_CONTEXT_INSN )
> > +            ctx.set_context_insn = 1;
> > +
> >          rc = hvm_emulate_one(&ctx);
> >      }
>
> Could you try to retain the switch() statement?
>

Sure.


>
> > --- a/xen/arch/x86/vm_event.c
> > +++ b/xen/arch/x86/vm_event.c
> > @@ -66,6 +66,16 @@ void vm_event_toggle_singlestep(struct domain *d,
> struct vcpu *v)
> >      hvm_toggle_singlestep(v);
> >  }
> >
> > +void vm_event_interrupt_emulate_check(struct vcpu *v,
> vm_event_response_t *rsp)
> > +{
> > +    if ( !!(rsp->flags & VM_EVENT_FLAG_EMULATE) &&
> > +         !!(rsp->flags & VM_EVENT_FLAG_SET_EMUL_INSN_DATA) )
>
> Please avoid !! when not really needed.
>

Ack.


>
> > +    {
> > +        v->arch.vm_event->emulate_flags = rsp->flags;
> > +        v->arch.vm_event->emul_data = rsp->data.emul_data;
> > +    }
> > +}
>
> Overall I'm having difficulty associating the function's name with
> what it does.
>

Yea, I'm not too happy with it either. Will move some things around.


>
> > --- a/xen/common/vm_event.c
> > +++ b/xen/common/vm_event.c
> > @@ -407,8 +407,11 @@ void vm_event_resume(struct domain *d, struct
> vm_event_domain *ved)
> >              vm_event_register_write_resume(v, &rsp);
> >              break;
> >
> > +        case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
> > +            vm_event_interrupt_emulate_check(v, &rsp);
> > +            break;
> > +
> >  #ifdef CONFIG_HAS_MEM_ACCESS
> > -        case VM_EVENT_REASON_MEM_ACCESS:
> >              mem_access_resume(v, &rsp);
> >              break;
>
> I don't think you meant to remove that line?
>

Oops.


>
> Jan
>
>
Thanks,
Tamas
Jan Beulich Sept. 12, 2016, 4:02 p.m. UTC | #12
>>> On 12.09.16 at 17:48, <tamas.lengyel@zentific.com> wrote:
> On Mon, Sep 12, 2016 at 8:56 AM, Jan Beulich <JBeulich@suse.com> wrote:
>> >>> On 09.09.16 at 17:41, <tamas.lengyel@zentific.com> wrote:
>> > @@ -1783,7 +1786,14 @@ static int _hvm_emulate_one(struct
>> hvm_emulate_ctxt *hvmemul_ctxt,
>> >          pfec |= PFEC_user_mode;
>> >
>> >      hvmemul_ctxt->insn_buf_eip = regs->eip;
>> > -    if ( !vio->mmio_insn_bytes )
>> > +
>> > +    if ( unlikely(hvmemul_ctxt->set_context_insn) )
>> > +    {
>> > +        memcpy(hvmemul_ctxt->insn_buf, curr->arch.vm_event->emul_
>> data.data,
>> > +               curr->arch.vm_event->emul_data.size);
>> > +        hvmemul_ctxt->insn_buf_bytes = curr->arch.vm_event->emul_
>> data.size;
>> > +    }
>> > +    else if ( !vio->mmio_insn_bytes )
>>
>> I'm not sure about this ordering: Do you really mean to allow an
>> external entity to override insn bytes e.g. in an MMIO retry, i.e.
>> allowing the retried insn to be different from the original one?
>>
> 
> I'm not sure but there shouldn't be INT3's coming from MMIO, right? So we
> would not have a response-opportunity to override it.

Can't you modify insns at any time from the monitoring app? If
so, wouldn't there be a chance for things to change between
the original insn invocation and the retry (after qemu completed
the request to it)?

>> And additionally I think you need to deal with the case of the
>> supplied data not being a full insn. There shouldn't be any
>> fetching from guest memory in that case imo, emulation should
>> just fail.
>>
> 
> So the idea was to allow partial "masking" of the i-cache. For example, if
> all I want to hide is the 0xCC then it's enough to return 1 byte in
> vm_event, the rest can be (and already is) grabbed from  memory.

Oh, interesting. That makes it even more problematic, imo. Can you
ensure that your patching and whatever the guest may do to its own
insn stream actually remain consistent, even in situations like the
insn crossing a page boundary? IOW what you suggest seems pretty
fragile to me, but would in any case need spelling out very clearly.

Jan
Tamas Lengyel Sept. 12, 2016, 4:30 p.m. UTC | #13
On Mon, Sep 12, 2016 at 10:02 AM, Jan Beulich <JBeulich@suse.com> wrote:
>>>> On 12.09.16 at 17:48, <tamas.lengyel@zentific.com> wrote:
>> On Mon, Sep 12, 2016 at 8:56 AM, Jan Beulich <JBeulich@suse.com> wrote:
>>> >>> On 09.09.16 at 17:41, <tamas.lengyel@zentific.com> wrote:
>>> > @@ -1783,7 +1786,14 @@ static int _hvm_emulate_one(struct
>>> hvm_emulate_ctxt *hvmemul_ctxt,
>>> >          pfec |= PFEC_user_mode;
>>> >
>>> >      hvmemul_ctxt->insn_buf_eip = regs->eip;
>>> > -    if ( !vio->mmio_insn_bytes )
>>> > +
>>> > +    if ( unlikely(hvmemul_ctxt->set_context_insn) )
>>> > +    {
>>> > +        memcpy(hvmemul_ctxt->insn_buf, curr->arch.vm_event->emul_
>>> data.data,
>>> > +               curr->arch.vm_event->emul_data.size);
>>> > +        hvmemul_ctxt->insn_buf_bytes = curr->arch.vm_event->emul_
>>> data.size;
>>> > +    }
>>> > +    else if ( !vio->mmio_insn_bytes )
>>>
>>> I'm not sure about this ordering: Do you really mean to allow an
>>> external entity to override insn bytes e.g. in an MMIO retry, i.e.
>>> allowing the retried insn to be different from the original one?
>>>
>>
>> I'm not sure but there shouldn't be INT3's coming from MMIO, right? So we
>> would not have a response-opportunity to override it.
>
> Can't you modify insns at any time from the monitoring app? If
> so, wouldn't there be a chance for things to change between
> the original insn invocation and the retry (after qemu completed
> the request to it)?

No, right now it's only allowed in response to an INT3 event as that's
really the only case where I see this being needed. We could extend
it's scope so we can trigger it in response to any kind of vm_event,
but right now I don't see that being necessary.

>
>>> And additionally I think you need to deal with the case of the
>>> supplied data not being a full insn. There shouldn't be any
>>> fetching from guest memory in that case imo, emulation should
>>> just fail.
>>>
>>
>> So the idea was to allow partial "masking" of the i-cache. For example, if
>> all I want to hide is the 0xCC then it's enough to return 1 byte in
>> vm_event, the rest can be (and already is) grabbed from  memory.
>
> Oh, interesting. That makes it even more problematic, imo. Can you
> ensure that your patching any whatever the guest may do to its own
> insn stream actually remains consistent, even in situations like the
> insn crossing a page boundary? IOW what you suggest seems pretty
> fragile to me, but would in any case need spelling out very clearly.

Ah, good point. With LibVMI we already handle reading a buffer with VA
addressing even if the PA layout crosses page boundaries, so it's
going to be simpler and safer to have the client return a full buffer
as you suggested.

Thanks,
Tamas
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index c55ad7b..968fb7b 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -76,9 +76,12 @@  static int set_context_data(void *buffer, unsigned int size)
     if ( curr->arch.vm_event )
     {
         unsigned int safe_size =
-            min(size, curr->arch.vm_event->emul_read_data.size);
+            min(size, curr->arch.vm_event->emul_data.size);
 
-        memcpy(buffer, curr->arch.vm_event->emul_read_data.data, safe_size);
+        gdprintk(XENLOG_WARNING, "Got buffer of size: %u. Request is for %u. Safe size: %u\n",
+                 curr->arch.vm_event->emul_data.size, size, safe_size);
+
+        memcpy(buffer, curr->arch.vm_event->emul_data.data, safe_size);
         memset(buffer + safe_size, 0, size - safe_size);
         return X86EMUL_OKAY;
     }
@@ -825,7 +828,7 @@  static int hvmemul_read(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
 
-    if ( unlikely(hvmemul_ctxt->set_context) )
+    if ( unlikely(hvmemul_ctxt->set_context_data) )
         return set_context_data(p_data, bytes);
 
     return __hvmemul_read(
@@ -1027,7 +1030,7 @@  static int hvmemul_cmpxchg(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
 
-    if ( unlikely(hvmemul_ctxt->set_context) )
+    if ( unlikely(hvmemul_ctxt->set_context_data) )
     {
         int rc = set_context_data(p_new, bytes);
 
@@ -1120,7 +1123,7 @@  static int hvmemul_rep_outs(
     p2m_type_t p2mt;
     int rc;
 
-    if ( unlikely(hvmemul_ctxt->set_context) )
+    if ( unlikely(hvmemul_ctxt->set_context_data) )
         return hvmemul_rep_outs_set_context(src_seg, src_offset, dst_port,
                                             bytes_per_rep, reps, ctxt);
 
@@ -1262,7 +1265,7 @@  static int hvmemul_rep_movs(
     if ( buf == NULL )
         return X86EMUL_UNHANDLEABLE;
 
-    if ( unlikely(hvmemul_ctxt->set_context) )
+    if ( unlikely(hvmemul_ctxt->set_context_data) )
     {
         rc = set_context_data(buf, bytes);
 
@@ -1460,7 +1463,7 @@  static int hvmemul_read_io(
 
     *val = 0;
 
-    if ( unlikely(hvmemul_ctxt->set_context) )
+    if ( unlikely(hvmemul_ctxt->set_context_data) )
         return set_context_data(val, bytes);
 
     return hvmemul_do_pio_buffer(port, bytes, IOREQ_READ, val);
@@ -1783,7 +1786,17 @@  static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
         pfec |= PFEC_user_mode;
 
     hvmemul_ctxt->insn_buf_eip = regs->eip;
-    if ( !vio->mmio_insn_bytes )
+
+    if ( unlikely(hvmemul_ctxt->set_context_insn) )
+    {
+        /* Bound the copy so an oversized response size cannot overrun insn_buf. */
+        hvmemul_ctxt->insn_buf_bytes =
+            min_t(unsigned int, curr->arch.vm_event->emul_data.size,
+                  sizeof(hvmemul_ctxt->insn_buf));
+        memcpy(hvmemul_ctxt->insn_buf, curr->arch.vm_event->emul_data.data,
+               hvmemul_ctxt->insn_buf_bytes);
+    }
+    else if ( !vio->mmio_insn_bytes )
     {
         hvmemul_ctxt->insn_buf_bytes =
             hvm_get_insn_bytes(curr, hvmemul_ctxt->insn_buf) ?:
@@ -1927,17 +1937,19 @@  void hvm_mem_access_emulate_one(enum emul_kind kind, unsigned int trapnr,
     struct hvm_emulate_ctxt ctx = {{ 0 }};
     int rc;
 
+    gdprintk(XENLOG_WARNING, "memaccess emulate one called\n");
+
     hvm_emulate_prepare(&ctx, guest_cpu_user_regs());
 
-    switch ( kind )
-    {
-    case EMUL_KIND_NOWRITE:
+    if ( kind == EMUL_KIND_NOWRITE )
         rc = hvm_emulate_one_no_write(&ctx);
-        break;
-    case EMUL_KIND_SET_CONTEXT:
-        ctx.set_context = 1;
-        /* Intentional fall-through. */
-    default:
+    else
+    {
+        if ( kind == EMUL_KIND_SET_CONTEXT_DATA )
+            ctx.set_context_data = 1;
+        else if ( kind == EMUL_KIND_SET_CONTEXT_INSN )
+            ctx.set_context_insn = 1;
+
         rc = hvm_emulate_one(&ctx);
     }
 
@@ -1973,7 +1985,8 @@  void hvm_emulate_prepare(
     hvmemul_ctxt->ctxt.force_writeback = 1;
     hvmemul_ctxt->seg_reg_accessed = 0;
     hvmemul_ctxt->seg_reg_dirty = 0;
-    hvmemul_ctxt->set_context = 0;
+    hvmemul_ctxt->set_context_data = 0;
+    hvmemul_ctxt->set_context_insn = 0;
     hvmemul_get_seg_reg(x86_seg_cs, hvmemul_ctxt);
     hvmemul_get_seg_reg(x86_seg_ss, hvmemul_ctxt);
 }
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 2c89984..fb11f3b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -481,7 +481,7 @@  void hvm_do_resume(struct vcpu *v)
 
             if ( v->arch.vm_event->emulate_flags &
                  VM_EVENT_FLAG_SET_EMUL_READ_DATA )
-                kind = EMUL_KIND_SET_CONTEXT;
+                kind = EMUL_KIND_SET_CONTEXT_DATA;
             else if ( v->arch.vm_event->emulate_flags &
                       VM_EVENT_FLAG_EMULATE_NOWRITE )
                 kind = EMUL_KIND_NOWRITE;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index bb7a329..41dc678 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -57,6 +57,7 @@ 
 #include <asm/altp2m.h>
 #include <asm/event.h>
 #include <asm/monitor.h>
+#include <asm/vm_event.h>
 #include <public/arch-x86/cpuid.h>
 
 static bool_t __initdata opt_force_ept;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 812dbf6..dfed0b3 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1641,8 +1641,11 @@  void p2m_mem_access_emulate_check(struct vcpu *v,
 
         v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
 
-        if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) )
-            v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
+        if ( !!(rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) )
+        {
+            gdprintk(XENLOG_WARNING, "Mem access emulate check setting emul read data buffer\n");
+            v->arch.vm_event->emul_data = rsp->data.emul_data;
+        }
     }
 }
 
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index e938ca3..4a9327f 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -66,6 +66,16 @@  void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
     hvm_toggle_singlestep(v);
 }
 
+void vm_event_interrupt_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
+{
+    if ( !!(rsp->flags & VM_EVENT_FLAG_EMULATE) &&
+         !!(rsp->flags & VM_EVENT_FLAG_SET_EMUL_INSN_DATA) )
+    {
+        v->arch.vm_event->emulate_flags = rsp->flags;
+        v->arch.vm_event->emul_data = rsp->data.emul_data;
+    }
+}
+
 void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
 {
     if ( rsp->flags & VM_EVENT_FLAG_DENY )
diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
index 8398af7..161d149 100644
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -407,8 +407,12 @@  void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
             vm_event_register_write_resume(v, &rsp);
             break;
 
+        case VM_EVENT_REASON_SOFTWARE_BREAKPOINT:
+            vm_event_interrupt_emulate_check(v, &rsp);
+            break;
+
 #ifdef CONFIG_HAS_MEM_ACCESS
         case VM_EVENT_REASON_MEM_ACCESS:
             mem_access_resume(v, &rsp);
             break;
 #endif
diff --git a/xen/include/asm-arm/vm_event.h b/xen/include/asm-arm/vm_event.h
index ccc4b60..e56bc78 100644
--- a/xen/include/asm-arm/vm_event.h
+++ b/xen/include/asm-arm/vm_event.h
@@ -40,6 +40,12 @@  static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
 }
 
 static inline
+void vm_event_interrupt_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on ARM. */
+}
+
+static inline
 void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
 {
     /* Not supported on ARM. */
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 142d1b6..8e69371 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -33,13 +33,15 @@  struct hvm_emulate_ctxt {
 
     uint32_t intr_shadow;
 
-    bool_t set_context;
+    bool_t set_context_data;
+    bool_t set_context_insn;
 };
 
 enum emul_kind {
     EMUL_KIND_NORMAL,
     EMUL_KIND_NOWRITE,
-    EMUL_KIND_SET_CONTEXT
+    EMUL_KIND_SET_CONTEXT_DATA,
+    EMUL_KIND_SET_CONTEXT_INSN
 };
 
 int hvm_emulate_one(
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 7e6adff..2e3f30b 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -27,7 +27,7 @@ 
  */
 struct arch_vm_event {
     uint32_t emulate_flags;
-    struct vm_event_emul_read_data emul_read_data;
+    struct vm_event_emul_data emul_data;
     struct monitor_write_data write_data;
 };
 
@@ -37,6 +37,8 @@  void vm_event_cleanup_domain(struct domain *d);
 
 void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v);
 
+void vm_event_interrupt_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
+
 void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);
 
 void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
diff --git a/xen/include/public/vm_event.h b/xen/include/public/vm_event.h
index 99d60ea..b83b57d 100644
--- a/xen/include/public/vm_event.h
+++ b/xen/include/public/vm_event.h
@@ -97,6 +97,13 @@ 
  * Requires the vCPU to be paused already (synchronous events only).
  */
 #define VM_EVENT_FLAG_SET_REGISTERS      (1 << 8)
+/*
+ * Instruction cache is being sent back to the hypervisor in the event response
+ * to be used by the emulator. This flag is only useful when combined with
+ * VM_EVENT_FLAG_EMULATE and is incompatible with also setting
+ * VM_EVENT_FLAG_EMULATE_NOWRITE or VM_EVENT_FLAG_SET_EMUL_READ_DATA.
+ */
+#define VM_EVENT_FLAG_SET_EMUL_INSN_DATA (1 << 9)
 
 /*
  * Reasons for the vm event request
@@ -246,7 +253,7 @@  struct vm_event_sharing {
     uint32_t _pad;
 };
 
-struct vm_event_emul_read_data {
+struct vm_event_emul_data {
     uint32_t size;
     /* The struct is used in a union with vm_event_regs_x86. */
     uint8_t  data[sizeof(struct vm_event_regs_x86) - sizeof(uint32_t)];
@@ -277,7 +284,7 @@  typedef struct vm_event_st {
             struct vm_event_regs_x86 x86;
         } regs;
 
-        struct vm_event_emul_read_data emul_read_data;
+        struct vm_event_emul_data emul_data;
     } data;
 } vm_event_request_t, vm_event_response_t;