diff mbox

[4/7] vm-event/x86: use vm_event_vcpu_enter properly

Message ID 1466086187-7607-1-git-send-email-czuzu@bitdefender.com (mailing list archive)
State New, archived
Headers show

Commit Message

Corneliu ZUZU June 16, 2016, 2:09 p.m. UTC
After introducing vm_event_vcpu_enter, it makes sense to move the following
code there:
- handling of monitor_write_data from hvm_do_resume
- enabling/disabling CPU_BASED_CR3_LOAD_EXITING from vmx_update_guest_cr(v, 0)

Signed-off-by: Corneliu ZUZU <czuzu@bitdefender.com>
---
 xen/arch/x86/hvm/hvm.c         |  62 +++++--------------------
 xen/arch/x86/hvm/vmx/vmx.c     |  12 ++---
 xen/arch/x86/monitor.c         |   9 ----
 xen/arch/x86/vm_event.c        | 102 +++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/vm_event.h |   5 +-
 5 files changed, 119 insertions(+), 71 deletions(-)

Comments

Jan Beulich June 16, 2016, 3 p.m. UTC | #1
>>> On 16.06.16 at 16:09, <czuzu@bitdefender.com> wrote:
> @@ -2199,7 +2153,9 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
>  
>          if ( hvm_event_crX(CR0, value, old_value) )
>          {
> -            /* The actual write will occur in hvm_do_resume(), if permitted. */
> +            /* The actual write will occur in vcpu_enter_write_data(), if
> +             * permitted.
> +             */

Coding style.

> @@ -1432,18 +1430,16 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
>          if ( paging_mode_hap(v->domain) )
>          {
>              /* Manage GUEST_CR3 when CR0.PE=0. */
> +            uint32_t old_ctls = v->arch.hvm_vmx.exec_control;
>              uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
>                                   CPU_BASED_CR3_STORE_EXITING);
> +
>              v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
>              if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
>                  v->arch.hvm_vmx.exec_control |= cr3_ctls;
>  
> -            /* Trap CR3 updates if CR3 memory events are enabled. */
> -            if ( v->domain->arch.monitor.write_ctrlreg_enabled &
> -                 monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
> -                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
> -
> -            vmx_update_cpu_exec_control(v);
> +            if ( old_ctls != v->arch.hvm_vmx.exec_control )
> +                vmx_update_cpu_exec_control(v);
>          }

How does this match up with the rest of this patch?

> @@ -179,6 +182,105 @@ void vm_event_fill_regs(vm_event_request_t *req)
>      req->data.regs.x86.cs_arbytes = seg.attr.bytes;
>  }
>  
> +static inline void vcpu_enter_write_data(struct vcpu *v)

Please allow the compiler to decide whether to inline such larger
functions.

> +void arch_vm_event_vcpu_enter(struct vcpu *v)
> +{
> +    /* vmx only */
> +    ASSERT( cpu_has_vmx );

Stray blanks.

Jan
Tamas K Lengyel June 16, 2016, 4:27 p.m. UTC | #2
> diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
> index 1fec412..1e5445f 100644
> --- a/xen/arch/x86/monitor.c
> +++ b/xen/arch/x86/monitor.c
> @@ -20,7 +20,6 @@
>   */
>
>  #include <asm/monitor.h>
> -#include <public/vm_event.h>
>
>  int arch_monitor_domctl_event(struct domain *d,
>                                struct xen_domctl_monitor_op *mop)
> @@ -62,14 +61,6 @@ int arch_monitor_domctl_event(struct domain *d,
>          else
>              ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
>
> -        if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
> -        {
> -            struct vcpu *v;
> -            /* Latches new CR3 mask through CR0 code. */
> -            for_each_vcpu ( d, v )
> -                hvm_update_guest_cr(v, 0);
> -        }
> -

So this block is not really getting relocated as the commit message
suggests as much as being completely reworked at a different location?
It would be better for it to be its own separate patch as the changes
are not trivial.

>          domain_unpause(d);
>
>          break;

Thanks,
Tamas
Corneliu ZUZU June 16, 2016, 8:20 p.m. UTC | #3
On 6/16/2016 6:00 PM, Jan Beulich wrote:
>>>> On 16.06.16 at 16:09, <czuzu@bitdefender.com> wrote:
>> @@ -2199,7 +2153,9 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
>>   
>>           if ( hvm_event_crX(CR0, value, old_value) )
>>           {
>> -            /* The actual write will occur in hvm_do_resume(), if permitted. */
>> +            /* The actual write will occur in vcpu_enter_write_data(), if
>> +             * permitted.
>> +             */
> Coding style.

Ack.

>
>> @@ -1432,18 +1430,16 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
>>           if ( paging_mode_hap(v->domain) )
>>           {
>>               /* Manage GUEST_CR3 when CR0.PE=0. */
>> +            uint32_t old_ctls = v->arch.hvm_vmx.exec_control;
>>               uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
>>                                    CPU_BASED_CR3_STORE_EXITING);
>> +
>>               v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
>>               if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
>>                   v->arch.hvm_vmx.exec_control |= cr3_ctls;
>>   
>> -            /* Trap CR3 updates if CR3 memory events are enabled. */
>> -            if ( v->domain->arch.monitor.write_ctrlreg_enabled &
>> -                 monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
>> -                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
>> -
>> -            vmx_update_cpu_exec_control(v);
>> +            if ( old_ctls != v->arch.hvm_vmx.exec_control )
>> +                vmx_update_cpu_exec_control(v);
>>           }
> How does this match up with the rest of this patch?

And by 'this' you mean slightly optimizing this sequence by adding in 
old_ctls?
It seems pretty straight-forward to me, I figured if I am to move the 
monitor.write_ctrlreg_enabled part from here
it wouldn't be much of a stretch to also do this little 
optimization...what would have been appropriate?
To do this in a separate patch? To mention it in the commit message?

>
>> @@ -179,6 +182,105 @@ void vm_event_fill_regs(vm_event_request_t *req)
>>       req->data.regs.x86.cs_arbytes = seg.attr.bytes;
>>   }
>>   
>> +static inline void vcpu_enter_write_data(struct vcpu *v)
> Please allow the compiler to decide whether to inline such larger
> functions.

Ack.

>> +void arch_vm_event_vcpu_enter(struct vcpu *v)
>> +{
>> +    /* vmx only */
>> +    ASSERT( cpu_has_vmx );
> Stray blanks.
>
> Jan
>
>

Ack.

Corneliu.
Jan Beulich June 17, 2016, 7:20 a.m. UTC | #4
>>> On 16.06.16 at 22:20, <czuzu@bitdefender.com> wrote:
> On 6/16/2016 6:00 PM, Jan Beulich wrote:
>>>>> On 16.06.16 at 16:09, <czuzu@bitdefender.com> wrote:
>>> @@ -1432,18 +1430,16 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
>>>           if ( paging_mode_hap(v->domain) )
>>>           {
>>>               /* Manage GUEST_CR3 when CR0.PE=0. */
>>> +            uint32_t old_ctls = v->arch.hvm_vmx.exec_control;
>>>               uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
>>>                                    CPU_BASED_CR3_STORE_EXITING);
>>> +
>>>               v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
>>>               if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
>>>                   v->arch.hvm_vmx.exec_control |= cr3_ctls;
>>>   
>>> -            /* Trap CR3 updates if CR3 memory events are enabled. */
>>> -            if ( v->domain->arch.monitor.write_ctrlreg_enabled &
>>> -                 monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
>>> -                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
>>> -
>>> -            vmx_update_cpu_exec_control(v);
>>> +            if ( old_ctls != v->arch.hvm_vmx.exec_control )
>>> +                vmx_update_cpu_exec_control(v);
>>>           }
>> How does this match up with the rest of this patch?
> 
> And by 'this' you mean slightly optimizing this sequence by adding in 
> old_ctls?
> It seems pretty straight-forward to me, I figured if I am to move the 
> monitor.write_ctrlreg_enabled part from here
> it wouldn't be much of a stretch to also do this little 
> optimization...what would have been appropriate?
> To do this in a separate patch? To mention it in the commit message?

At least the latter, and perhaps better the former. Without even
mentioning it the readers (reviewers) have to guess whether this
is an integral part of the change, or - as you now confirm - just a
minor optimization done along the road.

Jan
Corneliu ZUZU June 17, 2016, 9:24 a.m. UTC | #5
On 6/16/2016 7:27 PM, Tamas K Lengyel wrote:
>> diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
>> index 1fec412..1e5445f 100644
>> --- a/xen/arch/x86/monitor.c
>> +++ b/xen/arch/x86/monitor.c
>> @@ -20,7 +20,6 @@
>>    */
>>
>>   #include <asm/monitor.h>
>> -#include <public/vm_event.h>
>>
>>   int arch_monitor_domctl_event(struct domain *d,
>>                                 struct xen_domctl_monitor_op *mop)
>> @@ -62,14 +61,6 @@ int arch_monitor_domctl_event(struct domain *d,
>>           else
>>               ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
>>
>> -        if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
>> -        {
>> -            struct vcpu *v;
>> -            /* Latches new CR3 mask through CR0 code. */
>> -            for_each_vcpu ( d, v )
>> -                hvm_update_guest_cr(v, 0);
>> -        }
>> -
> So this block is not really getting relocated as the commit message
> suggests as much as being completely reworked at a different location?
> It would be better for it to be its own separate patch as the changes
> are not trivial.

That's actually not reworked, it's completely removed since there's no 
need for it anymore.
That is: "latching of CR3 mask" is not done "through CR0" anymore but 
rather through the vm_event_vcpu_enter function instead and you don't 
have to do anything more here in arch_monitor_domctl_event for that to 
happen.

>>           domain_unpause(d);
>>
>>           break;
> Thanks,
> Tamas
>
Corneliu ZUZU June 17, 2016, 11:23 a.m. UTC | #6
On 6/17/2016 10:20 AM, Jan Beulich wrote:
>>>> On 16.06.16 at 22:20, <czuzu@bitdefender.com> wrote:
>> On 6/16/2016 6:00 PM, Jan Beulich wrote:
>>>>>> On 16.06.16 at 16:09, <czuzu@bitdefender.com> wrote:
>>>> @@ -1432,18 +1430,16 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
>>>>            if ( paging_mode_hap(v->domain) )
>>>>            {
>>>>                /* Manage GUEST_CR3 when CR0.PE=0. */
>>>> +            uint32_t old_ctls = v->arch.hvm_vmx.exec_control;
>>>>                uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
>>>>                                     CPU_BASED_CR3_STORE_EXITING);
>>>> +
>>>>                v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
>>>>                if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
>>>>                    v->arch.hvm_vmx.exec_control |= cr3_ctls;
>>>>    
>>>> -            /* Trap CR3 updates if CR3 memory events are enabled. */
>>>> -            if ( v->domain->arch.monitor.write_ctrlreg_enabled &
>>>> -                 monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
>>>> -                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
>>>> -
>>>> -            vmx_update_cpu_exec_control(v);
>>>> +            if ( old_ctls != v->arch.hvm_vmx.exec_control )
>>>> +                vmx_update_cpu_exec_control(v);
>>>>            }
>>> How does this match up with the rest of this patch?
>> And by 'this' you mean slightly optimizing this sequence by adding in
>> old_ctls?
>> It seems pretty straight-forward to me, I figured if I am to move the
>> monitor.write_ctrlreg_enabled part from here
>> it wouldn't be much of a stretch to also do this little
>> optimization...what would have been appropriate?
>> To do this in a separate patch? To mention it in the commit message?
> At least the latter, and perhaps better the former. Without even
> mentioning it the readers (reviewers) have to guess whether this
> is an integral part of the change, or - as you now confirm - just a
> minor optimization done along the road.
>
> Jan

Ack, will split in separate patch in v2.
You're right, I've got to be more attentive to always separate unrelated 
code changes, however minor they are :)

Thanks,
Corneliu.
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 770bb50..2f48846 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -462,52 +462,6 @@  void hvm_do_resume(struct vcpu *v)
     if ( !handle_hvm_io_completion(v) )
         return;
 
-    if ( unlikely(v->arch.vm_event) )
-    {
-        struct monitor_write_data *w = &v->arch.vm_event->write_data;
-
-        if ( v->arch.vm_event->emulate_flags )
-        {
-            enum emul_kind kind = EMUL_KIND_NORMAL;
-
-            if ( v->arch.vm_event->emulate_flags &
-                 VM_EVENT_FLAG_SET_EMUL_READ_DATA )
-                kind = EMUL_KIND_SET_CONTEXT;
-            else if ( v->arch.vm_event->emulate_flags &
-                      VM_EVENT_FLAG_EMULATE_NOWRITE )
-                kind = EMUL_KIND_NOWRITE;
-
-            hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
-                                       HVM_DELIVER_NO_ERROR_CODE);
-
-            v->arch.vm_event->emulate_flags = 0;
-        }
-
-        if ( w->do_write.msr )
-        {
-            hvm_msr_write_intercept(w->msr, w->value, 0);
-            w->do_write.msr = 0;
-        }
-
-        if ( w->do_write.cr0 )
-        {
-            hvm_set_cr0(w->cr0, 0);
-            w->do_write.cr0 = 0;
-        }
-
-        if ( w->do_write.cr4 )
-        {
-            hvm_set_cr4(w->cr4, 0);
-            w->do_write.cr4 = 0;
-        }
-
-        if ( w->do_write.cr3 )
-        {
-            hvm_set_cr3(w->cr3, 0);
-            w->do_write.cr3 = 0;
-        }
-    }
-
     vm_event_vcpu_enter(v);
 
     /* Inject pending hw/sw trap */
@@ -2199,7 +2153,9 @@  int hvm_set_cr0(unsigned long value, bool_t may_defer)
 
         if ( hvm_event_crX(CR0, value, old_value) )
         {
-            /* The actual write will occur in hvm_do_resume(), if permitted. */
+            /* The actual write will occur in vcpu_enter_write_data(), if
+             * permitted.
+             */
             v->arch.vm_event->write_data.do_write.cr0 = 1;
             v->arch.vm_event->write_data.cr0 = value;
 
@@ -2301,7 +2257,9 @@  int hvm_set_cr3(unsigned long value, bool_t may_defer)
 
         if ( hvm_event_crX(CR3, value, old) )
         {
-            /* The actual write will occur in hvm_do_resume(), if permitted. */
+            /* The actual write will occur in vcpu_enter_write_data(), if
+             * permitted.
+             */
             v->arch.vm_event->write_data.do_write.cr3 = 1;
             v->arch.vm_event->write_data.cr3 = value;
 
@@ -2381,7 +2339,9 @@  int hvm_set_cr4(unsigned long value, bool_t may_defer)
 
         if ( hvm_event_crX(CR4, value, old_cr) )
         {
-            /* The actual write will occur in hvm_do_resume(), if permitted. */
+            /* The actual write will occur in vcpu_enter_write_data(), if
+             * permitted.
+             */
             v->arch.vm_event->write_data.do_write.cr4 = 1;
             v->arch.vm_event->write_data.cr4 = value;
 
@@ -3761,7 +3721,9 @@  int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
     {
         ASSERT(v->arch.vm_event);
 
-        /* The actual write will occur in hvm_do_resume() (if permitted). */
+        /* The actual write will occur in vcpu_enter_write_data(), if
+         * permitted.
+         */
         v->arch.vm_event->write_data.do_write.msr = 1;
         v->arch.vm_event->write_data.msr = msr;
         v->arch.vm_event->write_data.value = msr_content;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index b43b94a..8b76ef9 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -35,7 +35,6 @@ 
 #include <asm/guest_access.h>
 #include <asm/debugreg.h>
 #include <asm/msr.h>
-#include <asm/paging.h>
 #include <asm/p2m.h>
 #include <asm/mem_sharing.h>
 #include <asm/hvm/emulate.h>
@@ -58,7 +57,6 @@ 
 #include <asm/hvm/nestedhvm.h>
 #include <asm/altp2m.h>
 #include <asm/event.h>
-#include <asm/monitor.h>
 #include <public/arch-x86/cpuid.h>
 
 static bool_t __initdata opt_force_ept;
@@ -1432,18 +1430,16 @@  static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
         if ( paging_mode_hap(v->domain) )
         {
             /* Manage GUEST_CR3 when CR0.PE=0. */
+            uint32_t old_ctls = v->arch.hvm_vmx.exec_control;
             uint32_t cr3_ctls = (CPU_BASED_CR3_LOAD_EXITING |
                                  CPU_BASED_CR3_STORE_EXITING);
+
             v->arch.hvm_vmx.exec_control &= ~cr3_ctls;
             if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
                 v->arch.hvm_vmx.exec_control |= cr3_ctls;
 
-            /* Trap CR3 updates if CR3 memory events are enabled. */
-            if ( v->domain->arch.monitor.write_ctrlreg_enabled &
-                 monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
-                v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
-
-            vmx_update_cpu_exec_control(v);
+            if ( old_ctls != v->arch.hvm_vmx.exec_control )
+                vmx_update_cpu_exec_control(v);
         }
 
         if ( !nestedhvm_vcpu_in_guestmode(v) )
diff --git a/xen/arch/x86/monitor.c b/xen/arch/x86/monitor.c
index 1fec412..1e5445f 100644
--- a/xen/arch/x86/monitor.c
+++ b/xen/arch/x86/monitor.c
@@ -20,7 +20,6 @@ 
  */
 
 #include <asm/monitor.h>
-#include <public/vm_event.h>
 
 int arch_monitor_domctl_event(struct domain *d,
                               struct xen_domctl_monitor_op *mop)
@@ -62,14 +61,6 @@  int arch_monitor_domctl_event(struct domain *d,
         else
             ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
 
-        if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
-        {
-            struct vcpu *v;
-            /* Latches new CR3 mask through CR0 code. */
-            for_each_vcpu ( d, v )
-                hvm_update_guest_cr(v, 0);
-        }
-
         domain_unpause(d);
 
         break;
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index f7eb24a..94b50fc 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -19,6 +19,9 @@ 
  */
 
 #include <xen/vm_event.h>
+#include <asm/monitor.h>
+#include <asm/paging.h>
+#include <asm/hvm/vmx/vmx.h>
 
 /* Implicitly serialized by the domctl lock. */
 int vm_event_init_domain(struct domain *d)
@@ -179,6 +182,105 @@  void vm_event_fill_regs(vm_event_request_t *req)
     req->data.regs.x86.cs_arbytes = seg.attr.bytes;
 }
 
+static inline void vcpu_enter_write_data(struct vcpu *v)
+{
+    struct monitor_write_data *w;
+
+    if ( likely(!v->arch.vm_event) )
+        return;
+
+    w = &v->arch.vm_event->write_data;
+
+    if ( unlikely(v->arch.vm_event->emulate_flags) )
+    {
+        enum emul_kind kind = EMUL_KIND_NORMAL;
+
+        if ( v->arch.vm_event->emulate_flags &
+             VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+            kind = EMUL_KIND_SET_CONTEXT;
+        else if ( v->arch.vm_event->emulate_flags &
+                  VM_EVENT_FLAG_EMULATE_NOWRITE )
+            kind = EMUL_KIND_NOWRITE;
+
+        hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
+                                   HVM_DELIVER_NO_ERROR_CODE);
+
+        v->arch.vm_event->emulate_flags = 0;
+    }
+
+    if ( w->do_write.msr )
+    {
+        hvm_msr_write_intercept(w->msr, w->value, 0);
+        w->do_write.msr = 0;
+    }
+
+    if ( w->do_write.cr0 )
+    {
+        hvm_set_cr0(w->cr0, 0);
+        w->do_write.cr0 = 0;
+    }
+
+    if ( w->do_write.cr4 )
+    {
+        hvm_set_cr4(w->cr4, 0);
+        w->do_write.cr4 = 0;
+    }
+
+    if ( w->do_write.cr3 )
+    {
+        hvm_set_cr3(w->cr3, 0);
+        w->do_write.cr3 = 0;
+    }
+}
+
+static inline void vcpu_enter_adjust_traps(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    struct arch_vmx_struct *avmx = &v->arch.hvm_vmx;
+    bool_t cr3_ldexit, cr3_vmevent;
+    unsigned int cr3_bitmask;
+
+    /* Adjust CR3 load-exiting (for monitor vm-events). */
+
+    cr3_bitmask = monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3);
+    cr3_vmevent = !!(d->arch.monitor.write_ctrlreg_enabled & cr3_bitmask);
+    cr3_ldexit = !!(avmx->exec_control & CPU_BASED_CR3_LOAD_EXITING);
+
+    if ( likely(cr3_vmevent == cr3_ldexit) )
+        return;
+
+    if ( !paging_mode_hap(d) )
+    {
+        /* non-hap domains trap CR3 writes unconditionally */
+        ASSERT(cr3_ldexit);
+        return;
+    }
+
+    /*
+     * If CR0.PE=0, CR3 load exiting must remain enabled.
+     * See vmx_update_guest_cr code motion for cr = 0.
+     */
+    if ( cr3_ldexit && !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
+        return;
+
+    if ( cr3_vmevent )
+        avmx->exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+    else
+        avmx->exec_control &= ~CPU_BASED_CR3_LOAD_EXITING;
+
+    vmx_vmcs_enter(v);
+    vmx_update_cpu_exec_control(v);
+    vmx_vmcs_exit(v);
+}
+
+void arch_vm_event_vcpu_enter(struct vcpu *v)
+{
+    /* vmx only */
+    ASSERT( cpu_has_vmx );
+    vcpu_enter_write_data(v);
+    vcpu_enter_adjust_traps(v);
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/vm_event.h b/xen/include/asm-x86/vm_event.h
index 6fb3b58..c4b5def 100644
--- a/xen/include/asm-x86/vm_event.h
+++ b/xen/include/asm-x86/vm_event.h
@@ -43,10 +43,7 @@  void vm_event_set_registers(struct vcpu *v, vm_event_response_t *rsp);
 
 void vm_event_fill_regs(vm_event_request_t *req);
 
-static inline void arch_vm_event_vcpu_enter(struct vcpu *v)
-{
-    /* Nothing to do. */
-}
+void arch_vm_event_vcpu_enter(struct vcpu *v);
 
 /*
  * Monitor vm-events.