@@ -475,8 +475,6 @@ void hvm_do_resume(struct vcpu *v)
if ( unlikely(v->arch.vm_event) )
{
- struct monitor_write_data *w = &v->arch.vm_event->write_data;
-
if ( v->arch.vm_event->emulate_flags )
{
enum emul_kind kind = EMUL_KIND_NORMAL;
@@ -494,29 +492,7 @@ void hvm_do_resume(struct vcpu *v)
v->arch.vm_event->emulate_flags = 0;
}
- if ( w->do_write.msr )
- {
- hvm_msr_write_intercept(w->msr, w->value, 0);
- w->do_write.msr = 0;
- }
-
- if ( w->do_write.cr0 )
- {
- hvm_set_cr0(w->cr0, 0);
- w->do_write.cr0 = 0;
- }
-
- if ( w->do_write.cr4 )
- {
- hvm_set_cr4(w->cr4, 0);
- w->do_write.cr4 = 0;
- }
-
- if ( w->do_write.cr3 )
- {
- hvm_set_cr3(w->cr3, 0);
- w->do_write.cr3 = 0;
- }
+ arch_monitor_write_data(v);
}
/* Inject pending hw/sw trap */
@@ -2206,7 +2182,10 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
if ( hvm_monitor_crX(CR0, value, old_value) )
{
- /* The actual write will occur in hvm_do_resume(), if permitted. */
+ /*
+ * The actual write will occur in arch_monitor_write_data(), if
+ * permitted.
+ */
v->arch.vm_event->write_data.do_write.cr0 = 1;
v->arch.vm_event->write_data.cr0 = value;
@@ -2308,7 +2287,10 @@ int hvm_set_cr3(unsigned long value, bool_t may_defer)
if ( hvm_monitor_crX(CR3, value, old) )
{
- /* The actual write will occur in hvm_do_resume(), if permitted. */
+ /*
+ * The actual write will occur in arch_monitor_write_data(), if
+ * permitted.
+ */
v->arch.vm_event->write_data.do_write.cr3 = 1;
v->arch.vm_event->write_data.cr3 = value;
@@ -2388,7 +2370,10 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
if ( hvm_monitor_crX(CR4, value, old_cr) )
{
- /* The actual write will occur in hvm_do_resume(), if permitted. */
+ /*
+ * The actual write will occur in arch_monitor_write_data(), if
+ * permitted.
+ */
v->arch.vm_event->write_data.do_write.cr4 = 1;
v->arch.vm_event->write_data.cr4 = value;
@@ -3767,7 +3752,10 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
{
ASSERT(v->arch.vm_event);
- /* The actual write will occur in hvm_do_resume() (if permitted). */
+ /*
+ * The actual write will occur in arch_monitor_write_data(), if
+ * permitted.
+ */
v->arch.vm_event->write_data.do_write.msr = 1;
v->arch.vm_event->write_data.msr = msr;
v->arch.vm_event->write_data.value = msr_content;
@@ -1442,11 +1442,6 @@ static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
v->arch.hvm_vmx.exec_control |= cr3_ctls;
- /* Trap CR3 updates if CR3 memory events are enabled. */
- if ( v->domain->arch.monitor.write_ctrlreg_enabled &
- monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3) )
- v->arch.hvm_vmx.exec_control |= CPU_BASED_CR3_LOAD_EXITING;
-
if ( old_ctls != v->arch.hvm_vmx.exec_control )
vmx_update_cpu_exec_control(v);
}
@@ -3899,6 +3894,59 @@ void vmx_vmenter_helper(const struct cpu_user_regs *regs)
}
/*
+ * Adjusts domain CR3 load-exiting execution control for CR3-write monitor
+ * vm-event.
+ */
+void vmx_vm_event_update_cr3_traps(struct domain *d)
+{
+ struct vcpu *v;
+ struct arch_vmx_struct *avmx;
+ unsigned int cr3_bitmask;
+ bool_t cr3_vmevent, cr3_ldexit;
+
+ /* domain must be paused */
+ ASSERT(atomic_read(&d->pause_count));
+
+ /* non-hap domains trap CR3 writes unconditionally */
+ if ( !paging_mode_hap(d) )
+ {
+#ifndef NDEBUG
+ for_each_vcpu ( d, v )
+ ASSERT(v->arch.hvm_vmx.exec_control & CPU_BASED_CR3_LOAD_EXITING);
+#endif
+ return;
+ }
+
+ cr3_bitmask = monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3);
+ cr3_vmevent = !!(d->arch.monitor.write_ctrlreg_enabled & cr3_bitmask);
+
+ for_each_vcpu ( d, v )
+ {
+ avmx = &v->arch.hvm_vmx;
+ cr3_ldexit = !!(avmx->exec_control & CPU_BASED_CR3_LOAD_EXITING);
+
+ if ( cr3_vmevent == cr3_ldexit )
+ continue;
+
+ /*
+ * If CR0.PE=0, CR3 load exiting must remain enabled.
+ * See vmx_update_guest_cr code motion for cr = 0.
+ */
+ if ( cr3_ldexit && !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
+ continue;
+
+ if ( cr3_vmevent )
+ avmx->exec_control |= CPU_BASED_CR3_LOAD_EXITING;
+ else
+ avmx->exec_control &= ~CPU_BASED_CR3_LOAD_EXITING;
+
+ vmx_vmcs_enter(v);
+ vmx_update_cpu_exec_control(v);
+ vmx_vmcs_exit(v);
+ }
+}
+
+/*
* Local variables:
* mode: C
* c-file-style: "BSD"
@@ -19,7 +19,9 @@
* License along with this program; If not, see <http://www.gnu.org/licenses/>.
*/
+#include <asm/hvm/vmx/vmx.h>
#include <asm/monitor.h>
+#include <asm/vm_event.h>
#include <public/vm_event.h>
int arch_monitor_init_domain(struct domain *d)
@@ -41,6 +43,35 @@ void arch_monitor_cleanup_domain(struct domain *d)
memset(&d->monitor, 0, sizeof(d->monitor));
}
+void arch_monitor_write_data(struct vcpu *v)
+{
+ struct monitor_write_data *w = &v->arch.vm_event->write_data;
+
+ if ( w->do_write.msr )
+ {
+ hvm_msr_write_intercept(w->msr, w->value, 0);
+ w->do_write.msr = 0;
+ }
+
+ if ( w->do_write.cr0 )
+ {
+ hvm_set_cr0(w->cr0, 0);
+ w->do_write.cr0 = 0;
+ }
+
+ if ( w->do_write.cr4 )
+ {
+ hvm_set_cr4(w->cr4, 0);
+ w->do_write.cr4 = 0;
+ }
+
+ if ( w->do_write.cr3 )
+ {
+ hvm_set_cr3(w->cr3, 0);
+ w->do_write.cr3 = 0;
+ }
+}
+
static unsigned long *monitor_bitmap_for_msr(const struct domain *d, u32 *msr)
{
ASSERT(d->arch.monitor.msr_bitmap && msr);
@@ -119,6 +150,15 @@ bool_t monitored_msr(const struct domain *d, u32 msr)
return test_bit(msr, bitmap);
}
+static inline void write_ctrlreg_adjust_traps(struct domain *d, uint8_t index)
+{
+ /* vmx only */
+ ASSERT(cpu_has_vmx);
+
+ if ( VM_EVENT_X86_CR3 == index )
+ vmx_vm_event_update_cr3_traps(d);
+}
+
int arch_monitor_domctl_event(struct domain *d,
struct xen_domctl_monitor_op *mop)
{
@@ -159,13 +199,7 @@ int arch_monitor_domctl_event(struct domain *d,
else
ad->monitor.write_ctrlreg_enabled &= ~ctrlreg_bitmask;
- if ( VM_EVENT_X86_CR3 == mop->u.mov_to_cr.index )
- {
- struct vcpu *v;
- /* Latches new CR3 mask through CR0 code. */
- for_each_vcpu ( d, v )
- hvm_update_guest_cr(v, 0);
- }
+ write_ctrlreg_adjust_traps(d, mop->u.mov_to_cr.index);
domain_unpause(d);
@@ -101,6 +101,7 @@ void vmx_update_debug_state(struct vcpu *v);
void vmx_update_exception_bitmap(struct vcpu *v);
void vmx_update_cpu_exec_control(struct vcpu *v);
void vmx_update_secondary_exec_control(struct vcpu *v);
+void vmx_vm_event_update_cr3_traps(struct domain *d);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
@@ -94,6 +94,8 @@ int arch_monitor_init_domain(struct domain *d);
void arch_monitor_cleanup_domain(struct domain *d);
+void arch_monitor_write_data(struct vcpu *v);
+
bool_t monitored_msr(const struct domain *d, u32 msr);
#endif /* __ASM_X86_MONITOR_H__ */