@@ -1588,62 +1588,55 @@ void p2m_mem_paging_resume(struct domain *d, vm_event_response_t *rsp)
     }
 }

-void p2m_mem_access_emulate_check(struct vcpu *v,
+bool p2m_mem_access_emulate_check(struct vcpu *v,
                                   const vm_event_response_t *rsp)
 {
-    /* Mark vcpu for skipping one instruction upon rescheduling. */
-    if ( rsp->flags & VM_EVENT_FLAG_EMULATE )
-    {
-        xenmem_access_t access;
-        bool_t violation = 1;
-        const struct vm_event_mem_access *data = &rsp->u.mem_access;
+    xenmem_access_t access;
+    bool violation = true;
+    const struct vm_event_mem_access *data = &rsp->u.mem_access;

-        if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
+    if ( p2m_get_mem_access(v->domain, _gfn(data->gfn), &access) == 0 )
+    {
+        switch ( access )
         {
-            switch ( access )
-            {
-            case XENMEM_access_n:
-            case XENMEM_access_n2rwx:
-            default:
-                violation = data->flags & MEM_ACCESS_RWX;
-                break;
+        case XENMEM_access_n:
+        case XENMEM_access_n2rwx:
+        default:
+            violation = data->flags & MEM_ACCESS_RWX;
+            break;

-            case XENMEM_access_r:
-                violation = data->flags & MEM_ACCESS_WX;
-                break;
+        case XENMEM_access_r:
+            violation = data->flags & MEM_ACCESS_WX;
+            break;

-            case XENMEM_access_w:
-                violation = data->flags & MEM_ACCESS_RX;
-                break;
+        case XENMEM_access_w:
+            violation = data->flags & MEM_ACCESS_RX;
+            break;

-            case XENMEM_access_x:
-                violation = data->flags & MEM_ACCESS_RW;
-                break;
+        case XENMEM_access_x:
+            violation = data->flags & MEM_ACCESS_RW;
+            break;

-            case XENMEM_access_rx:
-            case XENMEM_access_rx2rw:
-                violation = data->flags & MEM_ACCESS_W;
-                break;
+        case XENMEM_access_rx:
+        case XENMEM_access_rx2rw:
+            violation = data->flags & MEM_ACCESS_W;
+            break;

-            case XENMEM_access_wx:
-                violation = data->flags & MEM_ACCESS_R;
-                break;
+        case XENMEM_access_wx:
+            violation = data->flags & MEM_ACCESS_R;
+            break;

-            case XENMEM_access_rw:
-                violation = data->flags & MEM_ACCESS_X;
-                break;
+        case XENMEM_access_rw:
+            violation = data->flags & MEM_ACCESS_X;
+            break;

-            case XENMEM_access_rwx:
-                violation = 0;
-                break;
-            }
+        case XENMEM_access_rwx:
+            violation = false;
+            break;
         }
-
-        v->arch.vm_event->emulate_flags = violation ? rsp->flags : 0;
-
-        if ( (rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA) )
-            v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
     }
+
+    return violation;
 }

 void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
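
Note on the hunk above: p2m_mem_access_emulate_check() now only answers
"does this response still face a mem_access violation?", while the side
effects (setting emulate_flags and emul_read_data) move into the new
vm_event_emulate_check() further down. As a worked example of the mask
logic: for a gfn restricted to XENMEM_access_r, a write fault arrives with
MEM_ACCESS_W set, so data->flags & MEM_ACCESS_WX is non-zero and the
function returns true, meaning emulation should go ahead. A monitor reply
that exercises this path could look roughly like the sketch below; the
field names are from the public vm_event ABI, the ring-buffer plumbing is
omitted, and the snippet is illustrative rather than part of this patch:

    /* Reply to a mem_access request 'req', asking Xen to emulate the
     * faulting instruction instead of relaxing the p2m permissions. */
    vm_event_response_t rsp = {
        .version = VM_EVENT_INTERFACE_VERSION,
        .vcpu_id = req->vcpu_id,
        .reason  = VM_EVENT_REASON_MEM_ACCESS,
        .flags   = VM_EVENT_FLAG_VCPU_PAUSED | VM_EVENT_FLAG_EMULATE,
        .u.mem_access = req->u.mem_access,    /* echo gfn/flags back */
    };
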
@@ -18,6 +18,7 @@
  * License along with this program; If not, see <http://www.gnu.org/licenses/>.
  */

+#include <asm/p2m.h>
 #include <asm/vm_event.h>

 /* Implicitly serialized by the domctl lock. */
@@ -56,8 +57,12 @@ void vm_event_cleanup_domain(struct domain *d)
     d->arch.mem_access_emulate_each_rep = 0;
 }

-void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
+void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
+                                vm_event_response_t *rsp)
 {
+    if ( !(rsp->flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP) )
+        return;
+
     if ( !is_hvm_domain(d) )
         return;

@@ -186,6 +191,34 @@ void vm_event_fill_regs(vm_event_request_t *req)
     req->data.regs.x86.cs_arbytes = seg.attr.bytes;
 }

+void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
+{
+    if ( !(rsp->flags & VM_EVENT_FLAG_EMULATE) )
+    {
+        v->arch.vm_event->emulate_flags = 0;
+        return;
+    }
+
+    switch ( rsp->reason )
+    {
+    case VM_EVENT_REASON_MEM_ACCESS:
+        /*
+         * Emulate iff this is a response to a mem_access violation and there
+         * are still conflicting mem_access permissions in place.
+         */
+        if ( p2m_mem_access_emulate_check(v, rsp) )
+        {
+            if ( rsp->flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
+                v->arch.vm_event->emul_read_data = rsp->data.emul_read_data;
+
+            v->arch.vm_event->emulate_flags = rsp->flags;
+        }
+        break;
+    default:
+        break;
+    }
+}
+
 /*
  * Local variables:
  * mode: C
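
Note: the emulate_flags set by vm_event_emulate_check() above are consumed
on the next VM entry, on the x86 hvm_do_resume() path. The sketch below is
a simplified rendering of that existing consumer, from memory of the code
of this era; the real function handles more cases and the exact shape may
differ:

    /* Sketch, modeled on hvm_do_resume(): if a vm_event reply asked for
     * emulation, emulate the faulting instruction before re-entering the
     * guest, then clear the flags. */
    if ( unlikely(v->arch.vm_event) && v->arch.vm_event->emulate_flags )
    {
        enum emul_kind kind = EMUL_KIND_NORMAL;

        /* SET_EMUL_READ_DATA supplies the bytes emulated reads should
         * see; EMULATE_NOWRITE suppresses the write-back. */
        if ( v->arch.vm_event->emulate_flags & VM_EVENT_FLAG_SET_EMUL_READ_DATA )
            kind = EMUL_KIND_SET_CONTEXT;
        else if ( v->arch.vm_event->emulate_flags & VM_EVENT_FLAG_EMULATE_NOWRITE )
            kind = EMUL_KIND_NOWRITE;

        hvm_mem_access_emulate_one(kind, TRAP_invalid_op,
                                   HVM_DELIVER_NO_ERROR_CODE);

        v->arch.vm_event->emulate_flags = 0;
    }
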
@@ -398,42 +398,41 @@ void vm_event_resume(struct domain *d, struct vm_event_domain *ved)
          * In some cases the response type needs extra handling, so here
          * we call the appropriate handlers.
          */
-        switch ( rsp.reason )
-        {
-#ifdef CONFIG_X86
-        case VM_EVENT_REASON_MOV_TO_MSR:
-#endif
-        case VM_EVENT_REASON_WRITE_CTRLREG:
-            vm_event_register_write_resume(v, &rsp);
-            break;
-
-#ifdef CONFIG_HAS_MEM_ACCESS
-        case VM_EVENT_REASON_MEM_ACCESS:
-            mem_access_resume(v, &rsp);
-            break;
-#endif

+        /* Check flags which apply only when the vCPU is paused */
+        if ( atomic_read(&v->vm_event_pause_count) )
+        {
 #ifdef CONFIG_HAS_MEM_PAGING
-        case VM_EVENT_REASON_MEM_PAGING:
-            p2m_mem_paging_resume(d, &rsp);
-            break;
+            if ( rsp.reason == VM_EVENT_REASON_MEM_PAGING )
+                p2m_mem_paging_resume(d, &rsp);
 #endif
-        };

+            /*
+             * Check emulation flags in the arch-specific handler only, as it
+             * has to set arch-specific flags when supported, and to avoid
+             * bitmask overhead when it isn't supported.
+             */
+            vm_event_emulate_check(v, &rsp);
+
+            /*
+             * Check in arch-specific handler to avoid bitmask overhead when
+             * not supported.
+             */
+            vm_event_register_write_resume(v, &rsp);
-        /* Check for altp2m switch */
-        if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
-            p2m_altp2m_check(v, rsp.altp2m_idx);

+            /*
+             * Check in arch-specific handler to avoid bitmask overhead when
+             * not supported.
+             */
+            vm_event_toggle_singlestep(d, v, &rsp);
+
+            /* Check for altp2m switch */
+            if ( rsp.flags & VM_EVENT_FLAG_ALTERNATE_P2M )
+                p2m_altp2m_check(v, rsp.altp2m_idx);
-        /* Check flags which apply only when the vCPU is paused */
-        if ( atomic_read(&v->vm_event_pause_count) )
-        {
             if ( rsp.flags & VM_EVENT_FLAG_SET_REGISTERS )
                 vm_event_set_registers(v, &rsp);

-            if ( rsp.flags & VM_EVENT_FLAG_TOGGLE_SINGLESTEP )
-                vm_event_toggle_singlestep(d, v);
-
             if ( rsp.flags & VM_EVENT_FLAG_VCPU_PAUSED )
                 vm_event_vcpu_unpause(v);
         }
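
Note: with this restructuring, every response-driven action (paging resume,
emulation setup, register-write handling, singlestep toggle, altp2m switch,
register setting) runs in a single block, strictly while the vCPU is still
paused and before the VM_EVENT_FLAG_VCPU_PAUSED unpause. A monitor can
therefore combine several flags in one reply and rely on that ordering. A
hypothetical reply illustrating this (view_idx is an application-defined
altp2m view id, not something introduced by this patch):

    rsp.flags = VM_EVENT_FLAG_VCPU_PAUSED        /* unpause when all done  */
              | VM_EVENT_FLAG_TOGGLE_SINGLESTEP  /* MTF singlestep, HVM    */
              | VM_EVENT_FLAG_ALTERNATE_P2M;     /* switch altp2m view     */
    rsp.altp2m_idx = view_idx;
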
@@ -121,10 +121,11 @@ typedef enum {
                        p2m_to_mask(p2m_map_foreign)))

 static inline
-void p2m_mem_access_emulate_check(struct vcpu *v,
+bool p2m_mem_access_emulate_check(struct vcpu *v,
                                   const vm_event_response_t *rsp)
 {
     /* Not supported on ARM. */
+    return false;
 }

 static inline
@@ -34,7 +34,8 @@ static inline void vm_event_cleanup_domain(struct domain *d)
     memset(&d->monitor, 0, sizeof(d->monitor));
 }

-static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v)
+static inline void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
+                                              vm_event_response_t *rsp)
 {
     /* Not supported on ARM. */
 }
@@ -45,4 +46,10 @@ void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp)
     /* Not supported on ARM. */
 }

+static inline
+void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
+{
+    /* Not supported on ARM. */
+}
+
 #endif /* __ASM_ARM_VM_EVENT_H__ */
@@ -677,7 +677,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,

-/* Check for emulation and mark vcpu for skipping one instruction
- * upon rescheduling if required. */
-void p2m_mem_access_emulate_check(struct vcpu *v,
+/* Check whether a mem_access response still faces conflicting
+ * mem_access permissions, i.e. whether emulation should go ahead. */
+bool p2m_mem_access_emulate_check(struct vcpu *v,
                                   const vm_event_response_t *rsp);

 /* Sanity check for mem_access hardware support */
@@ -35,8 +35,11 @@ int vm_event_init_domain(struct domain *d);

 void vm_event_cleanup_domain(struct domain *d);

-void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v);
+void vm_event_toggle_singlestep(struct domain *d, struct vcpu *v,
+                                vm_event_response_t *rsp);

 void vm_event_register_write_resume(struct vcpu *v, vm_event_response_t *rsp);

+void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp);
+
 #endif /* __ASM_X86_VM_EVENT_H__ */
@@ -30,12 +30,6 @@
 int mem_access_memop(unsigned long cmd,
                      XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);

-static inline
-void mem_access_resume(struct vcpu *v, vm_event_response_t *rsp)
-{
-    p2m_mem_access_emulate_check(v, rsp);
-}
-
 #else

 static inline
@@ -45,12 +39,6 @@ int mem_access_memop(unsigned long cmd,
     return -ENOSYS;
 }

-static inline
-void mem_access_resume(struct vcpu *vcpu, vm_event_response_t *rsp)
-{
-    /* Nothing to do. */
-}
-
 #endif /* HAS_MEM_ACCESS */

 #endif /* _XEN_ASM_MEM_ACCESS_H */