@@ -274,17 +274,15 @@ static void ctxt_switch_to(struct vcpu *n)
virt_timer_restore(n);
}
-/* Update per-VCPU guest runstate shared memory area (if registered). */
-static void update_runstate_area(struct vcpu *v)
+static void update_runstate_by_gvaddr(struct vcpu *v)
{
void __user *guest_handle = NULL;
- if ( guest_handle_is_null(runstate_guest(v)) )
- return;
+ ASSERT(!guest_handle_is_null(runstate_guest_virt(v)));
if ( VM_ASSIST(v->domain, runstate_update_flag) )
{
- guest_handle = &v->runstate_guest.p->state_entry_time + 1;
+ guest_handle = &v->runstate_guest.virt.p->state_entry_time + 1;
guest_handle--;
v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
__raw_copy_to_guest(guest_handle,
@@ -292,7 +290,7 @@ static void update_runstate_area(struct vcpu *v)
smp_wmb();
}
- __copy_to_guest(runstate_guest(v), &v->runstate, 1);
+ __copy_to_guest(runstate_guest_virt(v), &v->runstate, 1);
if ( guest_handle )
{
@@ -303,6 +301,58 @@ static void update_runstate_area(struct vcpu *v)
}
}
+
+static void update_runstate_by_gpaddr(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ paddr_t gpaddr = 0;
+
+ if ( VM_ASSIST(v->domain, runstate_update_flag) )
+ {
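+        /*
+         * gpaddr addresses the most significant byte of state_entry_time,
+         * which on a little-endian guest holds XEN_RUNSTATE_UPDATE (bit 63),
+         * so the flag can be toggled without rewriting the whole field.
+         */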
+        gpaddr = v->runstate_guest.phys +
+                 offsetof(struct vcpu_runstate_info, state_entry_time) +
+                 sizeof(uint64_t) - 1;
+ v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+        copy_to_guest_phys_flush_dcache(d, gpaddr,
+            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ smp_wmb();
+ }
+
+    copy_to_guest_phys_flush_dcache(d, v->runstate_guest.phys, &v->runstate,
+                                    sizeof(struct vcpu_runstate_info));
+
+ if ( gpaddr )
+ {
+ v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+ smp_wmb();
+        copy_to_guest_phys_flush_dcache(d, gpaddr,
+            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
+ }
+}
+
+/* Update per-VCPU guest runstate shared memory area (if registered). */
+static void update_runstate_area(struct vcpu *v)
+{
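+    /*
+     * Try-lock against a concurrent registration or discard; if one is in
+     * flight, skip this update: the area is rewritten on the next context
+     * switch anyway.
+     */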
+ if ( xchg(&v->runstate_in_use, 1) )
+ return;
+
+ switch ( v->runstate_guest_type )
+ {
+ case RUNSTATE_NONE:
+ break;
+
+ case RUNSTATE_VADDR:
+ update_runstate_by_gvaddr(v);
+ break;
+
+ case RUNSTATE_PADDR:
+ update_runstate_by_gpaddr(v);
+ break;
+ }
+
+ xchg(&v->runstate_in_use, 0);
+}
+
static void schedule_tail(struct vcpu *prev)
{
ctxt_switch_from(prev);
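Both update paths set XEN_RUNSTATE_UPDATE in state_entry_time before rewriting the area and clear it afterwards, with smp_wmb() in between, so a guest that enabled VM_ASSIST(runstate_update_flag) can detect torn reads. A guest-side reader might look like the sketch below (READ_ONCE/smp_rmb stand in for whatever the guest environment provides; this is illustrative, not part of the patch):

    static void runstate_snapshot(const struct vcpu_runstate_info *area,
                                  struct vcpu_runstate_info *snap)
    {
        uint64_t t;

        do {
            t = READ_ONCE(area->state_entry_time);
            smp_rmb();          /* sample the flag before copying the body */
            *snap = *area;
            smp_rmb();          /* copy the body before re-checking */
        } while ( (t & XEN_RUNSTATE_UPDATE) ||
                  READ_ONCE(area->state_entry_time) != t );
    }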
@@ -998,6 +1048,7 @@ long do_arm_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) a
{
case VCPUOP_register_vcpu_info:
case VCPUOP_register_runstate_memory_area:
+ case VCPUOP_register_runstate_phys_memory_area:
return do_vcpu_op(cmd, vcpuid, arg);
default:
return -EINVAL;
@@ -700,6 +700,18 @@ int rcu_lock_live_remote_domain_by_id(domid_t dom, struct domain **d)
return 0;
}
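+/* Reset the runstate area; callers must hold v->runstate_in_use. */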
+static void discard_runstate_area(struct vcpu *v)
+{
+ v->runstate_guest_type = RUNSTATE_NONE;
+}
+
+static void discard_runstate_area_locked(struct vcpu *v)
+{
+ while ( xchg(&v->runstate_in_use, 1) );
+ discard_runstate_area(v);
+ xchg(&v->runstate_in_use, 0);
+}
+
int domain_kill(struct domain *d)
{
int rc = 0;
@@ -738,7 +750,10 @@ int domain_kill(struct domain *d)
if ( cpupool_move_domain(d, cpupool0) )
return -ERESTART;
for_each_vcpu ( d, v )
+ {
+ discard_runstate_area_locked(v);
unmap_vcpu_info(v);
+ }
d->is_dying = DOMDYING_dead;
/* Mem event cleanup has to go here because the rings
* have to be put before we call put_domain. */
@@ -1192,7 +1207,7 @@ int domain_soft_reset(struct domain *d)
for_each_vcpu ( d, v )
{
- set_xen_guest_handle(runstate_guest(v), NULL);
+ discard_runstate_area_locked(v);
unmap_vcpu_info(v);
}
@@ -1520,18 +1535,46 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
rc = 0;
- runstate_guest(v) = area.addr.h;
+
+        while ( xchg(&v->runstate_in_use, 1) );
+
+ discard_runstate_area(v);
+
+ runstate_guest_virt(v) = area.addr.h;
+ v->runstate_guest_type = RUNSTATE_VADDR;
if ( v == current )
{
- __copy_to_guest(runstate_guest(v), &v->runstate, 1);
+ __copy_to_guest(runstate_guest_virt(v), &v->runstate, 1);
}
else
{
vcpu_runstate_get(v, &runstate);
- __copy_to_guest(runstate_guest(v), &runstate, 1);
+ __copy_to_guest(runstate_guest_virt(v), &runstate, 1);
}
+ xchg(&v->runstate_in_use, 0);
+
+ break;
+ }
+
+ case VCPUOP_register_runstate_phys_memory_area:
+ {
+ struct vcpu_register_runstate_memory_area area;
+
+ rc = -EFAULT;
+ if ( copy_from_guest(&area, arg, 1) )
+ break;
+
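+        /*
+         * Only the GPA is recorded here; it is translated and written on
+         * every context switch by update_runstate_by_gpaddr().
+         */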
+        while ( xchg(&v->runstate_in_use, 1) );
+
+ discard_runstate_area(v);
+ v->runstate_guest.phys = area.addr.p;
+ v->runstate_guest_type = RUNSTATE_PADDR;
+
+ xchg(&v->runstate_in_use, 0);
+ rc = 0;
+
break;
}
@@ -235,6 +235,21 @@ struct vcpu_register_time_memory_area {
typedef struct vcpu_register_time_memory_area vcpu_register_time_memory_area_t;
DEFINE_XEN_GUEST_HANDLE(vcpu_register_time_memory_area_t);
+/*
+ * Register a shared memory area from which the guest may obtain its own
+ * runstate information without needing to execute a hypercall.
+ * Notes:
+ * 1. The registered address must be a guest physical address.
+ * 2. The registered runstate area must not cross a page boundary.
+ * 3. Only one shared area may be registered per VCPU. The shared area is
+ * updated by the hypervisor each time the VCPU is scheduled. Thus
+ * runstate.state will always be RUNSTATE_running and
+ * runstate.state_entry_time will indicate the system time at which the
+ * VCPU was last scheduled to run.
+ * @extra_arg == pointer to vcpu_register_runstate_memory_area structure.
+ */
+#define VCPUOP_register_runstate_phys_memory_area 14
+
#endif /* __XEN_PUBLIC_VCPU_H__ */
/*
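For reference, a guest registers the area once per vCPU, passing a guest physical address in addr.p. A minimal sketch against a Linux-style guest (HYPERVISOR_vcpu_op, virt_to_phys and the per-CPU helpers as commonly provided; the buffer name is illustrative, and this is a sketch rather than a reference implementation):

    static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

    static int register_runstate_gpa(unsigned int cpu)
    {
        struct vcpu_register_runstate_memory_area area = {};

        /* Must be a guest physical address and must not cross a page. */
        area.addr.p = virt_to_phys(&per_cpu(xen_runstate, cpu));

        return HYPERVISOR_vcpu_op(VCPUOP_register_runstate_phys_memory_area,
                                  cpu, &area);
    }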
@@ -163,17 +163,31 @@ struct vcpu
void *sched_priv; /* scheduler-specific data */
struct vcpu_runstate_info runstate;
+
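+    /* How (and whether) the guest registered its runstate area. */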
+ enum {
+ RUNSTATE_NONE = 0,
+ RUNSTATE_PADDR = 1,
+ RUNSTATE_VADDR = 2,
+ } runstate_guest_type;
+
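+    /* Non-zero while the area is being updated or (un)registered. */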
+ unsigned long runstate_in_use;
+
+ union
+ {
#ifndef CONFIG_COMPAT
-# define runstate_guest(v) ((v)->runstate_guest)
- XEN_GUEST_HANDLE(vcpu_runstate_info_t) runstate_guest; /* guest address */
+# define runstate_guest_virt(v) ((v)->runstate_guest.virt)
+ XEN_GUEST_HANDLE(vcpu_runstate_info_t) virt; /* guest address */
#else
-# define runstate_guest(v) ((v)->runstate_guest.native)
- union {
- XEN_GUEST_HANDLE(vcpu_runstate_info_t) native;
- XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
- } runstate_guest; /* guest address */
+# define runstate_guest_virt(v) ((v)->runstate_guest.virt.native)
+ union {
+ XEN_GUEST_HANDLE(vcpu_runstate_info_t) native;
+ XEN_GUEST_HANDLE(vcpu_runstate_info_compat_t) compat;
+ } virt; /* guest address */
#endif
+ paddr_t phys;
+ } runstate_guest;
+
/* last time when vCPU is scheduled out */
uint64_t last_run_time;