@@ -1791,6 +1791,7 @@ typedef struct CPUArchState {
uint64_t xen_vcpu_info_gpa;
uint64_t xen_vcpu_info_default_gpa;
uint64_t xen_vcpu_time_info_gpa;
+ uint64_t xen_vcpu_runstate_gpa;
#endif
#if defined(CONFIG_HVF)
HVFX86LazyFlags hvf_lflags;
@@ -160,6 +160,7 @@ int kvm_xen_init_vcpu(CPUState *cs)
env->xen_vcpu_info_gpa = INVALID_GPA;
env->xen_vcpu_info_default_gpa = INVALID_GPA;
env->xen_vcpu_time_info_gpa = INVALID_GPA;
+ env->xen_vcpu_runstate_gpa = INVALID_GPA;
return 0;
}
@@ -254,6 +255,17 @@ static void do_set_vcpu_time_info_gpa(CPUState *cs, run_on_cpu_data data)
env->xen_vcpu_time_info_gpa);
}
+static void do_set_vcpu_runstate_gpa(CPUState *cs, run_on_cpu_data data) /* runs on the target vCPU via async_run_on_cpu() */
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_runstate_gpa = data.host_ulong; /* cache the GPA so migration (kvm_put_xen_state) can restore it */
+
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR, /* point KVM's runstate shadow at the guest area */
+ env->xen_vcpu_runstate_gpa);
+}
+
static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
{
X86CPU *cpu = X86_CPU(cs);
@@ -262,10 +274,14 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
env->xen_vcpu_info_gpa = INVALID_GPA;
env->xen_vcpu_info_default_gpa = INVALID_GPA;
env->xen_vcpu_time_info_gpa = INVALID_GPA;
+ env->xen_vcpu_runstate_gpa = INVALID_GPA;
kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
INVALID_GPA);
+ /* Also invalidate KVM's runstate mapping; the guest must re-register it. */
+ kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
+ INVALID_GPA);
}
static int xen_set_shared_info(uint64_t gfn)
@@ -517,6 +533,35 @@ static int vcpuop_register_vcpu_time_info(CPUState *cs, CPUState *target,
return 0;
}
+static int vcpuop_register_runstate_info(CPUState *cs, CPUState *target, /* handle VCPUOP_register_runstate_memory_area for 'target' */
+ uint64_t arg)
+{
+ struct vcpu_register_runstate_memory_area rma;
+ uint64_t gpa;
+ size_t len;
+
+ /* The struct is 8 bytes on both 32-bit and 64-bit ABIs: no compat copy */
+ qemu_build_assert(sizeof(rma) == 8);
+ /* The runstate area actually does change size, but Linux copes. */
+
+ if (!target) { /* no vCPU with the requested vcpu_id */
+ return -ENOENT;
+ }
+
+ if (kvm_copy_from_gva(cs, arg, &rma, sizeof(rma))) { /* arg is a guest virtual pointer to the request struct */
+ return -EFAULT;
+ }
+
+ /* As with vcpu_time_info, Xen actually uses the GVA but KVM doesn't. */
+ if (!kvm_gva_to_gpa(cs, rma.addr.p, &gpa, &len, false)) { /* NOTE(review): no check that the area fits in 'len' / one page — confirm */
+ return -EFAULT;
+ }
+
+ async_run_on_cpu(target, do_set_vcpu_runstate_gpa, /* apply on the target vCPU's own thread */
+ RUN_ON_CPU_HOST_ULONG(gpa));
+ return 0;
+}
+
static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, int vcpu_id, uint64_t arg)
{
@@ -525,6 +570,9 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int err;
switch (cmd) {
+ case VCPUOP_register_runstate_memory_area:
+ err = vcpuop_register_runstate_info(cs, dest, arg);
+ break;
case VCPUOP_register_vcpu_time_memory_area:
err = vcpuop_register_vcpu_time_info(cs, dest, arg);
break;
@@ -730,6 +778,15 @@ int kvm_put_xen_state(CPUState *cs)
}
}
+ gpa = env->xen_vcpu_runstate_gpa;
+ if (gpa != INVALID_GPA) {
+ ret = kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
+ gpa);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
return 0;
}
@@ -1273,6 +1273,7 @@ static const VMStateDescription vmstate_xen_vcpu = {
VMSTATE_UINT64(env.xen_vcpu_info_gpa, X86CPU),
VMSTATE_UINT64(env.xen_vcpu_info_default_gpa, X86CPU),
VMSTATE_UINT64(env.xen_vcpu_time_info_gpa, X86CPU),
+ VMSTATE_UINT64(env.xen_vcpu_runstate_gpa, X86CPU),
VMSTATE_END_OF_LIST()
}
};