@@ -1081,6 +1081,37 @@ to control events for any other register will fail with -KVM_EINVAL::
* -KVM_EINVAL - padding is not zero
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+23. KVMI_GET_XSAVE
+------------------
+
+:Architecture: x86
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_hdr;
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+ struct kvmi_get_xsave_reply {
+ __u32 region[0];
+ };
+
+Returns a buffer containing the XSAVE area. Currently, the size of
+``struct kvm_xsave`` is used, but this may change in the future. Userspace
+should determine the buffer size from the size of the reply message.
+
+:Errors:
+
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EINVAL - padding is not zero
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+* -KVM_ENOMEM - not enough memory to allocate the reply
+
Events
======
@@ -97,4 +97,8 @@ struct kvmi_event_msr_reply {
__u64 new_val;
};
+/*
+ * Reply to KVMI_GET_XSAVE: a variable-length buffer holding the vCPU's
+ * XSAVE area. Userspace derives its length from the reply message size.
+ */
+struct kvmi_get_xsave_reply {
+ __u32 region[0];
+};
+
#endif /* _UAPI_ASM_X86_KVMI_H */
@@ -790,3 +790,24 @@ int kvmi_arch_cmd_control_spp(struct kvmi *ikvm)
{
return kvm_arch_init_spp(ikvm->kvm);
}
+
+/*
+ * Builds the KVMI_GET_XSAVE reply: allocates a message buffer large
+ * enough for the reply header plus a struct kvm_xsave, then copies the
+ * vCPU's XSAVE area into it via kvm_vcpu_ioctl_x86_get_xsave().
+ *
+ * On success returns 0 and hands ownership of the buffer to the caller
+ * through *dest (total size in *dest_size); the caller releases it with
+ * kvmi_msg_free(). Returns -KVM_ENOMEM if the allocation fails, in
+ * which case *dest and *dest_size are left untouched.
+ */
+int kvmi_arch_cmd_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvmi_get_xsave_reply **dest,
+ size_t *dest_size)
+{
+ struct kvmi_get_xsave_reply *rpl = NULL;
+ size_t rpl_size = sizeof(*rpl) + sizeof(struct kvm_xsave);
+ struct kvm_xsave *area;
+
+ rpl = kvmi_msg_alloc_check(rpl_size);
+ if (!rpl)
+ return -KVM_ENOMEM;
+
+ /* The XSAVE data is written directly into the reply's region[] buffer. */
+ area = (struct kvm_xsave *) &rpl->region[0];
+ kvm_vcpu_ioctl_x86_get_xsave(vcpu, area);
+
+ *dest = rpl;
+ *dest_size = rpl_size;
+
+ return 0;
+}
@@ -3745,8 +3745,8 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
}
}
-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
- struct kvm_xsave *guest_xsave)
+void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave)
{
if (boot_cpu_has(X86_FEATURE_XSAVE)) {
memset(guest_xsave, 0, sizeof(struct kvm_xsave));
@@ -805,6 +805,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvm_xsave *guest_xsave);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);
@@ -255,6 +255,9 @@ void kvmi_arch_trap_event(struct kvm_vcpu *vcpu);
int kvmi_arch_cmd_get_cpuid(struct kvm_vcpu *vcpu,
const struct kvmi_get_cpuid *req,
struct kvmi_get_cpuid_reply *rpl);
+int kvmi_arch_cmd_get_xsave(struct kvm_vcpu *vcpu,
+ struct kvmi_get_xsave_reply **dest,
+ size_t *dest_size);
int kvmi_arch_cmd_get_vcpu_info(struct kvm_vcpu *vcpu,
struct kvmi_get_vcpu_info_reply *rpl);
int kvmi_arch_cmd_inject_exception(struct kvm_vcpu *vcpu, u8 vector,
@@ -38,6 +38,7 @@ static const char *const msg_IDs[] = {
[KVMI_GET_REGISTERS] = "KVMI_GET_REGISTERS",
[KVMI_GET_VCPU_INFO] = "KVMI_GET_VCPU_INFO",
[KVMI_GET_VERSION] = "KVMI_GET_VERSION",
+ [KVMI_GET_XSAVE] = "KVMI_GET_XSAVE",
[KVMI_INJECT_EXCEPTION] = "KVMI_INJECT_EXCEPTION",
[KVMI_PAUSE_VCPU] = "KVMI_PAUSE_VCPU",
[KVMI_READ_PHYSICAL] = "KVMI_READ_PHYSICAL",
@@ -700,6 +701,21 @@ static int handle_get_cpuid(struct kvm_vcpu *vcpu,
return reply_cb(vcpu, msg, ec, &rpl, sizeof(rpl));
}
+/*
+ * Handler for the KVMI_GET_XSAVE vCPU command.
+ *
+ * The reply buffer is allocated and filled by kvmi_arch_cmd_get_xsave();
+ * on failure rpl stays NULL and rpl_size stays 0, so only the error code
+ * is sent back to the introspector. rpl is unconditionally passed to
+ * kvmi_msg_free() afterwards (NOTE(review): presumably NULL-safe, like
+ * kfree() — confirm).
+ */
+static int handle_get_xsave(struct kvm_vcpu *vcpu,
+ const struct kvmi_msg_hdr *msg, const void *req,
+ vcpu_reply_fct reply_cb)
+{
+ struct kvmi_get_xsave_reply *rpl = NULL;
+ size_t rpl_size = 0;
+ int err, ec;
+
+ ec = kvmi_arch_cmd_get_xsave(vcpu, &rpl, &rpl_size);
+
+ /* Send the reply (or just the error code) back, then drop the buffer. */
+ err = reply_cb(vcpu, msg, ec, rpl, rpl_size);
+ kvmi_msg_free(rpl);
+ return err;
+}
+
/*
* These commands are executed on the vCPU thread. The receiving thread
* passes the messages using a newly allocated 'struct kvmi_vcpu_cmd'
@@ -716,6 +732,7 @@ static int(*const msg_vcpu[])(struct kvm_vcpu *,
[KVMI_GET_CPUID] = handle_get_cpuid,
[KVMI_GET_REGISTERS] = handle_get_registers,
[KVMI_GET_VCPU_INFO] = handle_get_vcpu_info,
+ [KVMI_GET_XSAVE] = handle_get_xsave,
[KVMI_INJECT_EXCEPTION] = handle_inject_exception,
[KVMI_SET_REGISTERS] = handle_set_registers,
};