@@ -830,6 +830,35 @@ Returns the value of an extended control register XCR.
* -KVM_EINVAL - the padding is not zero
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+19. KVMI_VCPU_GET_XSAVE
+-----------------------
+
+:Architectures: x86
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_hdr;
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+ struct kvmi_vcpu_get_xsave_reply {
+ struct kvm_xsave xsave;
+ };
+
+Returns a buffer containing the XSAVE area of the selected vCPU.
+
+:Errors:
+
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EINVAL - the padding is not zero
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+* -KVM_ENOMEM - there is not enough memory to allocate the reply
+
Events
======
@@ -111,4 +111,8 @@ struct kvmi_vcpu_get_xcr_reply {
u64 value;
};
+/*
+ * Reply to the KVMI_VCPU_GET_XSAVE command: a buffer holding the
+ * vCPU's XSAVE area (same layout as the KVM_GET_XSAVE ioctl buffer).
+ */
+struct kvmi_vcpu_get_xsave_reply {
+ struct kvm_xsave xsave;
+};
+
#endif /* _UAPI_ASM_X86_KVMI_H */
@@ -194,12 +194,32 @@ static int handle_vcpu_get_xcr(const struct kvmi_vcpu_msg_job *job,
return kvmi_msg_vcpu_reply(job, msg, ec, &rpl, sizeof(rpl));
}
+/*
+ * Handles the KVMI_VCPU_GET_XSAVE command: replies with the vCPU's
+ * XSAVE area, or with -KVM_ENOMEM when the reply buffer cannot be
+ * allocated.
+ */
+static int handle_vcpu_get_xsave(const struct kvmi_vcpu_msg_job *job,
+ const struct kvmi_msg_hdr *msg,
+ const void *req)
+{
+ struct kvmi_vcpu_get_xsave_reply *rpl;
+ int err, ec = 0;
+
+ rpl = kvmi_msg_alloc();
+ if (!rpl)
+ ec = -KVM_ENOMEM;
+ else
+ kvm_vcpu_ioctl_x86_get_xsave(job->vcpu, &rpl->xsave);
+
+ /*
+ * Pass 'ec' (not a hard-coded 0) so the allocation failure is
+ * actually reported to the introspection tool as -KVM_ENOMEM,
+ * as documented for this command.
+ */
+ err = kvmi_msg_vcpu_reply(job, msg, ec, rpl, sizeof(*rpl));
+
+ kvmi_msg_free(rpl);
+ return err;
+}
+
static kvmi_vcpu_msg_job_fct const msg_vcpu[] = {
	[KVMI_VCPU_CONTROL_CR] = handle_vcpu_control_cr,
	[KVMI_VCPU_GET_CPUID] = handle_vcpu_get_cpuid,
	[KVMI_VCPU_GET_INFO] = handle_vcpu_get_info,
	[KVMI_VCPU_GET_REGISTERS] = handle_vcpu_get_registers,
	[KVMI_VCPU_GET_XCR] = handle_vcpu_get_xcr,
+	/* Replies with the vCPU's XSAVE area (see handle_vcpu_get_xsave()). */
+ [KVMI_VCPU_GET_XSAVE] = handle_vcpu_get_xsave,
	[KVMI_VCPU_INJECT_EXCEPTION] = handle_vcpu_inject_exception,
	[KVMI_VCPU_SET_REGISTERS] = handle_vcpu_set_registers,
};
@@ -45,6 +45,7 @@ enum {
KVMI_VCPU_CONTROL_CR = KVMI_VCPU_MESSAGE_ID(6),
KVMI_VCPU_INJECT_EXCEPTION = KVMI_VCPU_MESSAGE_ID(7),
KVMI_VCPU_GET_XCR = KVMI_VCPU_MESSAGE_ID(8),
+ KVMI_VCPU_GET_XSAVE = KVMI_VCPU_MESSAGE_ID(9),
KVMI_NEXT_VCPU_MESSAGE
};
@@ -1448,6 +1448,31 @@ static void test_cmd_vcpu_get_xcr(struct kvm_vm *vm)
cmd_vcpu_get_xcr(vm, xcr1, &value, -KVM_EINVAL);
}
+/* Issue KVMI_VCPU_GET_XSAVE for vCPU 0 and expect a successful reply. */
+static void cmd_vcpu_get_xsave(struct kvm_vm *vm)
+{
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vcpu_hdr vcpu_hdr;
+ } req = {};
+ struct kvm_xsave xsave;
+
+ test_vcpu0_command(vm, KVMI_VCPU_GET_XSAVE, &req.hdr, sizeof(req),
+ &xsave, sizeof(xsave), 0);
+}
+
+/*
+ * Exercise KVMI_VCPU_GET_XSAVE, skipping the test when the host
+ * does not advertise XSAVE support in CPUID leaf 1 (ECX).
+ */
+static void test_cmd_vcpu_get_xsave(struct kvm_vm *vm)
+{
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ if (entry->ecx & X86_FEATURE_XSAVE)
+ cmd_vcpu_get_xsave(vm);
+ else
+ print_skip("XSAVE not supported, ecx 0x%x", entry->ecx);
+}
+
static void test_introspection(struct kvm_vm *vm)
{
srandom(time(0));
@@ -1476,6 +1501,7 @@ static void test_introspection(struct kvm_vm *vm)
test_cmd_vm_get_max_gfn();
test_event_xsetbv(vm);
test_cmd_vcpu_get_xcr(vm);
+ test_cmd_vcpu_get_xsave(vm);
unhook_introspection(vm);
}