@@ -933,6 +933,42 @@ currently being handled is replied to.
* -KVM_EINVAL - padding is not zero
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+19. KVMI_GET_CPUID
+------------------
+
+:Architectures: x86
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_get_cpuid {
+ __u32 function;
+ __u32 index;
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+ struct kvmi_get_cpuid_reply {
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ };
+
+Returns the values of the CPUID leaf selected by ``function``/``index``, as seen by the guest OS.
+
+:Errors:
+
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EINVAL - padding is not zero
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+* -KVM_ENOENT - the selected leaf is not present or is invalid
+
Events
======
@@ -41,4 +41,16 @@ struct kvmi_get_registers_reply {
struct kvm_msrs msrs;
};
+struct kvmi_get_cpuid {
+	__u32 function;	/* CPUID leaf to query (EAX input to CPUID) */
+	__u32 index;	/* subleaf (ECX input); 0 if the leaf has no subleaves */
+};
+
+struct kvmi_get_cpuid_reply {
+	__u32 eax;	/* output register values for the requested leaf, */
+	__u32 ebx;	/* as seen by the guest (from the vCPU's CPUID table) */
+	__u32 ecx;
+	__u32 edx;
+};
+
#endif /* _UAPI_ASM_X86_KVMI_H */
@@ -5,6 +5,7 @@
* Copyright (C) 2019 Bitdefender S.R.L.
*/
#include "x86.h"
+#include "cpuid.h"
#include "../../../virt/kvm/kvmi_int.h"
static void *alloc_get_registers_reply(const struct kvmi_msg_hdr *msg,
@@ -211,6 +212,24 @@ bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
return ret;
}
+int kvmi_arch_cmd_get_cpuid(struct kvm_vcpu *vcpu,
+			    const struct kvmi_get_cpuid *req,
+			    struct kvmi_get_cpuid_reply *rpl)
+{
+	struct kvm_cpuid_entry2 *e;	/* guest-visible CPUID entry, if any */
+
+	e = kvm_find_cpuid_entry(vcpu, req->function, req->index);
+	if (!e)
+		return -KVM_ENOENT;	/* leaf/subleaf not in this vCPU's CPUID table */
+
+	rpl->eax = e->eax;	/* copy the four output registers into the reply */
+	rpl->ebx = e->ebx;
+	rpl->ecx = e->ecx;
+	rpl->edx = e->edx;
+
+	return 0;	/* success; caller sends rpl back to the introspection tool */
+}
+
int kvmi_arch_cmd_get_vcpu_info(struct kvm_vcpu *vcpu,
struct kvmi_get_vcpu_info_reply *rpl)
{
@@ -19,6 +19,7 @@
#define KVM_EOPNOTSUPP 95
#define KVM_EAGAIN 11
#define KVM_EBUSY EBUSY
+#define KVM_ENOENT ENOENT
#define KVM_ENOMEM ENOMEM
#define KVM_HC_VAPIC_POLL_IRQ 1
@@ -230,6 +230,9 @@ int kvmi_arch_cmd_set_page_write_bitmap(struct kvmi *ikvm,
void kvmi_arch_setup_event(struct kvm_vcpu *vcpu, struct kvmi_event *ev);
bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
u8 access);
+int kvmi_arch_cmd_get_cpuid(struct kvm_vcpu *vcpu,
+ const struct kvmi_get_cpuid *req,
+ struct kvmi_get_cpuid_reply *rpl);
int kvmi_arch_cmd_get_vcpu_info(struct kvm_vcpu *vcpu,
struct kvmi_get_vcpu_info_reply *rpl);
@@ -29,6 +29,7 @@ static const char *const msg_IDs[] = {
[KVMI_CONTROL_VM_EVENTS] = "KVMI_CONTROL_VM_EVENTS",
[KVMI_EVENT] = "KVMI_EVENT",
[KVMI_EVENT_REPLY] = "KVMI_EVENT_REPLY",
+ [KVMI_GET_CPUID] = "KVMI_GET_CPUID",
[KVMI_GET_GUEST_INFO] = "KVMI_GET_GUEST_INFO",
[KVMI_GET_PAGE_ACCESS] = "KVMI_GET_PAGE_ACCESS",
[KVMI_GET_PAGE_WRITE_BITMAP] = "KVMI_GET_PAGE_WRITE_BITMAP",
@@ -641,6 +642,20 @@ static int handle_control_events(struct kvm_vcpu *vcpu,
return reply_cb(vcpu, msg, ec, NULL, 0);
}
+static int handle_get_cpuid(struct kvm_vcpu *vcpu,
+			    const struct kvmi_msg_hdr *msg,
+			    const void *req, vcpu_reply_fct reply_cb)
+{
+	struct kvmi_get_cpuid_reply rpl;	/* stack reply; zeroed below */
+	int ec;	/* error code forwarded to the reply (0 or -KVM_ENOENT) */
+
+	memset(&rpl, 0, sizeof(rpl));	/* no uninitialized stack bytes in the reply */
+
+	ec = kvmi_arch_cmd_get_cpuid(vcpu, req, &rpl);	/* req validated by dispatcher */
+
+	return reply_cb(vcpu, msg, ec, &rpl, sizeof(rpl));	/* rpl stays zeroed on error */
+}
+
/*
* These commands are executed on the vCPU thread. The receiving thread
* passes the messages using a newly allocated 'struct kvmi_vcpu_cmd'
@@ -652,6 +667,7 @@ static int(*const msg_vcpu[])(struct kvm_vcpu *,
vcpu_reply_fct) = {
[KVMI_CONTROL_EVENTS] = handle_control_events,
[KVMI_EVENT_REPLY] = handle_event_reply,
+ [KVMI_GET_CPUID] = handle_get_cpuid,
[KVMI_GET_REGISTERS] = handle_get_registers,
[KVMI_GET_VCPU_INFO] = handle_get_vcpu_info,
[KVMI_SET_REGISTERS] = handle_set_registers,