@@ -557,6 +557,50 @@ the *KVMI_VM_CONTROL_EVENTS* command.
* -KVM_EPERM - the access is disallowed (use *KVMI_VM_CHECK_EVENT* first)
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+11. KVMI_VCPU_GET_REGISTERS
+---------------------------
+
+:Architectures: x86
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_vcpu_get_registers {
+ __u16 nmsrs;
+ __u16 padding1;
+ __u32 padding2;
+ __u32 msrs_idx[0];
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+ struct kvmi_vcpu_get_registers_reply {
+ __u32 mode;
+ __u32 padding;
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ struct kvm_msrs msrs;
+ };
+
+For the given vCPU and the ``nmsrs``-sized array of MSR indices,
+returns the current vCPU mode (in bytes: 2, 4 or 8), the general purpose
+registers, the special registers and the requested set of MSRs.
+
+:Errors:
+
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EINVAL - one of the indicated MSRs is invalid
+* -KVM_EINVAL - the padding is not zero
+* -KVM_EINVAL - the reply size is larger than
+ kvmi_get_version_reply.max_msg_size (too many MSRs)
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+* -KVM_ENOMEM - there is not enough memory to allocate the reply
+
Events
======
@@ -30,4 +30,19 @@ struct kvmi_vcpu_event_arch {
} msrs;
};
+struct kvmi_vcpu_get_registers {
+ __u16 nmsrs; /* number of indices in msrs_idx[] */
+ __u16 padding1; /* must be zero */
+ __u32 padding2; /* must be zero */
+ __u32 msrs_idx[0]; /* MSR indices to read */
+};
+
+struct kvmi_vcpu_get_registers_reply {
+ __u32 mode; /* vCPU mode, in bytes: 2, 4 or 8 */
+ __u32 padding;
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ struct kvm_msrs msrs; /* variable sized: nmsrs entries follow */
+};
+
#endif /* _UAPI_ASM_X86_KVMI_H */
@@ -93,3 +93,28 @@ void kvmi_arch_setup_vcpu_event(struct kvm_vcpu *vcpu,
ev->arch.mode = kvmi_vcpu_mode(vcpu, &event->sregs);
kvmi_get_msrs(vcpu, event);
}
+
+/*
+ * Copy the general purpose registers, the special registers, the vCPU
+ * mode and the values of the MSRs requested in @req into @rpl.
+ *
+ * Returns 0 on success or -KVM_EINVAL if reading any of the requested
+ * MSRs fails (rpl->msrs may be only partially filled in that case).
+ */
+int kvmi_arch_cmd_vcpu_get_registers(struct kvm_vcpu *vcpu,
+ const struct kvmi_vcpu_get_registers *req,
+ struct kvmi_vcpu_get_registers_reply *rpl)
+{
+ /* host-initiated access (not emulating a guest rdmsr) */
+ struct msr_data m = {.host_initiated = true};
+ int k, err = 0;
+
+ kvm_arch_vcpu_get_regs(vcpu, &rpl->regs);
+ kvm_arch_vcpu_get_sregs(vcpu, &rpl->sregs);
+ rpl->mode = kvmi_vcpu_mode(vcpu, &rpl->sregs);
+ rpl->msrs.nmsrs = req->nmsrs;
+
+ /* Stop at the first MSR that cannot be read. */
+ for (k = 0; k < req->nmsrs && !err; k++) {
+ m.index = req->msrs_idx[k];
+
+ err = kvm_x86_ops.get_msr(vcpu, &m);
+ if (!err) {
+ rpl->msrs.entries[k].index = m.index;
+ rpl->msrs.entries[k].data = m.data;
+ }
+ }
+
+ return err ? -KVM_EINVAL : 0;
+}
new file mode 100644
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_X86_KVM_KVMI_H
+#define ARCH_X86_KVM_KVMI_H
+
+/* Arch-specific (x86) introspection command implementations. */
+int kvmi_arch_cmd_vcpu_get_registers(struct kvm_vcpu *vcpu,
+ const struct kvmi_vcpu_get_registers *req,
+ struct kvmi_vcpu_get_registers_reply *rpl);
+
+#endif
@@ -7,6 +7,7 @@
*/
#include "../../../virt/kvm/introspection/kvmi_int.h"
+#include "kvmi.h"
static int handle_vcpu_get_info(const struct kvmi_vcpu_msg_job *job,
const struct kvmi_msg_hdr *msg,
@@ -21,8 +22,77 @@ static int handle_vcpu_get_info(const struct kvmi_vcpu_msg_job *job,
return kvmi_msg_vcpu_reply(job, msg, 0, &rpl, sizeof(rpl));
}
+/*
+ * Validate the incoming command: the padding fields must be zero and
+ * the message must be large enough to contain the vCPU header plus the
+ * nmsrs-sized MSR index array.
+ */
+static bool is_valid_get_regs_request(const struct kvmi_msg_hdr *msg,
+ const struct kvmi_vcpu_get_registers *req)
+{
+ size_t req_size, msg_size;
+
+ if (req->padding1 || req->padding2)
+ return false;
+
+ req_size = struct_size(req, msrs_idx, req->nmsrs);
+
+ /* guard against overflow when adding the vCPU header size */
+ if (check_add_overflow(sizeof(struct kvmi_vcpu_hdr),
+ req_size, &msg_size))
+ return false;
+
+ if (msg_size > msg->size)
+ return false;
+
+ return true;
+}
+
+/*
+ * Check that the reply (error code + reply struct + nmsrs MSR entries)
+ * fits in the maximum message size. On success, store the reply payload
+ * size in *ptr_rpl_size.
+ */
+static bool is_valid_get_regs_reply(const struct kvmi_vcpu_get_registers *req,
+ size_t *ptr_rpl_size)
+{
+ struct kvmi_vcpu_get_registers_reply *rpl;
+ size_t rpl_size, msg_size;
+
+ rpl_size = struct_size(rpl, msrs.entries, req->nmsrs);
+
+ if (check_add_overflow(sizeof(struct kvmi_error_code),
+ rpl_size, &msg_size))
+ return false;
+
+ if (msg_size > KVMI_MAX_MSG_SIZE)
+ return false;
+
+ *ptr_rpl_size = rpl_size;
+ return true;
+}
+
+/*
+ * KVMI_VCPU_GET_REGISTERS handler: validate the request and the
+ * resulting reply size, allocate the variable-sized reply, fill it and
+ * send it back. A reply is always sent, carrying -KVM_EINVAL or
+ * -KVM_ENOMEM as the error code on failure.
+ */
+static int handle_vcpu_get_registers(const struct kvmi_vcpu_msg_job *job,
+ const struct kvmi_msg_hdr *msg,
+ const void *req)
+{
+ struct kvmi_vcpu_get_registers_reply *rpl = NULL;
+ size_t rpl_size;
+ int err, ec;
+
+ if (!is_valid_get_regs_request(msg, req) ||
+ !is_valid_get_regs_reply(req, &rpl_size)) {
+ ec = -KVM_EINVAL;
+ goto reply;
+ }
+
+ rpl = kvmi_msg_alloc();
+ if (!rpl) {
+ ec = -KVM_ENOMEM;
+ goto reply;
+ }
+
+ ec = kvmi_arch_cmd_vcpu_get_registers(job->vcpu, req, rpl);
+
+reply:
+ /* rpl may be NULL on the error paths; send a zero-sized payload then */
+ err = kvmi_msg_vcpu_reply(job, msg, ec, rpl, rpl ? rpl_size : 0);
+
+ kvmi_msg_free(rpl);
+ return err;
+}
+
static kvmi_vcpu_msg_job_fct const msg_vcpu[] = {
- [KVMI_VCPU_GET_INFO] = handle_vcpu_get_info,
+ [KVMI_VCPU_GET_INFO] = handle_vcpu_get_info,
+ [KVMI_VCPU_GET_REGISTERS] = handle_vcpu_get_registers,
};
kvmi_vcpu_msg_job_fct kvmi_arch_vcpu_msg_handler(u16 id)
@@ -37,6 +37,7 @@ enum {
KVMI_VCPU_GET_INFO = KVMI_VCPU_MESSAGE_ID(1),
KVMI_VCPU_CONTROL_EVENTS = KVMI_VCPU_MESSAGE_ID(2),
+ KVMI_VCPU_GET_REGISTERS = KVMI_VCPU_MESSAGE_ID(3),
KVMI_NEXT_VCPU_MESSAGE
};
@@ -808,6 +808,64 @@ static void test_cmd_vcpu_control_events(struct kvm_vm *vm)
}
+/*
+ * Issue KVMI_VCPU_GET_REGISTERS for vCPU0 (requesting no MSRs) and
+ * copy the returned general purpose registers into @regs.
+ */
+static void cmd_vcpu_get_registers(struct kvm_vm *vm, struct kvm_regs *regs)
+{
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vcpu_hdr vcpu_hdr;
+ struct kvmi_vcpu_get_registers cmd;
+ } req = {};
+ struct kvmi_vcpu_get_registers_reply rpl;
+
+ test_vcpu0_command(vm, KVMI_VCPU_GET_REGISTERS, &req.hdr, sizeof(req),
+ &rpl, sizeof(rpl), 0);
+
+ memcpy(regs, &rpl.regs, sizeof(*regs));
+}
+
+/*
+ * Negative tests for KVMI_VCPU_GET_REGISTERS: an invalid MSR index and
+ * a request whose reply would exceed the maximum message size must both
+ * fail with -KVM_EINVAL.
+ */
+static void test_invalid_vcpu_get_registers(struct kvm_vm *vm)
+{
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vcpu_hdr vcpu_hdr;
+ struct kvmi_vcpu_get_registers cmd;
+ __u32 msrs_idx[1];
+ } req = {};
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vcpu_hdr vcpu_hdr;
+ struct kvmi_vcpu_get_registers cmd;
+ } *req_big;
+ struct kvmi_vcpu_get_registers_reply rpl;
+ struct kvmi_get_version_reply version;
+
+ /* An invalid MSR index must make the command fail. */
+ req.cmd.nmsrs = 1;
+ req.cmd.msrs_idx[0] = 0xffffffff;
+ test_vcpu0_command(vm, KVMI_VCPU_GET_REGISTERS,
+ &req.hdr, sizeof(req),
+ &rpl, sizeof(rpl), -KVM_EINVAL);
+
+ cmd_vm_get_version(&version);
+
+ /*
+ * Request so many MSRs that the reply would be larger than
+ * max_msg_size. The original test built req_big but then sent the
+ * small 'req' again, leaving this path untested — send req_big.
+ */
+ req_big = calloc(1, version.max_msg_size);
+ TEST_ASSERT(req_big, "calloc() failed\n");
+ req_big->cmd.nmsrs = (version.max_msg_size - sizeof(*req_big)) / sizeof(__u32);
+ test_vcpu0_command(vm, KVMI_VCPU_GET_REGISTERS,
+ &req_big->hdr, version.max_msg_size,
+ &rpl, sizeof(rpl), -KVM_EINVAL);
+ free(req_big);
+}
+
+/*
+ * Fetch vCPU0's registers and run the negative tests.
+ * Fix: the argument was garbled to '®s' (a mis-encoded '&regs',
+ * likely an HTML-entity corruption) which would not compile.
+ */
+static void test_cmd_vcpu_get_registers(struct kvm_vm *vm)
+{
+ struct kvm_regs regs = {};
+
+ cmd_vcpu_get_registers(vm, &regs);
+
+ pr_debug("get_registers rip 0x%llx\n", regs.rip);
+
+ test_invalid_vcpu_get_registers(vm);
+}
+
static void test_introspection(struct kvm_vm *vm)
{
srandom(time(0));
@@ -825,6 +883,7 @@ static void test_introspection(struct kvm_vm *vm)
test_cmd_vcpu_get_info(vm);
test_pause(vm);
test_cmd_vcpu_control_events(vm);
+ test_cmd_vcpu_get_registers(vm);
unhook_introspection(vm);
}