@@ -808,6 +808,38 @@ the buffer size from the message size (kvmi_msg_hdr.size).
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
* -KVM_ENOMEM - there is not enough memory to allocate the reply
+18. KVMI_VCPU_GET_MTRR_TYPE
+---------------------------
+
+:Architecture: x86
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_vcpu_get_mtrr_type {
+ __u64 gpa;
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+ struct kvmi_vcpu_get_mtrr_type_reply {
+ __u8 type;
+ __u8 padding[7];
+ };
+
+Returns the guest memory type for a specific guest physical address (``gpa``).
+
+:Errors:
+
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EINVAL - the padding from ``struct kvmi_vcpu_hdr`` is not zero
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+
Events
======
@@ -101,4 +101,13 @@ struct kvmi_vcpu_get_xsave_reply {
__u32 region[0];
};
+struct kvmi_vcpu_get_mtrr_type {
+ __u64 gpa;
+};
+
+struct kvmi_vcpu_get_mtrr_type_reply {
+ __u8 type;
+ __u8 padding[7];
+};
+
#endif /* _UAPI_ASM_X86_KVMI_H */
@@ -722,3 +722,10 @@ int kvmi_arch_cmd_vcpu_get_xsave(struct kvm_vcpu *vcpu,
return 0;
}
+
+int kvmi_arch_cmd_vcpu_get_mtrr_type(struct kvm_vcpu *vcpu, u64 gpa, u8 *type)
+{
+ *type = kvm_mtrr_get_guest_memory_type(vcpu, gpa_to_gfn(gpa));
+
+ return 0;
+}
@@ -36,7 +36,8 @@ enum {
KVMI_VM_GET_MAX_GFN = 17,
- KVMI_VCPU_GET_XSAVE = 18,
+ KVMI_VCPU_GET_XSAVE = 18,
+ KVMI_VCPU_GET_MTRR_TYPE = 19,
KVMI_NUM_MESSAGES
};
@@ -1345,6 +1345,24 @@ static void test_cmd_vcpu_get_xsave(struct kvm_vm *vm)
&rpl, sizeof(rpl));
}
+static void test_cmd_vcpu_get_mtrr_type(struct kvm_vm *vm)
+{
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vcpu_hdr vcpu_hdr;
+ struct kvmi_vcpu_get_mtrr_type cmd;
+ } req = {};
+ struct kvmi_vcpu_get_mtrr_type_reply rpl;
+
+ req.cmd.gpa = test_gpa;
+
+ test_vcpu0_command(vm, KVMI_VCPU_GET_MTRR_TYPE,
+ &req.hdr, sizeof(req),
+ &rpl, sizeof(rpl));
+
+ DEBUG("mtrr_type: gpa 0x%lx type 0x%x\n", test_gpa, rpl.type);
+}
+
static void test_introspection(struct kvm_vm *vm)
{
srandom(time(0));
@@ -1372,6 +1390,7 @@ static void test_introspection(struct kvm_vm *vm)
test_cmd_vm_get_max_gfn();
test_event_xsetbv(vm);
test_cmd_vcpu_get_xsave(vm);
+ test_cmd_vcpu_get_mtrr_type(vm);
unhook_introspection(vm);
}
@@ -106,5 +106,6 @@ void kvmi_arch_inject_exception(struct kvm_vcpu *vcpu);
int kvmi_arch_cmd_vcpu_get_xsave(struct kvm_vcpu *vcpu,
struct kvmi_vcpu_get_xsave_reply **dest,
size_t *dest_size);
+int kvmi_arch_cmd_vcpu_get_mtrr_type(struct kvm_vcpu *vcpu, u64 gpa, u8 *type);
#endif
@@ -33,6 +33,7 @@ static const char *const msg_IDs[] = {
[KVMI_VCPU_CONTROL_EVENTS] = "KVMI_VCPU_CONTROL_EVENTS",
[KVMI_VCPU_GET_CPUID] = "KVMI_VCPU_GET_CPUID",
[KVMI_VCPU_GET_INFO] = "KVMI_VCPU_GET_INFO",
+ [KVMI_VCPU_GET_MTRR_TYPE] = "KVMI_VCPU_GET_MTRR_TYPE",
[KVMI_VCPU_GET_REGISTERS] = "KVMI_VCPU_GET_REGISTERS",
[KVMI_VCPU_GET_XSAVE] = "KVMI_VCPU_GET_XSAVE",
[KVMI_VCPU_INJECT_EXCEPTION] = "KVMI_VCPU_INJECT_EXCEPTION",
@@ -576,6 +577,21 @@ static int handle_vcpu_get_xsave(const struct kvmi_vcpu_cmd_job *job,
return err;
}
+static int handle_vcpu_get_mtrr_type(const struct kvmi_vcpu_cmd_job *job,
+ const struct kvmi_msg_hdr *msg,
+ const void *_req)
+{
+ const struct kvmi_vcpu_get_mtrr_type *req = _req;
+ struct kvmi_vcpu_get_mtrr_type_reply rpl;
+ int ec;
+
+ memset(&rpl, 0, sizeof(rpl));
+
+ ec = kvmi_arch_cmd_vcpu_get_mtrr_type(job->vcpu, req->gpa, &rpl.type);
+
+ return kvmi_msg_vcpu_reply(job, msg, ec, &rpl, sizeof(rpl));
+}
+
/*
* These commands are executed from the vCPU thread. The receiving thread
* passes the messages using a newly allocated 'struct kvmi_vcpu_cmd_job'
@@ -589,6 +605,7 @@ static int(*const msg_vcpu[])(const struct kvmi_vcpu_cmd_job *,
[KVMI_VCPU_CONTROL_EVENTS] = handle_vcpu_control_events,
[KVMI_VCPU_GET_CPUID] = handle_get_cpuid,
[KVMI_VCPU_GET_INFO] = handle_get_vcpu_info,
+ [KVMI_VCPU_GET_MTRR_TYPE] = handle_vcpu_get_mtrr_type,
[KVMI_VCPU_GET_REGISTERS] = handle_get_registers,
[KVMI_VCPU_GET_XSAVE] = handle_vcpu_get_xsave,
[KVMI_VCPU_INJECT_EXCEPTION] = handle_vcpu_inject_exception,