@@ -509,6 +509,60 @@ by the *KVMI_CONTROL_VM_EVENTS* command.
* -KVM_EPERM - the access is restricted by the host
* -KVM_EOPNOTSUPP - one of the events can't be intercepted in the current setup
+9. KVMI_GET_PAGE_ACCESS
+-----------------------
+
+:Architectures: all
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_get_page_access {
+ __u16 view;
+ __u16 count;
+ __u32 padding;
+ __u64 gpa[0];
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+ struct kvmi_get_page_access_reply {
+ __u8 access[0];
+ };
+
+Returns the spte access bits (rwx) for an array of ``count`` guest
+physical addresses.
+
+The valid access bits for *KVMI_GET_PAGE_ACCESS* and *KVMI_SET_PAGE_ACCESS*
+are::
+
+ KVMI_PAGE_ACCESS_R
+ KVMI_PAGE_ACCESS_W
+ KVMI_PAGE_ACCESS_X
+
+By default, for any guest physical address, the returned access mode will
+be 'rwx' (all the above bits). If the introspection tool must prevent
+the code execution from a guest page, for example, it should use the
+KVMI_SET_PAGE_ACCESS command to set the 'rw' bits for any guest physical
+addresses contained in that page. Of course, in order to receive
+page fault events when these violations take place, the KVMI_CONTROL_EVENTS
+command must be used to enable this type of event (KVMI_EVENT_PF).
+
+On Intel hardware with multiple EPT views, the ``view`` argument selects the
+EPT view (0 is primary). On all other hardware it must be zero.
+
+:Errors:
+
+* -KVM_EINVAL - the selected SPT view is invalid
+* -KVM_EINVAL - padding is not zero
+* -KVM_EOPNOTSUPP - an SPT view was selected but the hardware doesn't support it
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+* -KVM_ENOMEM - not enough memory to allocate the reply
+
Events
======
@@ -183,3 +183,44 @@ void kvmi_arch_update_page_tracking(struct kvm *kvm,
}
}
}
+
+/*
+ * Arch-specific handler for the KVMI_GET_PAGE_ACCESS command.
+ *
+ * Validates the request (zero padding, message large enough for 'count'
+ * GPAs, supported view) and builds a reply holding one access byte per
+ * requested guest physical address, fetched via
+ * kvmi_cmd_get_page_access().
+ *
+ * On success, *dest/*dest_size receive the allocated reply; ownership
+ * passes to the caller, which frees it with kvmi_msg_free().
+ * Returns 0 on success or a -KVM_* error code.
+ */
+int kvmi_arch_cmd_get_page_access(struct kvmi *ikvm,
+				  const struct kvmi_msg_hdr *msg,
+				  const struct kvmi_get_page_access *req,
+				  struct kvmi_get_page_access_reply **dest,
+				  size_t *dest_size)
+{
+	struct kvmi_get_page_access_reply *rpl = NULL;
+	size_t rpl_size = 0;
+	size_t k, n = req->count;
+	int ec = 0;
+
+	if (req->padding)
+		return -KVM_EINVAL;
+
+	/*
+	 * The message must be big enough to carry all 'count' trailing
+	 * GPA entries (count is __u16, so the product cannot overflow).
+	 */
+	if (msg->size < sizeof(*req) + req->count * sizeof(req->gpa[0]))
+		return -KVM_EINVAL;
+
+	/* Only the default view (0) is implemented so far. */
+	if (req->view != 0)	/* TODO */
+		return -KVM_EOPNOTSUPP;
+
+	/* One access byte per requested GPA follows the reply header. */
+	rpl_size = sizeof(*rpl) + sizeof(rpl->access[0]) * n;
+	rpl = kvmi_msg_alloc_check(rpl_size);
+	if (!rpl)
+		return -KVM_ENOMEM;
+
+	/* Stop at the first GPA whose access bits can't be fetched. */
+	for (k = 0; k < n && ec == 0; k++)
+		ec = kvmi_cmd_get_page_access(ikvm, req->gpa[k],
+					      &rpl->access[k]);
+
+	if (ec) {
+		kvmi_msg_free(rpl);
+		return ec;
+	}
+
+	*dest = rpl;
+	*dest_size = rpl_size;
+
+	return 0;
+}
+
@@ -116,6 +116,17 @@ struct kvmi_get_guest_info_reply {
__u32 padding[3];
};
+/*
+ * KVMI_GET_PAGE_ACCESS request.
+ * 'count' guest physical addresses follow the fixed header in 'gpa'
+ * (variable-length trailing array).  'view' selects the EPT view on
+ * hardware with multiple views (0 = primary) and must be zero
+ * otherwise; 'padding' must be zero.
+ */
+struct kvmi_get_page_access {
+	__u16 view;
+	__u16 count;
+	__u32 padding;
+	__u64 gpa[0];
+};
+
+/*
+ * KVMI_GET_PAGE_ACCESS reply: one rwx access byte per requested GPA,
+ * in the same order as the request's 'gpa' array.
+ */
+struct kvmi_get_page_access_reply {
+	__u8 access[0];
+};
+
struct kvmi_get_vcpu_info_reply {
__u64 tsc_speed;
};
@@ -1072,6 +1072,15 @@ void kvmi_handle_requests(struct kvm_vcpu *vcpu)
kvmi_put(vcpu->kvm);
}
+/*
+ * Fetch the tracked access bits (rwx) for the guest frame containing
+ * @gpa into *access.  Always returns 0.
+ *
+ * NOTE(review): the result of kvmi_get_gfn_access() is ignored here;
+ * presumably untracked frames report the default (full) access —
+ * confirm against its definition.
+ */
+int kvmi_cmd_get_page_access(struct kvmi *ikvm, u64 gpa, u8 *access)
+{
+	gfn_t gfn = gpa_to_gfn(gpa);
+
+	kvmi_get_gfn_access(ikvm, gfn, access);
+
+	return 0;
+}
+
int kvmi_cmd_control_events(struct kvm_vcpu *vcpu, unsigned int event_id,
bool enable)
{
@@ -159,6 +159,7 @@ int kvmi_msg_send_unhook(struct kvmi *ikvm);
void *kvmi_msg_alloc(void);
void *kvmi_msg_alloc_check(size_t size);
void kvmi_msg_free(void *addr);
+int kvmi_cmd_get_page_access(struct kvmi *ikvm, u64 gpa, u8 *access);
int kvmi_cmd_control_events(struct kvm_vcpu *vcpu, unsigned int event_id,
bool enable);
int kvmi_cmd_control_vm_events(struct kvmi *ikvm, unsigned int event_id,
@@ -174,6 +175,11 @@ void kvmi_handle_common_event_actions(struct kvm_vcpu *vcpu, u32 action,
void kvmi_arch_update_page_tracking(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvmi_mem_access *m);
+int kvmi_arch_cmd_get_page_access(struct kvmi *ikvm,
+ const struct kvmi_msg_hdr *msg,
+ const struct kvmi_get_page_access *req,
+ struct kvmi_get_page_access_reply **dest,
+ size_t *dest_size);
void kvmi_arch_setup_event(struct kvm_vcpu *vcpu, struct kvmi_event *ev);
bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
u8 access);
@@ -29,6 +29,7 @@ static const char *const msg_IDs[] = {
[KVMI_EVENT] = "KVMI_EVENT",
[KVMI_EVENT_REPLY] = "KVMI_EVENT_REPLY",
[KVMI_GET_GUEST_INFO] = "KVMI_GET_GUEST_INFO",
+ [KVMI_GET_PAGE_ACCESS] = "KVMI_GET_PAGE_ACCESS",
[KVMI_GET_VCPU_INFO] = "KVMI_GET_VCPU_INFO",
[KVMI_GET_VERSION] = "KVMI_GET_VERSION",
};
@@ -323,6 +324,21 @@ static int handle_control_cmd_response(struct kvmi *ikvm,
return err;
}
+/*
+ * VM-scoped message handler for KVMI_GET_PAGE_ACCESS.
+ *
+ * Delegates validation and reply construction to the arch code, then
+ * hands the error code and (possibly NULL) reply to
+ * kvmi_msg_vm_maybe_reply().  The reply buffer allocated by the arch
+ * code is freed here in all cases (kvmi_msg_free(NULL) is a no-op).
+ */
+static int handle_get_page_access(struct kvmi *ikvm,
+				  const struct kvmi_msg_hdr *msg,
+				  const void *req)
+{
+	struct kvmi_get_page_access_reply *rpl = NULL;
+	size_t rpl_size = 0;
+	int err, ec;
+
+	ec = kvmi_arch_cmd_get_page_access(ikvm, msg, req, &rpl, &rpl_size);
+
+	err = kvmi_msg_vm_maybe_reply(ikvm, msg, ec, rpl, rpl_size);
+	kvmi_msg_free(rpl);
+	return err;
+}
+
static bool invalid_vcpu_hdr(const struct kvmi_vcpu_hdr *hdr)
{
return hdr->padding1 || hdr->padding2;
@@ -338,6 +354,7 @@ static int(*const msg_vm[])(struct kvmi *, const struct kvmi_msg_hdr *,
[KVMI_CONTROL_CMD_RESPONSE] = handle_control_cmd_response,
[KVMI_CONTROL_VM_EVENTS] = handle_control_vm_events,
[KVMI_GET_GUEST_INFO] = handle_get_guest_info,
+ [KVMI_GET_PAGE_ACCESS] = handle_get_page_access,
[KVMI_GET_VERSION] = handle_get_version,
};