@@ -563,6 +563,60 @@ EPT view (0 is primary). On all other hardware it must be zero.
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
* -KVM_ENOMEM - not enough memory to allocate the reply
+10. KVMI_SET_PAGE_ACCESS
+------------------------
+
+:Architectures: all
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_set_page_access {
+ __u16 view;
+ __u16 count;
+ __u32 padding;
+ struct kvmi_page_access_entry entries[0];
+ };
+
+where::
+
+ struct kvmi_page_access_entry {
+ __u64 gpa;
+ __u8 access;
+ __u8 padding1;
+ __u16 padding2;
+ __u32 padding3;
+ };
+
+
+:Returns:
+
+::
+
+ struct kvmi_error_code
+
+Sets the spte access bits (rwx) for an array of ``count`` guest physical
+addresses.
+
+The command will fail with -KVM_EINVAL if any of the specified access
+bit combinations is not supported.
+
+The command will make the changes in order and it will stop on the first
+error. The introspection tool should handle the rollback.
+
+In order to 'forget' an address (i.e. remove any restriction on it), all
+the access bits ('rwx') must be set.
+
+:Errors:
+
+* -KVM_EINVAL - the specified access bits combination is invalid
+* -KVM_EINVAL - the selected SPT view is invalid
+* -KVM_EINVAL - padding is not zero
+* -KVM_EINVAL - the message size is invalid
+* -KVM_EOPNOTSUPP - a SPT view was selected but the hardware doesn't support it
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+* -KVM_ENOMEM - not enough memory to add the page tracking structures
+
Events
======
@@ -224,3 +224,39 @@ int kvmi_arch_cmd_get_page_access(struct kvmi *ikvm,
return 0;
}
+/*
+ * KVMI_SET_PAGE_ACCESS handler: validates the request and applies the
+ * requested spte access bits (rwx) for every guest physical address in
+ * the 'entries' array, stopping at the first error (the introspection
+ * tool is responsible for any rollback, as documented).
+ *
+ * Returns 0 on success or a -KVM_* error code.
+ */
+int kvmi_arch_cmd_set_page_access(struct kvmi *ikvm,
+				  const struct kvmi_msg_hdr *msg,
+				  const struct kvmi_set_page_access *req)
+{
+	const struct kvmi_page_access_entry *entry = req->entries;
+	const struct kvmi_page_access_entry *end;
+	u8 unknown_bits = ~(KVMI_PAGE_ACCESS_R | KVMI_PAGE_ACCESS_W
+			| KVMI_PAGE_ACCESS_X);
+	int ec = 0;
+
+	/*
+	 * Validate the message size before reading any request field:
+	 * only msg->size bytes were received from the introspection tool,
+	 * so req->count/req->padding are untrusted until the fixed-size
+	 * header is known to be present.
+	 */
+	if (msg->size < sizeof(*req))
+		return -KVM_EINVAL;
+
+	if (msg->size < sizeof(*req) + (size_t)req->count * sizeof(*entry))
+		return -KVM_EINVAL;
+
+	if (req->padding)
+		return -KVM_EINVAL;
+
+	if (req->view != 0) /* TODO */
+		return -KVM_EOPNOTSUPP;
+
+	end = req->entries + req->count;
+
+	for (; entry < end; entry++) {
+		if ((entry->access & unknown_bits) || entry->padding1
+				|| entry->padding2 || entry->padding3)
+			ec = -KVM_EINVAL;
+		else
+			ec = kvmi_cmd_set_page_access(ikvm, entry->gpa,
+							entry->access);
+		if (ec) {
+			kvmi_warn(ikvm, "%s: %llx %x padding %x,%x,%x",
+				__func__, entry->gpa, entry->access,
+				entry->padding1, entry->padding2,
+				entry->padding3);
+			/* stop on the first error, per the documented contract */
+			break;
+		}
+	}
+
+	return ec;
+}
+
@@ -127,6 +127,21 @@ struct kvmi_get_page_access_reply {
__u8 access[0];
};
+/*
+ * One element of the KVMI_SET_PAGE_ACCESS 'entries' array: the guest
+ * physical address and the desired access bits
+ * (KVMI_PAGE_ACCESS_{R,W,X}). All padding fields must be zero.
+ */
+struct kvmi_page_access_entry {
+	__u64 gpa;
+	__u8 access;
+	__u8 padding1;
+	__u16 padding2;
+	__u32 padding3;
+};
+
+/*
+ * KVMI_SET_PAGE_ACCESS message payload: 'count' entries follow this
+ * fixed-size header. 'view' selects the EPT view (0 is the primary
+ * one); 'padding' must be zero.
+ */
+struct kvmi_set_page_access {
+	__u16 view;
+	__u16 count;
+	__u32 padding;
+	struct kvmi_page_access_entry entries[0];
+};
+
struct kvmi_get_vcpu_info_reply {
__u64 tsc_speed;
};
@@ -73,6 +73,57 @@ static int kvmi_get_gfn_access(struct kvmi *ikvm, const gfn_t gfn,
return m ? 0 : -1;
}
+/*
+ * Sets the allowed access bits for @gfn in the introspection radix tree
+ * and refreshes the page tracking structures for that gfn.
+ *
+ * A new kvmi_mem_access entry is allocated (and the radix tree node
+ * preloaded) up front, so no allocation happens while the locks are
+ * held. If @gfn already has an entry it is updated in place — and
+ * dropped entirely when @access equals full_access, i.e. the gfn is no
+ * longer restricted. The pre-allocated entry is freed when unused.
+ *
+ * Returns 0 on success or -KVM_ENOMEM on allocation failure.
+ */
+static int kvmi_set_gfn_access(struct kvm *kvm, gfn_t gfn, u8 access)
+{
+	struct kvmi_mem_access *m;
+	struct kvmi_mem_access *__m;
+	struct kvmi *ikvm = IKVM(kvm);
+	int err = 0;
+	int idx;
+
+	/* Allocate outside the locks; freed at 'exit' if not consumed. */
+	m = kmem_cache_zalloc(radix_cache, GFP_KERNEL);
+	if (!m)
+		return -KVM_ENOMEM;
+
+	m->gfn = gfn;
+	m->access = access;
+
+	/* Preload so the radix_tree_insert() below cannot fail. */
+	if (radix_tree_preload(GFP_KERNEL)) {
+		err = -KVM_ENOMEM;
+		goto exit;
+	}
+
+	/* Lock order: srcu -> mmu_lock -> access_tree_lock. */
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
+	write_lock(&ikvm->access_tree_lock);
+
+	__m = __kvmi_get_gfn_access(ikvm, gfn);
+	if (__m) {
+		/* Existing entry: update it; the new allocation stays unused. */
+		__m->access = access;
+		kvmi_arch_update_page_tracking(kvm, NULL, __m);
+		if (access == full_access) {
+			/* Full access means no restriction: drop the entry. */
+			radix_tree_delete(&ikvm->access_tree, gfn);
+			kmem_cache_free(radix_cache, __m);
+		}
+	} else {
+		radix_tree_insert(&ikvm->access_tree, gfn, m);
+		kvmi_arch_update_page_tracking(kvm, NULL, m);
+		m = NULL; /* ownership transferred to the tree */
+	}
+
+	write_unlock(&ikvm->access_tree_lock);
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	radix_tree_preload_end();
+
+exit:
+	if (m)
+		kmem_cache_free(radix_cache, m);
+
+	return err;
+}
+
static bool kvmi_restricted_access(struct kvmi *ikvm, gpa_t gpa, u8 access)
{
u8 allowed_access;
@@ -1081,6 +1132,16 @@ int kvmi_cmd_get_page_access(struct kvmi *ikvm, u64 gpa, u8 *access)
return 0;
}
+/*
+ * Sets the allowed access bits for the guest page containing @gpa.
+ *
+ * NOTE(review): the result of kvmi_get_gfn_access() is discarded
+ * (ignored_access is never read) and the call appears to have no
+ * effect here — confirm whether it can be removed.
+ */
+int kvmi_cmd_set_page_access(struct kvmi *ikvm, u64 gpa, u8 access)
+{
+	gfn_t gfn = gpa_to_gfn(gpa);
+	u8 ignored_access;
+
+	kvmi_get_gfn_access(ikvm, gfn, &ignored_access);
+
+	return kvmi_set_gfn_access(ikvm->kvm, gfn, access);
+}
+
int kvmi_cmd_control_events(struct kvm_vcpu *vcpu, unsigned int event_id,
bool enable)
{
@@ -160,6 +160,7 @@ void *kvmi_msg_alloc(void);
void *kvmi_msg_alloc_check(size_t size);
void kvmi_msg_free(void *addr);
int kvmi_cmd_get_page_access(struct kvmi *ikvm, u64 gpa, u8 *access);
+int kvmi_cmd_set_page_access(struct kvmi *ikvm, u64 gpa, u8 access);
int kvmi_cmd_control_events(struct kvm_vcpu *vcpu, unsigned int event_id,
bool enable);
int kvmi_cmd_control_vm_events(struct kvmi *ikvm, unsigned int event_id,
@@ -180,6 +181,9 @@ int kvmi_arch_cmd_get_page_access(struct kvmi *ikvm,
const struct kvmi_get_page_access *req,
struct kvmi_get_page_access_reply **dest,
size_t *dest_size);
+int kvmi_arch_cmd_set_page_access(struct kvmi *ikvm,
+ const struct kvmi_msg_hdr *msg,
+ const struct kvmi_set_page_access *req);
void kvmi_arch_setup_event(struct kvm_vcpu *vcpu, struct kvmi_event *ev);
bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
u8 access);
@@ -32,6 +32,7 @@ static const char *const msg_IDs[] = {
[KVMI_GET_PAGE_ACCESS] = "KVMI_GET_PAGE_ACCESS",
[KVMI_GET_VCPU_INFO] = "KVMI_GET_VCPU_INFO",
[KVMI_GET_VERSION] = "KVMI_GET_VERSION",
+ [KVMI_SET_PAGE_ACCESS] = "KVMI_SET_PAGE_ACCESS",
};
static bool is_known_message(u16 id)
@@ -339,6 +340,17 @@ static int handle_get_page_access(struct kvmi *ikvm,
return err;
}
+/*
+ * Handles the KVMI_SET_PAGE_ACCESS message: forwards the request to the
+ * arch-specific command handler and passes its error code to
+ * kvmi_msg_vm_maybe_reply() for an error-code-only reply.
+ */
+static int handle_set_page_access(struct kvmi *ikvm,
+				  const struct kvmi_msg_hdr *msg,
+				  const void *req)
+{
+	int err = kvmi_arch_cmd_set_page_access(ikvm, msg, req);
+
+	return kvmi_msg_vm_maybe_reply(ikvm, msg, err, NULL, 0);
+}
+
static bool invalid_vcpu_hdr(const struct kvmi_vcpu_hdr *hdr)
{
return hdr->padding1 || hdr->padding2;
@@ -356,6 +368,7 @@ static int(*const msg_vm[])(struct kvmi *, const struct kvmi_msg_hdr *,
[KVMI_GET_GUEST_INFO] = handle_get_guest_info,
[KVMI_GET_PAGE_ACCESS] = handle_get_page_access,
[KVMI_GET_VERSION] = handle_get_version,
+ [KVMI_SET_PAGE_ACCESS] = handle_set_page_access,
};
static int handle_event_reply(struct kvm_vcpu *vcpu,