@@ -694,6 +694,72 @@ EPT view (0 is primary). On all other hardware it must be zero.
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
* -KVM_ENOMEM - not enough memory to allocate the reply
+13. KVMI_SET_PAGE_WRITE_BITMAP
+------------------------------
+
+:Architectures: x86
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_set_page_write_bitmap {
+ __u16 view;
+ __u16 count;
+ __u32 padding;
+ struct kvmi_page_write_bitmap_entry entries[0];
+ };
+
+where::
+
+ struct kvmi_page_write_bitmap_entry {
+ __u64 gpa;
+ __u32 bitmap;
+ __u32 padding;
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+
+Sets the subpage protection (SPP) write bitmap for an array of ``count``
+guest physical addresses, each designating a 4KB page.
+
+The command will make the changes starting with the first entry and
+it will stop on the first error. The introspection tool should handle
+the rollback.
+
+While the *KVMI_SET_PAGE_ACCESS* command can be used to write-protect a
+4KB page, this command can write-protect 128-byte subpages inside of a
+4KB page by setting the corresponding bit to 1 (write allowed) or to 0
+(write disallowed). For example, to allow write access to the A and B
+subpages only, the bitmap must be set to::
+
+ BIT(A) | BIT(B)
+
+A and B must be numbers between 0 (first subpage) and 31 (last subpage).
+
+Using this command to set all bits to 1 (allow write access for
+all subpages) will allow write access to the whole 4KB page (like a
+*KVMI_SET_PAGE_ACCESS* command with the *KVMI_PAGE_ACCESS_W* flag set)
+and vice versa.
+
+Using this command to set any bit to 0 will write-protect the whole 4KB
+page (like a *KVMI_SET_PAGE_ACCESS* command with the *KVMI_PAGE_ACCESS_W*
+flag cleared) and allow write access only for subpages with the
+corresponding bit set to 1.
+
+:Errors:
+
+* -KVM_EINVAL - the selected EPT view is invalid
+* -KVM_EOPNOTSUPP - an EPT view was selected but the hardware doesn't support it
+* -KVM_EOPNOTSUPP - the hardware doesn't support SPP or it hasn't been enabled
+* -KVM_EINVAL - the write access is already allowed for the whole 4KB page
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+* -KVM_ENOMEM - not enough memory to add the page tracking structures
+
Events
======
@@ -304,6 +304,36 @@ int kvmi_arch_cmd_set_page_access(struct kvmi *ikvm,
return ec;
}
+/*
+ * Handle the KVMI_SET_PAGE_WRITE_BITMAP command.
+ *
+ * Validates the request, then applies each (gpa, bitmap) entry in order,
+ * stopping at the first failure. Entries applied before the failure are
+ * NOT rolled back here; the introspection tool handles rollback (see the
+ * ABI documentation).
+ *
+ * Returns 0 on success or a -KVM_* error code.
+ */
+int kvmi_arch_cmd_set_page_write_bitmap(struct kvmi *ikvm,
+					const struct kvmi_msg_hdr *msg,
+					const struct kvmi_set_page_write_bitmap
+					*req)
+{
+	u16 k, n = req->count;
+	int ec = 0;
+
+	/* Reserved field: must be zero for forward compatibility. */
+	if (req->padding)
+		return -KVM_EINVAL;
+
+	/*
+	 * The variable-length entries[] array must fit in the message.
+	 * NOTE(review): '<' tolerates trailing bytes past the last entry;
+	 * confirm this matches the other command handlers' size checks.
+	 */
+	if (msg->size < sizeof(*req) + req->count * sizeof(req->entries[0]))
+		return -KVM_EINVAL;
+
+	if (!kvmi_spp_enabled(ikvm))
+		return -KVM_EOPNOTSUPP;
+
+	/* Only the primary view (0) is supported for now. */
+	if (req->view != 0) /* TODO */
+		return -KVM_EOPNOTSUPP;
+
+	/* Apply entries in order; stop at the first error. */
+	for (k = 0; k < n && ec == 0; k++) {
+		u64 gpa = req->entries[k].gpa;
+		u32 bitmap = req->entries[k].bitmap;
+
+		ec = kvmi_cmd_set_page_write_bitmap(ikvm, gpa, bitmap);
+	}
+
+	return ec;
+}
+
 int kvmi_arch_cmd_control_spp(struct kvmi *ikvm)
 {
	return kvm_arch_init_spp(ikvm->kvm);
 }
@@ -160,6 +160,19 @@ struct kvmi_get_page_write_bitmap_reply {
__u32 bitmap[0];
};
+/*
+ * One sub-page write-permission update: one bit per 128-byte subpage of
+ * the 4KB page at @gpa (1 = write allowed, 0 = write disallowed).
+ */
+struct kvmi_page_write_bitmap_entry {
+	__u64 gpa;
+	__u32 bitmap;
+	__u32 padding;		/* must be zero */
+};
+
+/* Payload of KVMI_SET_PAGE_WRITE_BITMAP: @count entries follow. */
+struct kvmi_set_page_write_bitmap {
+	__u16 view;		/* must be zero (only the primary view is supported) */
+	__u16 count;
+	__u32 padding;		/* must be zero */
+	struct kvmi_page_write_bitmap_entry entries[0];
+};
+
+
struct kvmi_get_vcpu_info_reply {
__u64 tsc_speed;
};
@@ -99,6 +99,24 @@ static int kvmi_set_gfn_access(struct kvm *kvm, gfn_t gfn, u8 access,
m->access = access;
m->write_bitmap = write_bitmap;
+	/*
+	 * Only try to set the SPP bitmap when write access to the page is
+	 * disallowed (note the !KVMI_PAGE_ACCESS_W check below).
+	 * Be careful: kvm_mmu_set_subpages() enables page write-protection
+	 * by default when setting the SPP bitmap; if the bitmap contains
+	 * all 1s, it will likewise make the whole page writable by default.
+	 */
+ if (!(access & KVMI_PAGE_ACCESS_W) && kvmi_spp_enabled(ikvm)) {
+ struct kvm_subpage spp_info;
+
+ spp_info.base_gfn = gfn;
+ spp_info.npages = 1;
+ spp_info.access_map[0] = write_bitmap;
+
+ err = kvm_arch_set_subpages(kvm, &spp_info);
+ if (err)
+ goto exit;
+ }
+
if (radix_tree_preload(GFP_KERNEL)) {
err = -KVM_ENOMEM;
goto exit;
@@ -1183,6 +1201,25 @@ int kvmi_cmd_set_page_access(struct kvmi *ikvm, u64 gpa, u8 access)
return kvmi_set_gfn_access(ikvm->kvm, gfn, access, write_bitmap);
}
+/*
+ * Update the SPP write bitmap for the 4KB page at @gpa.
+ *
+ * An all-ones bitmap means every 128-byte subpage is writable, which is
+ * equivalent to granting KVMI_PAGE_ACCESS_W on the whole page; any
+ * cleared bit forces the whole page to be write-protected so writes can
+ * be filtered per subpage.
+ */
+int kvmi_cmd_set_page_write_bitmap(struct kvmi *ikvm, u64 gpa,
+				   u32 write_bitmap)
+{
+	bool write_allowed_for_all;
+	gfn_t gfn = gpa_to_gfn(gpa);
+	u32 ignored_write_bitmap;
+	u8 access;
+
+	/*
+	 * NOTE(review): the return value is ignored here; confirm that
+	 * kvmi_get_gfn_access() always initializes @access (e.g. to a
+	 * default) when the gfn is not tracked yet.
+	 */
+	kvmi_get_gfn_access(ikvm, gfn, &access, &ignored_write_bitmap);
+
+	write_allowed_for_all = (write_bitmap == ~(u32)0);
+	if (write_allowed_for_all)
+		access |= KVMI_PAGE_ACCESS_W;
+	else
+		access &= ~KVMI_PAGE_ACCESS_W;
+
+	return kvmi_set_gfn_access(ikvm->kvm, gfn, access, write_bitmap);
+}
+
int kvmi_cmd_control_events(struct kvm_vcpu *vcpu, unsigned int event_id,
bool enable)
{
@@ -173,6 +173,7 @@ void kvmi_msg_free(void *addr);
int kvmi_cmd_get_page_access(struct kvmi *ikvm, u64 gpa, u8 *access);
int kvmi_cmd_set_page_access(struct kvmi *ikvm, u64 gpa, u8 access);
int kvmi_cmd_get_page_write_bitmap(struct kvmi *ikvm, u64 gpa, u32 *bitmap);
+int kvmi_cmd_set_page_write_bitmap(struct kvmi *ikvm, u64 gpa, u32 bitmap);
int kvmi_cmd_control_events(struct kvm_vcpu *vcpu, unsigned int event_id,
bool enable);
int kvmi_cmd_control_vm_events(struct kvmi *ikvm, unsigned int event_id,
@@ -202,6 +203,9 @@ int kvmi_arch_cmd_get_page_write_bitmap(struct kvmi *ikvm,
const struct kvmi_get_page_write_bitmap *req,
struct kvmi_get_page_write_bitmap_reply **dest,
size_t *dest_size);
+int kvmi_arch_cmd_set_page_write_bitmap(struct kvmi *ikvm,
+ const struct kvmi_msg_hdr *msg,
+ const struct kvmi_set_page_write_bitmap *req);
void kvmi_arch_setup_event(struct kvm_vcpu *vcpu, struct kvmi_event *ev);
bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
u8 access);
@@ -35,6 +35,7 @@ static const char *const msg_IDs[] = {
[KVMI_GET_VCPU_INFO] = "KVMI_GET_VCPU_INFO",
[KVMI_GET_VERSION] = "KVMI_GET_VERSION",
[KVMI_SET_PAGE_ACCESS] = "KVMI_SET_PAGE_ACCESS",
+ [KVMI_SET_PAGE_WRITE_BITMAP] = "KVMI_SET_PAGE_WRITE_BITMAP",
};
static bool is_known_message(u16 id)
@@ -400,6 +401,17 @@ static int handle_get_page_write_bitmap(struct kvmi *ikvm,
return err;
}
+/*
+ * VM-scope message handler: run the arch command and reply with just an
+ * error code (kvmi_msg_vm_maybe_reply() decides whether a reply is sent).
+ */
+static int handle_set_page_write_bitmap(struct kvmi *ikvm,
+					const struct kvmi_msg_hdr *msg,
+					const void *req)
+{
+	int ec;
+
+	ec = kvmi_arch_cmd_set_page_write_bitmap(ikvm, msg, req);
+
+	return kvmi_msg_vm_maybe_reply(ikvm, msg, ec, NULL, 0);
+}
+
+
static bool invalid_vcpu_hdr(const struct kvmi_vcpu_hdr *hdr)
{
return hdr->padding1 || hdr->padding2;
@@ -420,6 +432,7 @@ static int(*const msg_vm[])(struct kvmi *, const struct kvmi_msg_hdr *,
[KVMI_GET_PAGE_WRITE_BITMAP] = handle_get_page_write_bitmap,
[KVMI_GET_VERSION] = handle_get_version,
[KVMI_SET_PAGE_ACCESS] = handle_set_page_access,
+ [KVMI_SET_PAGE_WRITE_BITMAP] = handle_set_page_write_bitmap,
};
static int handle_event_reply(struct kvm_vcpu *vcpu,