@@ -617,6 +617,39 @@ In order to 'forget' an address, all the access bits ('rwx') must be set.
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
* -KVM_ENOMEM - not enough memory to add the page tracking structures
+11. KVMI_CONTROL_SPP
+--------------------
+
+:Architectures: x86/intel
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_control_spp {
+ __u8 enable;
+ __u8 padding1;
+ __u16 padding2;
+ __u32 padding3;
+	};
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+
+Enables/disables subpage protection (SPP) for the current VM.
+
+If SPP is not enabled, *KVMI_GET_PAGE_WRITE_BITMAP* and
+*KVMI_SET_PAGE_WRITE_BITMAP* commands will fail.
+
+:Errors:
+
+* -KVM_EINVAL - padding is not zero
+* -KVM_EOPNOTSUPP - the hardware doesn't support SPP
+* -KVM_EOPNOTSUPP - the current implementation can't disable SPP
+
Events
======
@@ -260,3 +260,7 @@ int kvmi_arch_cmd_set_page_access(struct kvmi *ikvm,
return ec;
}
+int kvmi_arch_cmd_control_spp(struct kvmi *ikvm)
+{
+	return kvm_arch_init_spp(ikvm->kvm);	/* enable only; no arch teardown */
+}
@@ -142,6 +142,13 @@ struct kvmi_set_page_access {
struct kvmi_page_access_entry entries[0];
};
+struct kvmi_control_spp {
+	__u8 enable;	/* non-zero to enable SPP for the VM */
+	__u8 padding1;	/* must be zero */
+	__u16 padding2;	/* must be zero */
+	__u32 padding3;	/* must be zero */
+};
+
struct kvmi_get_vcpu_info_reply {
__u64 tsc_speed;
};
@@ -130,6 +130,11 @@ struct kvmi {
DECLARE_BITMAP(event_allow_mask, KVMI_NUM_EVENTS);
DECLARE_BITMAP(vm_ev_mask, KVMI_NUM_EVENTS);
+	struct {
+		bool initialized;	/* set once arch SPP setup was attempted */
+		atomic_t enabled;	/* non-zero if SPP is active for this VM */
+	} spp;
+
bool cmd_reply_disabled;
};
@@ -184,6 +189,7 @@ int kvmi_arch_cmd_get_page_access(struct kvmi *ikvm,
int kvmi_arch_cmd_set_page_access(struct kvmi *ikvm,
const struct kvmi_msg_hdr *msg,
const struct kvmi_set_page_access *req);
+int kvmi_arch_cmd_control_spp(struct kvmi *ikvm);
void kvmi_arch_setup_event(struct kvm_vcpu *vcpu, struct kvmi_event *ev);
bool kvmi_arch_pf_event(struct kvm_vcpu *vcpu, gpa_t gpa, gva_t gva,
u8 access);
@@ -25,6 +25,7 @@ static const char *const msg_IDs[] = {
[KVMI_CHECK_EVENT] = "KVMI_CHECK_EVENT",
[KVMI_CONTROL_CMD_RESPONSE] = "KVMI_CONTROL_CMD_RESPONSE",
[KVMI_CONTROL_EVENTS] = "KVMI_CONTROL_EVENTS",
+ [KVMI_CONTROL_SPP] = "KVMI_CONTROL_SPP",
[KVMI_CONTROL_VM_EVENTS] = "KVMI_CONTROL_VM_EVENTS",
[KVMI_EVENT] = "KVMI_EVENT",
[KVMI_EVENT_REPLY] = "KVMI_EVENT_REPLY",
@@ -300,6 +301,37 @@ static int kvmi_get_vcpu(struct kvmi *ikvm, unsigned int vcpu_idx,
return 0;
}
+static bool enable_spp(struct kvmi *ikvm)
+{
+	if (!ikvm->spp.initialized) {
+		int err = kvmi_arch_cmd_control_spp(ikvm);
+
+		ikvm->spp.initialized = true;	/* even on error: init is never retried */
+
+		if (!err)
+			atomic_set(&ikvm->spp.enabled, 1);
+	}
+
+	return atomic_read(&ikvm->spp.enabled);
+}
+
+static int handle_control_spp(struct kvmi *ikvm,
+			      const struct kvmi_msg_hdr *msg,
+			      const void *_req)
+{
+	const struct kvmi_control_spp *req = _req;
+	int ec;
+
+	if (req->padding1 || req->padding2 || req->padding3)
+		ec = -KVM_EINVAL;
+	else if (req->enable && enable_spp(ikvm))
+		ec = 0;
+	else
+		ec = -KVM_EOPNOTSUPP;	/* disable request, or SPP unavailable */
+
+	return kvmi_msg_vm_maybe_reply(ikvm, msg, ec, NULL, 0);
+}
+
static int handle_control_cmd_response(struct kvmi *ikvm,
const struct kvmi_msg_hdr *msg,
const void *_req)
@@ -364,6 +396,7 @@ static int(*const msg_vm[])(struct kvmi *, const struct kvmi_msg_hdr *,
[KVMI_CHECK_COMMAND] = handle_check_command,
[KVMI_CHECK_EVENT] = handle_check_event,
[KVMI_CONTROL_CMD_RESPONSE] = handle_control_cmd_response,
+ [KVMI_CONTROL_SPP] = handle_control_spp,
[KVMI_CONTROL_VM_EVENTS] = handle_control_vm_events,
[KVMI_GET_GUEST_INFO] = handle_get_guest_info,
[KVMI_GET_PAGE_ACCESS] = handle_get_page_access,
This command enables/disables subpage protection (SPP) for the current VM. Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com> --- Documentation/virtual/kvm/kvmi.rst | 33 ++++++++++++++++++++++++++++++ arch/x86/kvm/kvmi.c | 4 ++++ include/uapi/linux/kvmi.h | 7 +++++++ virt/kvm/kvmi_int.h | 6 ++++++ virt/kvm/kvmi_msg.c | 33 ++++++++++++++++++++++++++++++ 5 files changed, 83 insertions(+)