@@ -367,6 +367,9 @@ the following events::
KVMI_VM_EVENT_UNHOOK
+The vCPU events (e.g. *KVMI_VCPU_EVENT_PAUSE*) are controlled with
+the *KVMI_VCPU_CONTROL_EVENTS* command.
+
:Errors:
* -KVM_EINVAL - the padding is not zero
@@ -509,6 +512,51 @@ command) before returning to guest.
*KVMI_VCPU_EVENT_PAUSE* events
* -KVM_EPERM - the *KVMI_VCPU_EVENT_PAUSE* event is disallowed
+10. KVMI_VCPU_CONTROL_EVENTS
+----------------------------
+
+:Architectures: all
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_vcpu_control_events {
+ __u16 event_id;
+ __u8 enable;
+ __u8 padding1;
+ __u32 padding2;
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code
+
+Enables/disables vCPU introspection events.
+
+When an event is enabled, the introspection tool is notified and
+must reply with: continue, retry, crash, etc. (see **Events** below).
+
+The following vCPU events don't have to be enabled and can't be disabled,
+because they are sent as a result of certain commands (but they can be
+disallowed by the device manager) ::
+
+ KVMI_VCPU_EVENT_PAUSE
+
+The VM events (e.g. *KVMI_VM_EVENT_UNHOOK*) are controlled with
+the *KVMI_VM_CONTROL_EVENTS* command.
+
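+As a minimal sketch (not part of this patch), an introspection tool
+might enable an event for vCPU 0 as shown below, where the socket
+``fd``, the ``new_seq()`` helper and the ``event_id`` value are
+assumptions::
+
+	struct {
+		struct kvmi_msg_hdr hdr;
+		struct kvmi_vcpu_hdr vcpu_hdr;
+		struct kvmi_vcpu_control_events cmd;
+	} req = {};
+
+	req.hdr.id = KVMI_VCPU_CONTROL_EVENTS;
+	req.hdr.seq = new_seq();	/* assumed sequence-number helper */
+	req.hdr.size = sizeof(req) - sizeof(req.hdr);	/* payload only */
+	req.vcpu_hdr.vcpu = 0;		/* target vCPU index */
+	req.cmd.event_id = event_id;
+	req.cmd.enable = 1;
+
+	if (send(fd, &req, sizeof(req), 0) != sizeof(req))
+		/* handle the error */;
+
+The reply carries the *kvmi_error_code* structure shown above, with the
+same sequence number in its message header.
+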
+:Errors:
+
+* -KVM_EINVAL - the padding is not zero
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EINVAL - the event ID is unknown (use *KVMI_VM_CHECK_EVENT* first)
+* -KVM_EPERM - the access is disallowed (use *KVMI_VM_CHECK_EVENT* first)
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+
Events
======
@@ -31,6 +31,8 @@ struct kvm_vcpu_introspection {
struct kvmi_vcpu_reply reply;
bool waiting_for_reply;
+
+ unsigned long *ev_enable_mask;
};
struct kvm_introspection {
@@ -35,7 +35,8 @@ enum {
enum {
KVMI_VCPU_EVENT = KVMI_VCPU_MESSAGE_ID(0),
- KVMI_VCPU_GET_INFO = KVMI_VCPU_MESSAGE_ID(1),
+ KVMI_VCPU_GET_INFO = KVMI_VCPU_MESSAGE_ID(1),
+ KVMI_VCPU_CONTROL_EVENTS = KVMI_VCPU_MESSAGE_ID(2),
KVMI_NEXT_VCPU_MESSAGE
};
@@ -148,4 +149,11 @@ struct kvmi_vcpu_event_reply {
__u32 padding2;
};
+struct kvmi_vcpu_control_events {
+ __u16 event_id;
+ __u8 enable;
+ __u8 padding1;
+ __u32 padding2;
+};
+
#endif /* _UAPI__LINUX_KVMI_H */
@@ -777,6 +777,51 @@ static void test_pause(struct kvm_vm *vm)
allow_event(vm, KVMI_VCPU_EVENT_PAUSE);
}
+static void cmd_vcpu_control_event(struct kvm_vm *vm, __u16 event_id,
+ __u8 enable, int expected_err)
+{
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vcpu_hdr vcpu_hdr;
+ struct kvmi_vcpu_control_events cmd;
+ } req = {};
+
+ req.cmd.event_id = event_id;
+ req.cmd.enable = enable;
+
+ test_vcpu0_command(vm, KVMI_VCPU_CONTROL_EVENTS,
+ &req.hdr, sizeof(req), NULL, 0,
+ expected_err);
+}
+
+static void enable_vcpu_event(struct kvm_vm *vm, __u16 event_id)
+{
+ cmd_vcpu_control_event(vm, event_id, 1, 0);
+}
+
+static void disable_vcpu_event(struct kvm_vm *vm, __u16 event_id)
+{
+ cmd_vcpu_control_event(vm, event_id, 0, 0);
+}
+
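+/*
+ * Exercises KVMI_VCPU_CONTROL_EVENTS: a valid enable/disable pair,
+ * then the -KVM_EINVAL (bad 'enable' value, unknown event ID) and
+ * -KVM_EPERM (disallowed event) error paths.
+ */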
+static void test_cmd_vcpu_control_events(struct kvm_vm *vm)
+{
+ __u16 id = KVMI_VCPU_EVENT_PAUSE, invalid_id = 0xffff;
+ __u8 enable = 1, enable_inval = 2;
+
+ enable_vcpu_event(vm, id);
+ disable_vcpu_event(vm, id);
+
+ cmd_vcpu_control_event(vm, id, enable_inval, -KVM_EINVAL);
+ cmd_vcpu_control_event(vm, invalid_id, enable, -KVM_EINVAL);
+
+ disallow_event(vm, id);
+ cmd_vcpu_control_event(vm, id, enable, -KVM_EPERM);
+ allow_event(vm, id);
+}
+
static void test_introspection(struct kvm_vm *vm)
{
srandom(time(0));
@@ -793,6 +838,7 @@ static void test_introspection(struct kvm_vm *vm)
test_memory_access(vm);
test_cmd_vcpu_get_info(vm);
test_pause(vm);
+ test_cmd_vcpu_control_events(vm);
unhook_introspection(vm);
}
@@ -83,6 +83,11 @@ bool kvmi_is_known_vm_event(u16 id)
return id < KVMI_NUM_EVENTS && test_bit(id, Kvmi_known_vm_events);
}
+bool kvmi_is_known_vcpu_event(u16 id)
+{
+ return id < KVMI_NUM_EVENTS && test_bit(id, Kvmi_known_vcpu_events);
+}
+
static bool kvmi_is_vm_event_enabled(struct kvm_introspection *kvmi, u16 id)
{
return id < KVMI_NUM_EVENTS && test_bit(id, kvmi->vm_event_enable_mask);
@@ -190,6 +195,12 @@ static bool kvmi_alloc_vcpui(struct kvm_vcpu *vcpu)
if (!vcpui)
return false;
+ vcpui->ev_enable_mask = bitmap_zalloc(KVMI_NUM_EVENTS, GFP_KERNEL);
+ if (!vcpui->ev_enable_mask) {
+		kfree(vcpui);
+ return false;
+ }
+
INIT_LIST_HEAD(&vcpui->job_list);
spin_lock_init(&vcpui->job_lock);
@@ -225,6 +236,8 @@ static void kvmi_free_vcpui(struct kvm_vcpu *vcpu)
kvmi_free_vcpu_jobs(vcpui);
+ bitmap_free(vcpui->ev_enable_mask);
+
kfree(vcpui);
vcpu->kvmi = NULL;
}
@@ -622,6 +635,19 @@ int kvmi_cmd_vm_control_events(struct kvm_introspection *kvmi,
return 0;
}
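+/*
+ * Per-vCPU events are tracked in vcpui->ev_enable_mask, mirroring
+ * the per-VM vm_event_enable_mask used by kvmi_cmd_vm_control_events().
+ */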
+int kvmi_cmd_vcpu_control_events(struct kvm_vcpu *vcpu,
+ u16 event_id, bool enable)
+{
+ struct kvm_vcpu_introspection *vcpui = VCPUI(vcpu);
+
+ if (enable)
+ set_bit(event_id, vcpui->ev_enable_mask);
+ else
+ clear_bit(event_id, vcpui->ev_enable_mask);
+
+ return 0;
+}
+
static long
get_user_pages_remote_unlocked(struct mm_struct *mm, unsigned long start,
unsigned long nr_pages, unsigned int gup_flags,
@@ -47,12 +47,15 @@ bool kvmi_is_command_allowed(struct kvm_introspection *kvmi, u16 id);
bool kvmi_is_event_allowed(struct kvm_introspection *kvmi, u16 id);
bool kvmi_is_known_event(u16 id);
bool kvmi_is_known_vm_event(u16 id);
+bool kvmi_is_known_vcpu_event(u16 id);
int kvmi_add_job(struct kvm_vcpu *vcpu,
void (*fct)(struct kvm_vcpu *vcpu, void *ctx),
void *ctx, void (*free_fct)(void *ctx));
void kvmi_run_jobs(struct kvm_vcpu *vcpu);
int kvmi_cmd_vm_control_events(struct kvm_introspection *kvmi,
u16 event_id, bool enable);
+int kvmi_cmd_vcpu_control_events(struct kvm_vcpu *vcpu,
+ u16 event_id, bool enable);
int kvmi_cmd_read_physical(struct kvm *kvm, u64 gpa, size_t size,
int (*send)(struct kvm_introspection *,
const struct kvmi_msg_hdr*,
@@ -367,6 +367,27 @@ static int handle_vcpu_event_reply(const struct kvmi_vcpu_msg_job *job,
return expected->error;
}
+static int handle_vcpu_control_events(const struct kvmi_vcpu_msg_job *job,
+ const struct kvmi_msg_hdr *msg,
+ const void *_req)
+{
+ struct kvm_introspection *kvmi = KVMI(job->vcpu->kvm);
+ const struct kvmi_vcpu_control_events *req = _req;
+ int ec;
+
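+	/* Reject malformed requests before looking at permissions. */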
+ if (req->padding1 || req->padding2 || req->enable > 1)
+ ec = -KVM_EINVAL;
+ else if (!kvmi_is_known_vcpu_event(req->event_id))
+ ec = -KVM_EINVAL;
+ else if (!kvmi_is_event_allowed(kvmi, req->event_id))
+ ec = -KVM_EPERM;
+ else
+ ec = kvmi_cmd_vcpu_control_events(job->vcpu, req->event_id,
+ req->enable == 1);
+
+ return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
+}
+
/*
* These functions are executed from the vCPU thread. The receiving thread
* passes the messages using a newly allocated 'struct kvmi_vcpu_msg_job'
@@ -374,7 +395,8 @@ static int handle_vcpu_event_reply(const struct kvmi_vcpu_msg_job *job,
* sending back the reply).
*/
static const kvmi_vcpu_msg_job_fct msg_vcpu[] = {
- [KVMI_VCPU_EVENT] = handle_vcpu_event_reply,
+ [KVMI_VCPU_EVENT] = handle_vcpu_event_reply,
+ [KVMI_VCPU_CONTROL_EVENTS] = handle_vcpu_control_events,
};
static kvmi_vcpu_msg_job_fct get_vcpu_msg_handler(u16 id)