@@ -377,6 +377,9 @@ the following events::
KVMI_EVENT_UNHOOK
+The vCPU events (e.g. *KVMI_EVENT_PAUSE_VCPU*) are controlled with
+the *KVMI_VCPU_CONTROL_EVENTS* command.
+
:Errors:
* -KVM_EINVAL - the padding is not zero
@@ -520,12 +523,58 @@ command) before returning to guest.
*KVMI_EVENT_PAUSE_VCPU* events
* -KVM_EPERM - the *KVMI_EVENT_PAUSE_VCPU* event is disallowed
+10. KVMI_VCPU_CONTROL_EVENTS
+----------------------------
+
+:Architectures: all
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_vcpu_control_events {
+ __u16 event_id;
+ __u8 enable;
+ __u8 padding1;
+ __u32 padding2;
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code
+
+Enables/disables vCPU introspection events.
+
+When an event is enabled, the introspection tool is notified and
+must reply with an action: continue, retry, crash, etc. (see **Events** below).
+
+The following vCPU events don't have to be enabled and can't be disabled,
+because they are sent as a result of certain commands (but they can be
+disallowed by the device manager) ::
+
+ KVMI_EVENT_PAUSE_VCPU
+
+The VM events (e.g. *KVMI_EVENT_UNHOOK*) are controlled with
+the *KVMI_VM_CONTROL_EVENTS* command.
+
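+As with any vCPU command, the message is a *kvmi_msg_hdr* followed by a
+*kvmi_vcpu_hdr* and the command-specific structure. A minimal sketch of
+how an introspection tool might enable *KVMI_EVENT_PAUSE_VCPU* for
+vCPU 0 (the connected socket *fd* and the sequence number handling are
+assumed to be provided by the tool, and *size* is assumed to count the
+bytes following the common header)::
+
+	struct {
+		struct kvmi_msg_hdr hdr;
+		struct kvmi_vcpu_hdr vcpu_hdr;
+		struct kvmi_vcpu_control_events cmd;
+	} req = {};
+
+	req.hdr.id = KVMI_VCPU_CONTROL_EVENTS;
+	req.hdr.size = sizeof(req) - sizeof(req.hdr);
+	req.vcpu_hdr.vcpu = 0;
+	req.cmd.event_id = KVMI_EVENT_PAUSE_VCPU;
+	req.cmd.enable = 1;
+
+	if (write(fd, &req, sizeof(req)) != sizeof(req))
+		perror("write");
+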
+:Errors:
+
+* -KVM_EINVAL - the padding is not zero
+* -KVM_EINVAL - the selected vCPU is invalid
+* -KVM_EINVAL - the event ID is unknown (use *KVMI_VM_CHECK_EVENT* first)
+* -KVM_EPERM - the access is disallowed (use *KVMI_VM_CHECK_EVENT* first)
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+
Events
======
All introspection events (VM or vCPU related) are sent
using the *KVMI_EVENT* message id. No event will be sent unless
-it is explicitly enabled or requested (eg. *KVMI_EVENT_PAUSE_VCPU*).
+it is explicitly enabled (see *KVMI_VM_CONTROL_EVENTS*
+and *KVMI_VCPU_CONTROL_EVENTS*) or requested (e.g. *KVMI_EVENT_PAUSE_VCPU*).
The *KVMI_EVENT_UNHOOK* event doesn't have a reply and shares the kvmi_event
structure, for consistency with the vCPU events.
@@ -31,6 +31,8 @@ struct kvm_vcpu_introspection {
struct kvmi_vcpu_reply reply;
bool waiting_for_reply;
+
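+	/* bitmap of enabled vCPU events (KVMI_VCPU_CONTROL_EVENTS) */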
+ unsigned long *ev_enable_mask;
};
struct kvm_introspection {
@@ -25,8 +25,9 @@ enum {
KVMI_VM_READ_PHYSICAL = 6,
KVMI_VM_WRITE_PHYSICAL = 7,
- KVMI_VCPU_GET_INFO = 8,
- KVMI_VCPU_PAUSE = 9,
+ KVMI_VCPU_GET_INFO = 8,
+ KVMI_VCPU_PAUSE = 9,
+ KVMI_VCPU_CONTROL_EVENTS = 10,
KVMI_NUM_MESSAGES
};
@@ -122,6 +123,13 @@ struct kvmi_vcpu_pause {
__u32 padding3;
};
+struct kvmi_vcpu_control_events {
+ __u16 event_id;
+ __u8 enable;
+ __u8 padding1;
+ __u32 padding2;
+};
+
struct kvmi_event {
__u16 size;
__u16 vcpu;
@@ -834,6 +834,59 @@ static void test_pause(struct kvm_vm *vm)
allow_event(vm, KVMI_EVENT_PAUSE_VCPU);
}
+static void cmd_vcpu_control_event(struct kvm_vm *vm, __u16 event_id,
+ __u8 enable, __u16 padding,
+ int expected_err)
+{
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vcpu_hdr vcpu_hdr;
+ struct kvmi_vcpu_control_events cmd;
+ } req = {};
+ int r;
+
+ req.cmd.event_id = event_id;
+ req.cmd.enable = enable;
+ req.cmd.padding1 = padding;
+ req.cmd.padding2 = padding;
+
+ r = do_vcpu0_command(vm, KVMI_VCPU_CONTROL_EVENTS,
+ &req.hdr, sizeof(req), NULL, 0);
+ TEST_ASSERT(r == expected_err,
+		    "KVMI_VCPU_CONTROL_EVENTS failed for vCPU event %d, error %d (%s), expected error %d\n",
+ event_id, -r, kvm_strerror(-r), expected_err);
+}
+
+static void enable_vcpu_event(struct kvm_vm *vm, __u16 event_id)
+{
+ cmd_vcpu_control_event(vm, event_id, 1, 0, 0);
+}
+
+static void disable_vcpu_event(struct kvm_vm *vm, __u16 event_id)
+{
+ cmd_vcpu_control_event(vm, event_id, 0, 0, 0);
+}
+
+static void test_cmd_vcpu_control_events(struct kvm_vm *vm)
+{
+ __u16 id = KVMI_EVENT_PAUSE_VCPU, invalid_id = 0xffff;
+ __u16 padding = 1, no_padding = 0;
+ __u8 enable = 1, enable_inval = 2;
+
+ enable_vcpu_event(vm, id);
+ disable_vcpu_event(vm, id);
+
+ cmd_vcpu_control_event(vm, id, enable, padding, -KVM_EINVAL);
+ cmd_vcpu_control_event(vm, id, enable_inval, no_padding, -KVM_EINVAL);
+ cmd_vcpu_control_event(vm, invalid_id, enable, no_padding, -KVM_EINVAL);
+
+ disallow_event(vm, id);
+ cmd_vcpu_control_event(vm, id, enable, no_padding, -KVM_EPERM);
+ allow_event(vm, id);
+}
+
static void test_introspection(struct kvm_vm *vm)
{
srandom(time(0));
@@ -850,6 +903,7 @@ static void test_introspection(struct kvm_vm *vm)
test_memory_access(vm);
test_cmd_vcpu_get_info(vm);
test_pause(vm);
+ test_cmd_vcpu_control_events(vm);
unhook_introspection(vm);
}
@@ -73,6 +73,11 @@ bool kvmi_is_known_vm_event(u8 id)
return id < KVMI_NUM_EVENTS && test_bit(id, Kvmi_known_vm_events);
}
+bool kvmi_is_known_vcpu_event(u8 id)
+{
+ return id < KVMI_NUM_EVENTS && test_bit(id, Kvmi_known_vcpu_events);
+}
+
static bool is_vm_event_enabled(struct kvm_introspection *kvmi, int event)
{
return test_bit(event, kvmi->vm_event_enable_mask);
@@ -179,6 +184,12 @@ static bool alloc_vcpui(struct kvm_vcpu *vcpu)
if (!vcpui)
return false;
+ vcpui->ev_enable_mask = bitmap_zalloc(KVMI_NUM_EVENTS, GFP_KERNEL);
+ if (!vcpui->ev_enable_mask) {
+		kfree(vcpui);
+ return false;
+ }
+
INIT_LIST_HEAD(&vcpui->job_list);
spin_lock_init(&vcpui->job_lock);
@@ -214,6 +225,8 @@ static void free_vcpui(struct kvm_vcpu *vcpu)
free_vcpu_jobs(vcpui);
+ bitmap_free(vcpui->ev_enable_mask);
+
kfree(vcpui);
vcpu->kvmi = NULL;
}
@@ -613,6 +626,19 @@ int kvmi_cmd_vm_control_events(struct kvm_introspection *kvmi,
return 0;
}
+int kvmi_cmd_vcpu_control_events(struct kvm_vcpu *vcpu,
+ unsigned int event_id, bool enable)
+{
+ struct kvm_vcpu_introspection *vcpui = VCPUI(vcpu);
+
+ if (enable)
+ set_bit(event_id, vcpui->ev_enable_mask);
+ else
+ clear_bit(event_id, vcpui->ev_enable_mask);
+
+ return 0;
+}
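+
+/*
+ * Nothing reads ev_enable_mask in this patch; a later consumer would
+ * presumably test the bit before sending a vCPU event, mirroring
+ * is_vm_event_enabled() above. A sketch (the helper name is hypothetical):
+ *
+ *	static bool is_vcpu_event_enabled(struct kvm_vcpu *vcpu, int event)
+ *	{
+ *		return test_bit(event, VCPUI(vcpu)->ev_enable_mask);
+ *	}
+ */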
+
static unsigned long gfn_to_hva_safe(struct kvm *kvm, gfn_t gfn)
{
unsigned long hva;
@@ -35,12 +35,15 @@ void kvmi_msg_free(void *addr);
bool kvmi_is_command_allowed(struct kvm_introspection *kvmi, u16 id);
bool kvmi_is_known_event(u8 id);
bool kvmi_is_known_vm_event(u8 id);
+bool kvmi_is_known_vcpu_event(u8 id);
int kvmi_add_job(struct kvm_vcpu *vcpu,
void (*fct)(struct kvm_vcpu *vcpu, void *ctx),
void *ctx, void (*free_fct)(void *ctx));
void kvmi_run_jobs(struct kvm_vcpu *vcpu);
int kvmi_cmd_vm_control_events(struct kvm_introspection *kvmi,
unsigned int event_id, bool enable);
+int kvmi_cmd_vcpu_control_events(struct kvm_vcpu *vcpu,
+ unsigned int event_id, bool enable);
int kvmi_cmd_read_physical(struct kvm *kvm, u64 gpa, size_t size,
int (*send)(struct kvm_introspection *,
const struct kvmi_msg_hdr*,
@@ -397,6 +397,27 @@ static int handle_vcpu_event_reply(const struct kvmi_vcpu_msg_job *job,
return expected->error;
}
+static int handle_vcpu_control_events(const struct kvmi_vcpu_msg_job *job,
+ const struct kvmi_msg_hdr *msg,
+ const void *_req)
+{
+ struct kvm_introspection *kvmi = KVMI(job->vcpu->kvm);
+ const struct kvmi_vcpu_control_events *req = _req;
+ int ec;
+
+ if (req->padding1 || req->padding2 || req->enable > 1)
+ ec = -KVM_EINVAL;
+ else if (!kvmi_is_known_vcpu_event(req->event_id))
+ ec = -KVM_EINVAL;
+ else if (!is_event_allowed(kvmi, req->event_id))
+ ec = -KVM_EPERM;
+ else
+ ec = kvmi_cmd_vcpu_control_events(job->vcpu, req->event_id,
+ req->enable == 1);
+
+ return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
+}
+
/*
* These functions are executed from the vCPU thread. The receiving thread
* passes the messages using a newly allocated 'struct kvmi_vcpu_msg_job'
@@ -405,8 +426,9 @@ static int handle_vcpu_event_reply(const struct kvmi_vcpu_msg_job *job,
*/
static int(*const msg_vcpu[])(const struct kvmi_vcpu_msg_job *,
const struct kvmi_msg_hdr *, const void *) = {
- [KVMI_EVENT] = handle_vcpu_event_reply,
- [KVMI_VCPU_GET_INFO] = handle_vcpu_get_info,
+ [KVMI_EVENT] = handle_vcpu_event_reply,
+ [KVMI_VCPU_CONTROL_EVENTS] = handle_vcpu_control_events,
+ [KVMI_VCPU_GET_INFO] = handle_vcpu_get_info,
};
static bool is_vcpu_command(u16 id)