@@ -340,11 +340,45 @@ This command is always allowed.
Returns the number of online vCPUs.
+5. KVMI_VM_CONTROL_EVENTS
+-------------------------
+
+:Architectures: all
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vm_control_events {
+ __u16 event_id;
+ __u8 enable;
+ __u8 padding1;
+ __u32 padding2;
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code
+
+Enables/disables VM introspection events. This command can be used with
+the following events::
+
+ KVMI_EVENT_UNHOOK
+
+:Errors:
+
+* -KVM_EINVAL - the padding is not zero or the enable field is not 0 or 1
+* -KVM_EINVAL - the event ID is unknown (use *KVMI_VM_CHECK_EVENT* first)
+* -KVM_EPERM - the access is disallowed (use *KVMI_VM_CHECK_EVENT* first)
+
Events
======
All introspection events (VM or vCPU related) are sent
-using the *KVMI_EVENT* message id.
+using the *KVMI_EVENT* message id. No event will be sent unless
+it is explicitly enabled.
The *KVMI_EVENT_UNHOOK* event doesn't have a reply and share the kvmi_event
structure, for consistency with the vCPU events.
@@ -398,6 +432,8 @@ Specific data can follow these common structures.
:Returns: none
-This event is sent when the device manager has to pause/stop/migrate the
-guest (see **Unhooking**). The introspection tool has a chance to unhook
-and close the KVMI channel (signaling that the operation can proceed).
+This event is sent when the device manager has to pause/stop/migrate
+the guest (see **Unhooking**) and the introspection has been enabled
+for this event (see **KVMI_VM_CONTROL_EVENTS**). The introspection tool
+has a chance to unhook and close the KVMI channel (signaling that the
+operation can proceed).
@@ -22,6 +22,8 @@ struct kvm_introspection {
DECLARE_BITMAP(cmd_allow_mask, KVMI_NUM_COMMANDS);
DECLARE_BITMAP(event_allow_mask, KVMI_NUM_EVENTS);
+ DECLARE_BITMAP(vm_event_enable_mask, KVMI_NUM_EVENTS);
+
atomic_t ev_seq;
};
@@ -15,12 +15,13 @@ enum {
};
enum {
- KVMI_EVENT = 1,
+ KVMI_EVENT = 1,
- KVMI_GET_VERSION = 2,
- KVMI_VM_CHECK_COMMAND = 3,
- KVMI_VM_CHECK_EVENT = 4,
- KVMI_VM_GET_INFO = 5,
+ KVMI_GET_VERSION = 2,
+ KVMI_VM_CHECK_COMMAND = 3,
+ KVMI_VM_CHECK_EVENT = 4,
+ KVMI_VM_GET_INFO = 5,
+ KVMI_VM_CONTROL_EVENTS = 6,
KVMI_NUM_MESSAGES
};
@@ -75,6 +76,13 @@ struct kvmi_vm_get_info_reply {
__u32 padding[3];
};
+struct kvmi_vm_control_events {
+ __u16 event_id;
+ __u8 enable;
+ __u8 padding1;
+ __u32 padding2;
+};
+
struct kvmi_event {
__u16 size;
__u16 vcpu;
@@ -245,9 +245,15 @@ static int cmd_check_event(__u16 id)
static void test_cmd_check_event(void)
{
+ __u16 valid_id = KVMI_EVENT_UNHOOK;
__u16 invalid_id = 0xffff;
int r;
+ r = cmd_check_event(valid_id);
+ TEST_ASSERT(r == 0,
+ "KVMI_VM_CHECK_EVENT failed, error %d (%s)\n",
+ -r, kvm_strerror(-r));
+
r = cmd_check_event(invalid_id);
TEST_ASSERT(r == -KVM_ENOENT,
"KVMI_VM_CHECK_EVENT didn't failed with -KVM_ENOENT, error %d (%s)\n",
@@ -298,15 +304,62 @@ static void receive_event(struct kvmi_msg_hdr *hdr, struct kvmi_event *ev,
ev->event, event_id);
}
+static int cmd_vm_control_events(__u16 event_id, bool enable)
+{
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vm_control_events cmd;
+ } req = {};
+
+ req.cmd.event_id = event_id;
+ req.cmd.enable = enable ? 1 : 0;
+
+ return do_command(KVMI_VM_CONTROL_EVENTS, &req.hdr, sizeof(req),
+ NULL, 0);
+}
+
+static void enable_vm_event(__u16 event_id)
+{
+ int r;
+
+ r = cmd_vm_control_events(event_id, true);
+ TEST_ASSERT(r == 0,
+ "KVMI_VM_CONTROL_EVENTS failed to enable VM event %d, error %d (%s)\n",
+ event_id, -r, kvm_strerror(-r));
+}
+
+static void disable_vm_event(__u16 event_id)
+{
+ int r;
+
+ r = cmd_vm_control_events(event_id, false);
+ TEST_ASSERT(r == 0,
+ "KVMI_VM_CONTROL_EVENTS failed to disable VM event %d, error %d (%s)\n",
+ event_id, -r, kvm_strerror(-r));
+}
+
static void test_event_unhook(struct kvm_vm *vm)
{
__u16 id = KVMI_EVENT_UNHOOK;
struct kvmi_msg_hdr hdr;
struct kvmi_event ev;
+ enable_vm_event(id);
+
trigger_event_unhook_notification(vm);
receive_event(&hdr, &ev, sizeof(ev), id);
+
+ disable_vm_event(id);
+}
+
+static void test_cmd_vm_control_events(void)
+{
+ __u16 id = KVMI_EVENT_UNHOOK;
+
+ enable_vm_event(id);
+
+ disable_vm_event(id);
}
static void test_introspection(struct kvm_vm *vm)
@@ -320,6 +373,7 @@ static void test_introspection(struct kvm_vm *vm)
test_cmd_check_event();
test_cmd_get_vm_info();
test_event_unhook(vm);
+ test_cmd_vm_control_events();
unhook_introspection(vm);
}
@@ -12,7 +12,7 @@
static DECLARE_BITMAP(Kvmi_always_allowed_commands, KVMI_NUM_COMMANDS);
DECLARE_BITMAP(Kvmi_known_events, KVMI_NUM_EVENTS);
-static DECLARE_BITMAP(Kvmi_known_vm_events, KVMI_NUM_EVENTS);
+DECLARE_BITMAP(Kvmi_known_vm_events, KVMI_NUM_EVENTS);
static DECLARE_BITMAP(Kvmi_known_vcpu_events, KVMI_NUM_EVENTS);
static struct kmem_cache *msg_cache;
@@ -360,10 +360,18 @@ int kvmi_ioctl_command(struct kvm *kvm, void __user *argp)
return err;
}
+static bool is_vm_event_enabled(struct kvm_introspection *kvmi, int event)
+{
+ return test_bit(event, kvmi->vm_event_enable_mask);
+}
+
static bool kvmi_unhook_event(struct kvm_introspection *kvmi)
{
int err;
+ if (!is_vm_event_enabled(kvmi, KVMI_EVENT_UNHOOK))
+ return false;
+
err = kvmi_msg_send_unhook(kvmi);
return !err;
@@ -389,3 +397,14 @@ int kvmi_ioctl_preunhook(struct kvm *kvm)
mutex_unlock(&kvm->kvmi_lock);
return err;
}
+
+int kvmi_cmd_vm_control_events(struct kvm_introspection *kvmi,
+ unsigned int event_id, bool enable)
+{
+ if (enable)
+ set_bit(event_id, kvmi->vm_event_enable_mask);
+ else
+ clear_bit(event_id, kvmi->vm_event_enable_mask);
+
+ return 0;
+}
@@ -17,6 +17,7 @@
kvm_info("%pU ERROR: " fmt, &kvmi->uuid, ## __VA_ARGS__)
extern DECLARE_BITMAP(Kvmi_known_events, KVMI_NUM_EVENTS);
+extern DECLARE_BITMAP(Kvmi_known_vm_events, KVMI_NUM_EVENTS);
#define KVMI(kvm) ((kvm)->kvmi)
@@ -30,5 +31,7 @@ int kvmi_msg_send_unhook(struct kvm_introspection *kvmi);
/* kvmi.c */
void *kvmi_msg_alloc(void);
void kvmi_msg_free(void *addr);
+int kvmi_cmd_vm_control_events(struct kvm_introspection *kvmi,
+ unsigned int event_id, bool enable);
#endif
@@ -171,15 +171,38 @@ static int handle_get_info(struct kvm_introspection *kvmi,
return kvmi_msg_vm_reply(kvmi, msg, 0, &rpl, sizeof(rpl));
}
+static int handle_vm_control_events(struct kvm_introspection *kvmi,
+ const struct kvmi_msg_hdr *msg,
+ const void *_req)
+{
+ const struct kvmi_vm_control_events *req = _req;
+ int ec;
+
+ if (req->padding1 || req->padding2 || req->enable > 1)
+ ec = -KVM_EINVAL;
+ else if (req->event_id >= KVMI_NUM_EVENTS)
+ ec = -KVM_EINVAL;
+ else if (!test_bit(req->event_id, Kvmi_known_vm_events))
+ ec = -KVM_EINVAL;
+ else if (!is_event_allowed(kvmi, req->event_id))
+ ec = -KVM_EPERM;
+ else
+ ec = kvmi_cmd_vm_control_events(kvmi, req->event_id,
+ req->enable == 1);
+
+ return kvmi_msg_vm_reply(kvmi, msg, ec, NULL, 0);
+}
+
/*
* These commands are executed by the receiving thread/worker.
*/
static int(*const msg_vm[])(struct kvm_introspection *,
const struct kvmi_msg_hdr *, const void *) = {
- [KVMI_GET_VERSION] = handle_get_version,
- [KVMI_VM_CHECK_COMMAND] = handle_check_command,
- [KVMI_VM_CHECK_EVENT] = handle_check_event,
- [KVMI_VM_GET_INFO] = handle_get_info,
+ [KVMI_GET_VERSION] = handle_get_version,
+ [KVMI_VM_CHECK_COMMAND] = handle_check_command,
+ [KVMI_VM_CHECK_EVENT] = handle_check_event,
+ [KVMI_VM_CONTROL_EVENTS] = handle_vm_control_events,
+ [KVMI_VM_GET_INFO] = handle_get_info,
};
static bool is_vm_command(u16 id)
With this command the introspection tool enables/disables VM events (i.e. KVMI_EVENT_UNHOOK). By default, all events are disabled. Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com> --- Documentation/virt/kvm/kvmi.rst | 44 +++++++++++++-- include/linux/kvmi_host.h | 2 + include/uapi/linux/kvmi.h | 18 +++++-- .../testing/selftests/kvm/x86_64/kvmi_test.c | 54 +++++++++++++++++++ virt/kvm/introspection/kvmi.c | 21 +++++++- virt/kvm/introspection/kvmi_int.h | 3 ++ virt/kvm/introspection/kvmi_msg.c | 31 +++++++++-- 7 files changed, 159 insertions(+), 14 deletions(-)