@@ -275,6 +275,7 @@ struct kvm_vcpu {
bool preempted;
struct kvm_vcpu_arch arch;
struct dentry *debugfs_dentry;
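+	/* per-vCPU introspection data (struct kvmi_vcpu) */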
+ void *kvmi;
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
@@ -14,6 +14,8 @@ int kvmi_ioctl_hook(struct kvm *kvm, void __user *argp);
int kvmi_ioctl_command(struct kvm *kvm, void __user *argp);
int kvmi_ioctl_event(struct kvm *kvm, void __user *argp);
int kvmi_ioctl_unhook(struct kvm *kvm, bool force_reset);
+int kvmi_vcpu_init(struct kvm_vcpu *vcpu);
+void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu);
#else
@@ -21,6 +23,8 @@ static inline int kvmi_init(void) { return 0; }
static inline void kvmi_uninit(void) { }
static inline void kvmi_create_vm(struct kvm *kvm) { }
static inline void kvmi_destroy_vm(struct kvm *kvm) { }
+static inline int kvmi_vcpu_init(struct kvm_vcpu *vcpu) { return 0; }
+static inline void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_INTROSPECTION */
@@ -316,6 +316,13 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
r = kvm_arch_vcpu_init(vcpu);
if (r < 0)
goto fail_free_run;
+
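+	/* allocate the per-vCPU introspection data if the VM is introspected */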
+ r = kvmi_vcpu_init(vcpu);
+ if (r < 0) {
+ kvm_arch_vcpu_uninit(vcpu);
+ goto fail_free_run;
+ }
+
return 0;
fail_free_run:
@@ -333,6 +340,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
* descriptors are already gone.
*/
put_pid(rcu_dereference_protected(vcpu->pid, 1));
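+	/* free the per-vCPU introspection data, if still allocated */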
+ kvmi_vcpu_uninit(vcpu);
kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
@@ -80,6 +80,19 @@ static bool alloc_kvmi(struct kvm *kvm, const struct kvm_introspection *qemu)
return true;
}
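+/* Allocate the per-vCPU introspection data and link it to the vCPU. */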
+static bool alloc_ivcpu(struct kvm_vcpu *vcpu)
+{
+ struct kvmi_vcpu *ivcpu;
+
+ ivcpu = kzalloc(sizeof(*ivcpu), GFP_KERNEL);
+ if (!ivcpu)
+ return false;
+
+ vcpu->kvmi = ivcpu;
+
+ return true;
+}
+
struct kvmi * __must_check kvmi_get(struct kvm *kvm)
{
if (refcount_inc_not_zero(&kvm->kvmi_ref))
@@ -90,8 +103,16 @@ struct kvmi * __must_check kvmi_get(struct kvm *kvm)
static void kvmi_destroy(struct kvm *kvm)
{
+ struct kvm_vcpu *vcpu;
+ int i;
+
kfree(kvm->kvmi);
kvm->kvmi = NULL;
+
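+	/* free the per-vCPU introspection data of every vCPU */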
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kfree(vcpu->kvmi);
+ vcpu->kvmi = NULL;
+ }
}
static void kvmi_release(struct kvm *kvm)
@@ -109,6 +130,48 @@ void kvmi_put(struct kvm *kvm)
kvmi_release(kvm);
}
+/*
+ * VCPU hotplug - this function will likely be called before the VCPU starts
+ * executing code
+ */
+int kvmi_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct kvmi *ikvm;
+ int ret = 0;
+
+ ikvm = kvmi_get(vcpu->kvm);
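+	/* the VM is not introspected; there is nothing to allocate */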
+ if (!ikvm)
+ return 0;
+
+ if (!alloc_ivcpu(vcpu)) {
+ kvmi_err(ikvm, "Unable to alloc ivcpu for vcpu_id %u\n",
+ vcpu->vcpu_id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+out:
+ kvmi_put(vcpu->kvm);
+
+ return ret;
+}
+
+/*
+ * VCPU hotplug - this function will likely be called after the VCPU stops
+ * executing code
+ */
+void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Under certain circumstances (errors while creating the VCPU,
+	 * hotplug?), this function may be reached with the kvmi member still
+	 * allocated. Such a VCPU is not reachable by the introspection
+	 * engine, so no protection is necessary when freeing it.
+	 */
+ kfree(vcpu->kvmi);
+ vcpu->kvmi = NULL;
+}
+
static void kvmi_end_introspection(struct kvmi *ikvm)
{
struct kvm *kvm = ikvm->kvm;
@@ -142,8 +205,9 @@ static int kvmi_recv(void *arg)
int kvmi_hook(struct kvm *kvm, const struct kvm_introspection *qemu)
{
+ struct kvm_vcpu *vcpu;
struct kvmi *ikvm;
- int err = 0;
+ int i, err = 0;
/* wait for the previous introspection to finish */
err = wait_for_completion_killable(&kvm->kvmi_completed);
@@ -159,6 +223,13 @@ int kvmi_hook(struct kvm *kvm, const struct kvm_introspection *qemu)
}
ikvm = IKVM(kvm);
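+	/* allocate the per-vCPU introspection data for the already created vCPUs */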
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!alloc_ivcpu(vcpu)) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+
/* interact with other kernel components after structure allocation */
if (!kvmi_sock_get(ikvm, qemu->fd)) {
err = -EINVAL;
@@ -23,6 +23,8 @@
#define kvmi_err(ikvm, fmt, ...) \
kvm_info("%pU ERROR: " fmt, &ikvm->uuid, ## __VA_ARGS__)
+#define IVCPU(vcpu) ((struct kvmi_vcpu *)((vcpu)->kvmi))
+
#define KVMI_MSG_SIZE_ALLOC (sizeof(struct kvmi_msg_hdr) + KVMI_MSG_SIZE)
#define KVMI_KNOWN_VCPU_EVENTS ( \
@@ -73,6 +75,9 @@
#define KVMI_NUM_COMMANDS KVMI_NEXT_AVAILABLE_COMMAND
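+/* per-vCPU introspection data, reachable through vcpu->kvmi (see IVCPU()) */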
+struct kvmi_vcpu {
+};
+
#define IKVM(kvm) ((struct kvmi *)((kvm)->kvmi))
struct kvmi {