@@ -2,6 +2,9 @@
#ifndef _ASM_X86_KVMI_HOST_H
#define _ASM_X86_KVMI_HOST_H
+struct kvm_vcpu_arch_introspection { /* x86 per-vCPU introspection state; empty placeholder for now */
+};
+
struct kvm_arch_introspection {
};
@@ -321,6 +321,7 @@ struct kvm_vcpu {
bool ready;
struct kvm_vcpu_arch arch;
struct kvm_dirty_ring dirty_ring;
+ struct kvm_vcpu_introspection *kvmi; /* set by kvmi_alloc_vcpui(), cleared by kvmi_free_vcpui() */
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
@@ -6,6 +6,10 @@
#include <asm/kvmi_host.h>
+struct kvm_vcpu_introspection { /* per-vCPU introspection state, hung off vcpu->kvmi */
+ struct kvm_vcpu_arch_introspection arch; /* arch-specific part (see asm/kvmi_host.h) */
+};
+
struct kvm_introspection {
struct kvm_arch_introspection arch;
struct kvm *kvm;
@@ -28,6 +32,7 @@ int kvmi_init(void);
void kvmi_uninit(void);
void kvmi_create_vm(struct kvm *kvm);
void kvmi_destroy_vm(struct kvm *kvm);
+void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu); /* frees vcpu->kvmi under kvm->kvmi_lock */
int kvmi_ioctl_hook(struct kvm *kvm,
const struct kvm_introspection_hook *hook);
@@ -45,6 +50,7 @@ static inline int kvmi_init(void) { return 0; }
static inline void kvmi_uninit(void) { }
static inline void kvmi_create_vm(struct kvm *kvm) { }
static inline void kvmi_destroy_vm(struct kvm *kvm) { }
+static inline void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu) { } /* no-op when CONFIG_KVM_INTROSPECTION=n */
#endif /* CONFIG_KVM_INTROSPECTION */
@@ -118,8 +118,41 @@ void kvmi_uninit(void)
kvmi_cache_destroy();
}
+static bool kvmi_alloc_vcpui(struct kvm_vcpu *vcpu) /* allocate and attach vcpu->kvmi; false on OOM */
+{
+ struct kvm_vcpu_introspection *vcpui;
+
+ vcpui = kzalloc(sizeof(*vcpui), GFP_KERNEL); /* zeroed, so the arch sub-struct starts cleared */
+ if (!vcpui)
+ return false;
+
+ vcpu->kvmi = vcpui;
+
+ return true;
+}
+
+static int kvmi_create_vcpui(struct kvm_vcpu *vcpu) /* errno-style wrapper over kvmi_alloc_vcpui() */
+{
+ if (!kvmi_alloc_vcpui(vcpu))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void kvmi_free_vcpui(struct kvm_vcpu *vcpu) /* release vcpu->kvmi; safe if never allocated */
+{
+ kfree(vcpu->kvmi); /* kfree(NULL) is a no-op */
+ vcpu->kvmi = NULL; /* guard against double free / stale use */
+}
+
static void kvmi_free(struct kvm *kvm)
{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) /* drop per-vCPU state before the VM-wide state below */
+ kvmi_free_vcpui(vcpu);
+
bitmap_free(kvm->kvmi->cmd_allow_mask);
bitmap_free(kvm->kvmi->event_allow_mask);
bitmap_free(kvm->kvmi->vm_event_enable_mask);
@@ -128,10 +161,19 @@ static void kvmi_free(struct kvm *kvm)
kvm->kvmi = NULL;
}
+void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu) /* vCPU-destroy hook: free vcpu->kvmi */
+{
+ mutex_lock(&vcpu->kvm->kvmi_lock); /* presumably serializes with hook/unhook paths holding kvmi_lock — verify */
+ kvmi_free_vcpui(vcpu);
+ mutex_unlock(&vcpu->kvm->kvmi_lock);
+}
+
static struct kvm_introspection *
kvmi_alloc(struct kvm *kvm, const struct kvm_introspection_hook *hook)
{
struct kvm_introspection *kvmi;
+ struct kvm_vcpu *vcpu;
+ int i;
kvmi = kzalloc(sizeof(*kvmi), GFP_KERNEL);
if (!kvmi)
@@ -157,6 +199,15 @@ kvmi_alloc(struct kvm *kvm, const struct kvm_introspection_hook *hook)
atomic_set(&kvmi->ev_seq, 0);
+ kvm_for_each_vcpu(i, vcpu, kvm) { /* allocate per-vCPU state for every already-created vCPU */
+ int err = kvmi_create_vcpui(vcpu);
+
+ if (err) {
+ kvmi_free(kvm); /* NOTE(review): kvmi_free() dereferences kvm->kvmi — assumes it was assigned earlier in this function (not visible in this hunk); verify */
+ return NULL;
+ }
+ }
+
kvmi->kvm = kvm;
return kvmi;
@@ -421,6 +421,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
+ kvmi_vcpu_uninit(vcpu); /* drop introspection state before arch/vCPU teardown */
kvm_dirty_ring_free(&vcpu->dirty_ring);
kvm_arch_vcpu_destroy(vcpu);
@@ -3253,6 +3254,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
+ kvmi_vcpu_uninit(vcpu); /* undo per-vCPU introspection alloc on the create-vCPU error path */
kvm_dirty_ring_free(&vcpu->dirty_ring);
arch_vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);