@@ -4,6 +4,9 @@
 #include <asm/kvmi.h>
 
+struct kvm_vcpu_arch_introspection {
+};
+
 struct kvm_arch_introspection {
 };
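The new per-vCPU arch structure starts out empty by design, like the VM-level one next to it: common code can embed it from day one while later patches in the series add the x86-specific state. Purely as an illustration of where this is headed (the fields below are invented for the example, they are not part of this patch):

struct kvm_vcpu_arch_introspection {
	struct kvm_regs delayed_regs;	/* hypothetical field */
	bool have_delayed_regs;		/* hypothetical field */
};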
@@ -319,6 +319,7 @@ struct kvm_vcpu {
 	bool preempted;
 	bool ready;
 	struct kvm_vcpu_arch arch;
+	struct kvm_vcpu_introspection *kvmi;
 };
 
 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
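Generic KVM code only carries this pointer and never looks inside it, so the layout of struct kvm_vcpu_introspection stays private to the introspection core (a declaration of the type is all kvm_host.h requires). That core will presumably reach the per-vCPU state through a tiny helper so the not-hooked case is checked in one place; a sketch, with the helper name assumed rather than taken from this excerpt:

static inline struct kvm_vcpu_introspection *VCPUI(struct kvm_vcpu *vcpu)
{
	return vcpu->kvmi;	/* NULL while the VM is not hooked */
}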
@@ -6,6 +6,10 @@
 #include <asm/kvmi_host.h>
 
+struct kvm_vcpu_introspection {
+	struct kvm_vcpu_arch_introspection arch;
+};
+
 struct kvm_introspection {
 	struct kvm_arch_introspection arch;
 	struct kvm *kvm;
@@ -28,6 +32,7 @@ int kvmi_init(void);
 void kvmi_uninit(void);
 void kvmi_create_vm(struct kvm *kvm);
 void kvmi_destroy_vm(struct kvm *kvm);
+void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu);
 int kvmi_ioctl_hook(struct kvm *kvm,
 		    const struct kvm_introspection_hook *hook);
@@ -44,6 +49,7 @@ static inline int kvmi_init(void) { return 0; }
 static inline void kvmi_uninit(void) { }
 static inline void kvmi_create_vm(struct kvm *kvm) { }
 static inline void kvmi_destroy_vm(struct kvm *kvm) { }
+static inline void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu) { }
 #endif /* CONFIG_KVM_INTROSPECTION */
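The empty static-inline stubs exist so that call sites need no preprocessor guard: with CONFIG_KVM_INTROSPECTION=n the compiler inlines the empty body and the call disappears entirely. The kvm_main.c hunks at the end of this patch rely on exactly that; kvm_vcpu_destroy() simply does

	kvmi_vcpu_uninit(vcpu);

unguarded, and when introspection is compiled out this expands to the empty inline above and costs nothing.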
@@ -107,8 +107,41 @@ void kvmi_uninit(void)
 	kvmi_cache_destroy();
 }
 
+static bool alloc_vcpui(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_introspection *vcpui;
+
+	vcpui = kzalloc(sizeof(*vcpui), GFP_KERNEL);
+	if (!vcpui)
+		return false;
+
+	vcpu->kvmi = vcpui;
+
+	return true;
+}
+
+static int create_vcpui(struct kvm_vcpu *vcpu)
+{
+	if (!alloc_vcpui(vcpu))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void free_vcpui(struct kvm_vcpu *vcpu)
+{
+	kfree(vcpu->kvmi);
+	vcpu->kvmi = NULL;
+}
+
 static void free_kvmi(struct kvm *kvm)
 {
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		free_vcpui(vcpu);
+
 	bitmap_free(kvm->kvmi->cmd_allow_mask);
 	bitmap_free(kvm->kvmi->event_allow_mask);
 	bitmap_free(kvm->kvmi->vm_event_enable_mask);
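create_vcpui() looks like a needless wrapper around alloc_vcpui() at this stage; presumably later patches in the series add further per-vCPU initialization steps that can fail, without having to touch the callers. Note also that free_vcpui() is safe on a vCPU that was never hooked: vcpu->kvmi starts out NULL (the vCPU structure is zero-allocated) and kfree(NULL) is a no-op. A sketch of how the wrapper is expected to grow (the extra helper is invented for the example):

static int create_vcpui(struct kvm_vcpu *vcpu)
{
	if (!alloc_vcpui(vcpu))
		return -ENOMEM;

	if (!alloc_vcpu_reply_buf(vcpu)) {	/* hypothetical later step */
		free_vcpui(vcpu);
		return -ENOMEM;
	}

	return 0;
}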
@@ -117,10 +150,19 @@ static void free_kvmi(struct kvm *kvm)
 	kvm->kvmi = NULL;
 }
 
+void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+	mutex_lock(&vcpu->kvm->kvmi_lock);
+	free_vcpui(vcpu);
+	mutex_unlock(&vcpu->kvm->kvmi_lock);
+}
+
 static struct kvm_introspection *
 alloc_kvmi(struct kvm *kvm, const struct kvm_introspection_hook *hook)
 {
 	struct kvm_introspection *kvmi;
+	struct kvm_vcpu *vcpu;
+	int i;
 
 	kvmi = kzalloc(sizeof(*kvmi), GFP_KERNEL);
 	if (!kvmi)
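kvmi_vcpu_uninit() takes kvm->kvmi_lock (introduced by an earlier patch in this series) because hooking and unhooking walk the whole vCPU list under the same lock: without it, a vCPU being destroyed could race with the kvm_for_each_vcpu() loops in alloc_kvmi()/free_kvmi() and vcpu->kvmi could be freed twice. The rule, summarized (inferred from this excerpt, not spelled out by the patch):

/*
 * vcpu->kvmi is written only with kvm->kvmi_lock held:
 *   - set by create_vcpui(), called from alloc_kvmi() at hook time;
 *   - cleared by free_vcpui(), either per vCPU (kvmi_vcpu_uninit())
 *     or for every vCPU at unhook time (free_kvmi()).
 * Since free_vcpui() NULLs the pointer, a second call is harmless.
 */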
@@ -146,6 +188,16 @@ alloc_kvmi(struct kvm *kvm, const struct kvm_introspection_hook *hook)
 	atomic_set(&kvmi->ev_seq, 0);
 
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		int err = create_vcpui(vcpu);
+
+		if (err) {
+			kvm->kvmi = kvmi;	/* free_kvmi() reads kvm->kvmi */
+			free_kvmi(kvm);
+			return NULL;
+		}
+	}
+
 	kvmi->kvm = kvm;
 
 	return kvmi;
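On failure in the vCPU loop, free_kvmi() both frees the vcpui of every vCPU already covered and releases the bitmaps, so the caller only ever sees NULL. The hooking path that consumes this return value is not part of this excerpt; presumably it looks roughly like the following (function name and placement are assumptions):

static int kvmi_create(struct kvm *kvm, const struct kvm_introspection_hook *hook)
{
	if (kvm->kvmi)
		return -EEXIST;		/* already hooked */

	kvm->kvmi = alloc_kvmi(kvm, hook);

	return kvm->kvmi ? 0 : -ENOMEM;
}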
@@ -366,6 +366,7 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	kvmi_vcpu_uninit(vcpu);
 	kvm_arch_vcpu_destroy(vcpu);
 
 	/*
@@ -3137,6 +3138,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 unlock_vcpu_destroy:
 	mutex_unlock(&kvm->lock);
+	kvmi_vcpu_uninit(vcpu);
 	kvm_arch_vcpu_destroy(vcpu);
 vcpu_free_run_page:
 	free_page((unsigned long)vcpu->run);
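In both teardown paths kvmi_vcpu_uninit() runs before kvm_arch_vcpu_destroy(), so no introspection state outlives the vCPU proper; in the create_vcpu error path the call is normally a no-op, since vcpu->kvmi is still NULL unless the VM was hooked while the vCPU was being set up. What this excerpt does not show is how a vCPU created after the VM is already hooked gets its vcpui; a later (or elided) hunk presumably adds a counterpart along these lines (name and logic are guesses, not from this patch):

void kvmi_vcpu_init(struct kvm_vcpu *vcpu)	/* hypothetical counterpart */
{
	mutex_lock(&vcpu->kvm->kvmi_lock);
	if (vcpu->kvm->kvmi)
		create_vcpui(vcpu);		/* error handling elided */
	mutex_unlock(&vcpu->kvm->kvmi_lock);
}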