@@ -80,6 +80,7 @@
#define KVM_REQ_GET_VMCS12_PAGES KVM_ARCH_REQ(24)
#define KVM_REQ_APICV_UPDATE \
KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_INTROSPECTION KVM_ARCH_REQ(26) /* vCPU has queued introspection jobs (see kvmi_handle_requests()) */
#define CR0_RESERVED_BITS \
(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -8549,6 +8549,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.l1tf_flush_l1d = true;
for (;;) {
+ if (kvm_check_request(KVM_REQ_INTROSPECTION, vcpu)) /* raised via kvmi_make_request() */
+ kvmi_handle_requests(vcpu); /* run queued introspection jobs before (re)entering the guest */
+
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {
@@ -56,6 +56,8 @@ int kvmi_ioctl_command(struct kvm *kvm, void __user *argp);
int kvmi_ioctl_event(struct kvm *kvm, void __user *argp);
int kvmi_ioctl_preunhook(struct kvm *kvm);
+void kvmi_handle_requests(struct kvm_vcpu *vcpu); /* handler for KVM_REQ_INTROSPECTION */
+
#else
static inline int kvmi_init(void) { return 0; }
@@ -64,6 +66,8 @@ static inline void kvmi_create_vm(struct kvm *kvm) { }
static inline void kvmi_destroy_vm(struct kvm *kvm) { }
static inline void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu) { }
+static inline void kvmi_handle_requests(struct kvm_vcpu *vcpu) { } /* no-op without CONFIG_KVM_INTROSPECTION */
+
#endif /* CONFIG_KVM_INTROSPECTION */
#endif
@@ -87,6 +87,12 @@ void kvmi_uninit(void)
kvmi_cache_destroy();
}
+static void kvmi_make_request(struct kvm_vcpu *vcpu) /* signal @vcpu that it has introspection work to do */
+{
+ kvm_make_request(KVM_REQ_INTROSPECTION, vcpu);
+ kvm_vcpu_kick(vcpu); /* make sure the request is noticed promptly */
+}
+
static int __kvmi_add_job(struct kvm_vcpu *vcpu,
void (*fct)(struct kvm_vcpu *vcpu, void *ctx),
void *ctx, void (*free_fct)(void *ctx))
@@ -118,6 +124,9 @@ int kvmi_add_job(struct kvm_vcpu *vcpu,
err = __kvmi_add_job(vcpu, fct, ctx, free_fct);
+ if (!err)
+ kvmi_make_request(vcpu); /* kick the vCPU so the freshly queued job runs soon */
+
return err;
}
@@ -270,6 +279,14 @@ int kvmi_ioctl_unhook(struct kvm *kvm)
return 0;
}
+struct kvm_introspection * __must_check kvmi_get(struct kvm *kvm) /* NULL if not introspected; pair with kvmi_put() */
+{
+ if (refcount_inc_not_zero(&kvm->kvmi_ref)) /* only succeeds while the VM is hooked (ref set in kvmi_hook()) */
+ return kvm->kvmi;
+
+ return NULL;
+}
+
void kvmi_put(struct kvm *kvm)
{
if (refcount_dec_and_test(&kvm->kvmi_ref))
@@ -331,6 +348,10 @@ int kvmi_hook(struct kvm *kvm, const struct kvm_introspection_hook *hook)
init_completion(&kvm->kvmi_complete);
refcount_set(&kvm->kvmi_ref, 1);
+ /*
+ * Paired with refcount_inc_not_zero() from kvmi_get(): a reader that sees a non-zero refcount must also see the initialization above — NOTE(review): barrier sits after refcount_set(); confirm intended ordering.
+ */
+ smp_wmb();
kvmi->recv = kthread_run(kvmi_recv_thread, kvmi, "kvmi-recv");
if (IS_ERR(kvmi->recv)) {
@@ -635,3 +656,40 @@ int kvmi_cmd_write_physical(struct kvm *kvm, u64 gpa, size_t size,
return 0;
}
+
+static struct kvmi_job *kvmi_pull_job(struct kvm_vcpu_introspection *vcpui) /* detach the first queued job; NULL if queue empty */
+{
+ struct kvmi_job *job = NULL;
+
+ spin_lock(&vcpui->job_lock);
+ job = list_first_entry_or_null(&vcpui->job_list, typeof(*job), link);
+ if (job)
+ list_del(&job->link); /* caller takes ownership and must free the job */
+ spin_unlock(&vcpui->job_lock);
+
+ return job;
+}
+
+void kvmi_run_jobs(struct kvm_vcpu *vcpu) /* run and free every job queued for @vcpu */
+{
+ struct kvm_vcpu_introspection *vcpui = VCPUI(vcpu);
+ struct kvmi_job *job;
+
+ while ((job = kvmi_pull_job(vcpui))) {
+ job->fct(vcpu, job->ctx);
+ kvmi_free_job(job);
+ }
+}
+
+void kvmi_handle_requests(struct kvm_vcpu *vcpu) /* KVM_REQ_INTROSPECTION handler: run queued jobs under a kvmi reference */
+{
+ struct kvm_introspection *kvmi;
+
+ kvmi = kvmi_get(vcpu->kvm); /* holds the introspection struct alive while jobs run; no-op if VM not introspected */
+ if (!kvmi)
+ return;
+
+ kvmi_run_jobs(vcpu);
+
+ kvmi_put(vcpu->kvm);
+}
@@ -2484,6 +2484,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
goto out;
if (signal_pending(current))
goto out;
+ if (kvm_test_request(KVM_REQ_INTROSPECTION, vcpu))
+ goto out; /* pending introspection work is treated like a pending signal: don't block */
ret = 0;
out: