@@ -9144,6 +9144,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.l1tf_flush_l1d = true;
for (;;) {
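+		/* Let the vCPU handle its queued introspection jobs first. */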
+ if (kvm_check_request(KVM_REQ_INTROSPECTION, vcpu))
+ kvmi_handle_requests(vcpu);
+
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {
@@ -147,6 +147,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_PENDING_TIMER 2
#define KVM_REQ_UNHALT 3
+#define KVM_REQ_INTROSPECTION 4
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
@@ -53,6 +53,8 @@ int kvmi_ioctl_event(struct kvm *kvm,
const struct kvm_introspection_feature *feat);
int kvmi_ioctl_preunhook(struct kvm *kvm);
+void kvmi_handle_requests(struct kvm_vcpu *vcpu);
+
#else
static inline int kvmi_version(void) { return 0; }
@@ -62,6 +64,8 @@ static inline void kvmi_create_vm(struct kvm *kvm) { }
static inline void kvmi_destroy_vm(struct kvm *kvm) { }
static inline void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu) { }
+static inline void kvmi_handle_requests(struct kvm_vcpu *vcpu) { }
+
#endif /* CONFIG_KVM_INTROSPECTION */
#endif
@@ -124,6 +124,12 @@ void kvmi_uninit(void)
kvmi_cache_destroy();
}
+static void kvmi_make_request(struct kvm_vcpu *vcpu)
+{
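+	/*
+	 * The kick forces the vCPU out of guest mode (or out of its wait)
+	 * so that the request is handled on the next pass through vcpu_run().
+	 */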
+ kvm_make_request(KVM_REQ_INTROSPECTION, vcpu);
+ kvm_vcpu_kick(vcpu);
+}
+
static int __kvmi_add_job(struct kvm_vcpu *vcpu,
void (*fct)(struct kvm_vcpu *vcpu, void *ctx),
void *ctx, void (*free_fct)(void *ctx))
@@ -155,6 +161,9 @@ int kvmi_add_job(struct kvm_vcpu *vcpu,
err = __kvmi_add_job(vcpu, fct, ctx, free_fct);
+ if (!err)
+ kvmi_make_request(vcpu);
+
return err;
}
@@ -323,6 +332,14 @@ int kvmi_ioctl_unhook(struct kvm *kvm)
return 0;
}
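+/*
+ * Returns a reference to the introspection structure if the VM is (still)
+ * introspected, NULL otherwise. The caller must release it with kvmi_put().
+ */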
+struct kvm_introspection * __must_check kvmi_get(struct kvm *kvm)
+{
+ if (refcount_inc_not_zero(&kvm->kvmi_ref))
+ return kvm->kvmi;
+
+ return NULL;
+}
+
static void kvmi_put(struct kvm *kvm)
{
if (refcount_dec_and_test(&kvm->kvmi_ref))
@@ -340,6 +357,19 @@ static int __kvmi_hook(struct kvm *kvm,
return 0;
}
+static void kvmi_job_release_vcpu(struct kvm_vcpu *vcpu, void *ctx)
+{
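+	/*
+	 * Nothing to do here: queuing this job is enough to wake the vCPU
+	 * and make it pass through kvmi_handle_requests().
+	 */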
+}
+
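+/*
+ * Wake every vCPU and make it run kvmi_handle_requests(), so that none
+ * remains blocked after the introspection channel has been shut down.
+ */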
+static void kvmi_release_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvmi_add_job(vcpu, kvmi_job_release_vcpu, NULL, NULL);
+}
+
static int kvmi_recv_thread(void *arg)
{
struct kvm_introspection *kvmi = arg;
@@ -350,6 +380,8 @@ static int kvmi_recv_thread(void *arg)
/* Signal userspace and prevent the vCPUs from sending events. */
kvmi_sock_shutdown(kvmi);
+ kvmi_release_vcpus(kvmi->kvm);
+
kvmi_put(kvmi->kvm);
return 0;
}
@@ -382,6 +414,10 @@ static int kvmi_hook(struct kvm *kvm,
init_completion(&kvm->kvmi_complete);
refcount_set(&kvm->kvmi_ref, 1);
+ /*
+ * Paired with refcount_inc_not_zero() from kvmi_get().
+ */
+ smp_wmb();
kvmi->recv = kthread_run(kvmi_recv_thread, kvmi, "kvmi-recv");
if (IS_ERR(kvmi->recv)) {
@@ -670,3 +706,40 @@ int kvmi_cmd_write_physical(struct kvm *kvm, u64 gpa, size_t size,
return ec;
}
+
+static struct kvmi_job *kvmi_pull_job(struct kvm_vcpu_introspection *vcpui)
+{
+ struct kvmi_job *job = NULL;
+
+ spin_lock(&vcpui->job_lock);
+ job = list_first_entry_or_null(&vcpui->job_list, typeof(*job), link);
+ if (job)
+ list_del(&job->link);
+ spin_unlock(&vcpui->job_lock);
+
+ return job;
+}
+
+void kvmi_run_jobs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_introspection *vcpui = VCPUI(vcpu);
+ struct kvmi_job *job;
+
+ while ((job = kvmi_pull_job(vcpui))) {
+ job->fct(vcpu, job->ctx);
+ kvmi_free_job(job);
+ }
+}
+
+void kvmi_handle_requests(struct kvm_vcpu *vcpu)
+{
+ struct kvm_introspection *kvmi;
+
+ kvmi = kvmi_get(vcpu->kvm);
+ if (!kvmi)
+ return;
+
+ kvmi_run_jobs(vcpu);
+
+ kvmi_put(vcpu->kvm);
+}
@@ -2796,6 +2796,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
goto out;
if (signal_pending(current))
goto out;
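+	/* A pending introspection request must bring the vCPU out of its wait. */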
+ if (kvm_test_request(KVM_REQ_INTROSPECTION, vcpu))
+ goto out;
ret = 0;
out: