@@ -8706,6 +8706,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.l1tf_flush_l1d = true;
 
 	for (;;) {
+		if (kvm_check_request(KVM_REQ_INTROSPECTION, vcpu))
+			kvmi_handle_requests(vcpu);
+
 		if (kvm_vcpu_running(vcpu)) {
 			r = vcpu_enter_guest(vcpu);
 		} else {
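Together with the kvmi_make_request() helper added further down, this is the standard KVM request pattern: another thread raises a request bit and kicks the vCPU, which notices the bit at the top of its run loop before (re)entering the guest. Because the check sits before kvm_vcpu_running(), queued jobs are handled whether the vCPU is runnable or halted. A minimal sketch of the producer side (the same two calls the patch later wraps in kvmi_make_request()):

	/*
	 * From any thread: raise the request, then kick the vCPU so it
	 * leaves guest mode (or wakes from halt) and reaches the check
	 * at the top of vcpu_run()'s loop.
	 */
	kvm_make_request(KVM_REQ_INTROSPECTION, vcpu);
	kvm_vcpu_kick(vcpu);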
@@ -146,6 +146,7 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_MMU_RELOAD        (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_PENDING_TIMER     2
 #define KVM_REQ_UNHALT            3
+#define KVM_REQ_INTROSPECTION     4
 #define KVM_REQUEST_ARCH_BASE     8
 
 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
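KVM request numbers are bit indices into vcpu->requests. Generic requests live below KVM_REQUEST_ARCH_BASE (8), which makes 4 the next free generic slot; architecture code allocates its numbers relative to that base. For context, this is how existing code (not part of this patch) builds an x86 arch request:

	/* include/linux/kvm_host.h */
	#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)

	/* arch/x86/include/asm/kvm_host.h */
	#define KVM_REQ_MIGRATE_TIMER	KVM_ARCH_REQ(0)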
@@ -53,6 +53,8 @@ int kvmi_ioctl_event(struct kvm *kvm,
                     const struct kvm_introspection_feature *feat);
 int kvmi_ioctl_preunhook(struct kvm *kvm);
 
+void kvmi_handle_requests(struct kvm_vcpu *vcpu);
+
 #else
 
 static inline int kvmi_init(void) { return 0; }
@@ -61,6 +63,8 @@ static inline void kvmi_create_vm(struct kvm *kvm) { }
 static inline void kvmi_destroy_vm(struct kvm *kvm) { }
 static inline void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu) { }
+static inline void kvmi_handle_requests(struct kvm_vcpu *vcpu) { }
+
 #endif /* CONFIG_KVM_INTROSPECTION */
 
 #endif
@@ -113,6 +113,12 @@ void kvmi_uninit(void)
 	kvmi_cache_destroy();
 }
 
+static void kvmi_make_request(struct kvm_vcpu *vcpu)
+{
+	kvm_make_request(KVM_REQ_INTROSPECTION, vcpu);
+	kvm_vcpu_kick(vcpu);
+}
+
 static int __kvmi_add_job(struct kvm_vcpu *vcpu,
                           void (*fct)(struct kvm_vcpu *vcpu, void *ctx),
                           void *ctx, void (*free_fct)(void *ctx))
@@ -144,6 +150,9 @@ int kvmi_add_job(struct kvm_vcpu *vcpu,
 	err = __kvmi_add_job(vcpu, fct, ctx, free_fct);
 
+	if (!err)
+		kvmi_make_request(vcpu);
+
 	return err;
 }
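With this hunk, queuing a job is all a producer needs to do: __kvmi_add_job() links it on the vCPU's job list and kvmi_make_request() forces the vCPU to come and process it. A hedged usage sketch with a hypothetical callback, context structure, and free routine (none of these names are from the patch), assuming free_fct is invoked when the job is freed, as the kvmi_free_job() call below suggests:

	struct example_ctx {
		gpa_t gpa;	/* hypothetical payload */
	};

	static void example_job(struct kvm_vcpu *vcpu, void *ctx)
	{
		struct example_ctx *ec = ctx;	/* runs on the vCPU thread */

		/* ... act on vcpu using ec->gpa ... */
	}

	static void example_free(void *ctx)
	{
		kfree(ctx);
	}

		/* Producer side: */
		struct example_ctx *ec = kzalloc(sizeof(*ec), GFP_KERNEL);

		if (ec && kvmi_add_job(vcpu, example_job, ec, example_free))
			example_free(ec);	/* not queued, free it ourselves */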
@@ -312,6 +321,14 @@ int kvmi_ioctl_unhook(struct kvm *kvm)
 	return 0;
 }
 
+struct kvm_introspection * __must_check kvmi_get(struct kvm *kvm)
+{
+	if (refcount_inc_not_zero(&kvm->kvmi_ref))
+		return kvm->kvmi;
+
+	return NULL;
+}
+
 void kvmi_put(struct kvm *kvm)
 {
 	if (refcount_dec_and_test(&kvm->kvmi_ref))
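kvmi_get()/kvmi_put() form the usual refcounted-lookup pair: refcount_inc_not_zero() succeeds only while the VM is hooked (unhooking drops the initial reference), and every successful kvmi_get() must be balanced by a kvmi_put(), as kvmi_handle_requests() below demonstrates. Sketch of a typical user:

	struct kvm_introspection *kvmi;

	kvmi = kvmi_get(kvm);
	if (!kvmi)
		return;		/* VM is not (or no longer) introspected */

	/* ... safely use kvmi; it cannot be freed under us ... */

	kvmi_put(kvm);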
@@ -373,6 +390,10 @@ int kvmi_hook(struct kvm *kvm, const struct kvm_introspection_hook *hook)
 	init_completion(&kvm->kvmi_complete);
 
 	refcount_set(&kvm->kvmi_ref, 1);
+	/*
+	 * Paired with refcount_inc_not_zero() from kvmi_get().
+	 */
+	smp_wmb();
 
 	kvmi->recv = kthread_run(kvmi_recv_thread, kvmi, "kvmi-recv");
 	if (IS_ERR(kvmi->recv)) {
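The barrier publishes the fully initialized structure: every store made while setting up kvmi, including the refcount_set() above, must become visible before another CPU can win refcount_inc_not_zero() in kvmi_get() and dereference kvm->kvmi. This is the classic initialize-then-publish idiom; a simplified sketch, not the literal patch code:

	/* Writer (hook path): initialize everything, then publish. */
	kvm->kvmi = kvmi;			/* object fully set up */
	refcount_set(&kvm->kvmi_ref, 1);
	smp_wmb();				/* init visible before readers succeed */

	/* Reader (kvmi_get()): take a reference, only then dereference. */
	if (refcount_inc_not_zero(&kvm->kvmi_ref))
		return kvm->kvmi;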
@@ -672,3 +693,40 @@ int kvmi_cmd_write_physical(struct kvm *kvm, u64 gpa, size_t size,
 
 	return 0;
 }
+
+static struct kvmi_job *kvmi_pull_job(struct kvm_vcpu_introspection *vcpui)
+{
+	struct kvmi_job *job = NULL;
+
+	spin_lock(&vcpui->job_lock);
+	job = list_first_entry_or_null(&vcpui->job_list, typeof(*job), link);
+	if (job)
+		list_del(&job->link);
+	spin_unlock(&vcpui->job_lock);
+
+	return job;
+}
+
+void kvmi_run_jobs(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_introspection *vcpui = VCPUI(vcpu);
+	struct kvmi_job *job;
+
+	while ((job = kvmi_pull_job(vcpui))) {
+		job->fct(vcpu, job->ctx);
+		kvmi_free_job(job);
+	}
+}
+
+void kvmi_handle_requests(struct kvm_vcpu *vcpu)
+{
+	struct kvm_introspection *kvmi;
+
+	kvmi = kvmi_get(vcpu->kvm);
+	if (!kvmi)
+		return;
+
+	kvmi_run_jobs(vcpu);
+
+	kvmi_put(vcpu->kvm);
+}
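Note that kvmi_pull_job() holds job_lock only for the list manipulation, never across the callback, so a job function may sleep, exchange messages with the introspection tool, or queue further work; the while loop in kvmi_run_jobs() re-pulls until the list is drained, so a job added from inside a job still runs in the same pass. The surrounding kvmi_get()/kvmi_put() keeps the introspection context alive while the jobs run. A hedged sketch with hypothetical callbacks (not from this patch):

	static void example_second_stage(struct kvm_vcpu *vcpu, void *ctx)
	{
		/* Picked up later in the same kvmi_run_jobs() drain. */
	}

	static void example_first_stage(struct kvm_vcpu *vcpu, void *ctx)
	{
		/* A job may safely queue more work for the same vCPU. */
		kvmi_add_job(vcpu, example_second_stage, NULL, NULL);
	}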
@@ -2710,6 +2710,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 		goto out;
 	if (signal_pending(current))
 		goto out;
+	if (kvm_test_request(KVM_REQ_INTROSPECTION, vcpu))
+		goto out;
 
 	ret = 0;
 out:
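Note the asymmetry between the two call sites: vcpu_run() uses kvm_check_request(), which tests and clears the bit, while kvm_vcpu_check_block() uses kvm_test_request(), which only tests it. A halted vCPU must stop blocking when an introspection request is pending, but the bit has to survive until the run loop consumes it and actually executes the jobs. Recapping the two hunks:

	/* kvm_vcpu_check_block(): peek only, the request stays pending. */
	if (kvm_test_request(KVM_REQ_INTROSPECTION, vcpu))
		goto out;

	/* vcpu_run(): consume the request and do the work. */
	if (kvm_check_request(KVM_REQ_INTROSPECTION, vcpu))
		kvmi_handle_requests(vcpu);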