
[RFC,v6,12/92] kvm: introspection: add a jobs list to every introspected vCPU

Message ID 20190809160047.8319-13-alazar@bitdefender.com
Series: VM introspection

Commit Message

Adalbert Lazăr Aug. 9, 2019, 3:59 p.m. UTC
Every introspected vCPU gets a lock-protected list in which (mostly) the
receiving worker places the jobs to be done by that vCPU once it is
kicked out of the guest with KVM_REQ_INTROSPECTION.
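
This patch only queues and discards jobs; the code that actually runs
them when the vCPU handles KVM_REQ_INTROSPECTION comes later in the
series. As a rough sketch (kvmi_pull_job() and kvmi_run_jobs() are
hypothetical names, built only from the structures added below), the
consumer side could look like:

static struct kvmi_job *kvmi_pull_job(struct kvmi_vcpu *ivcpu)
{
	struct kvmi_job *job;

	spin_lock(&ivcpu->job_lock);
	job = list_first_entry_or_null(&ivcpu->job_list, typeof(*job), link);
	if (job)
		list_del(&job->link);
	spin_unlock(&ivcpu->job_lock);

	return job;
}

static void kvmi_run_jobs(struct kvm_vcpu *vcpu)
{
	struct kvmi_vcpu *ivcpu = IVCPU(vcpu);
	struct kvmi_job *job;

	/* Drain the queue, running each job's "do" function in turn. */
	while ((job = kvmi_pull_job(ivcpu))) {
		job->fct(vcpu, job->ctx);
		kvmi_free_job(job);
	}
}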

A job is defined by a "do" function, a context pointer and a "free"
function for releasing the context.
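
For illustration only (none of the following is part of the patch), a
hypothetical command handler could queue a job with a kzalloc()-ed
context and a matching "free" function, so the context is released even
if the job is discarded on unhook:

struct kvmi_example_ctx {
	u32 value;	/* hypothetical payload */
};

static void kvmi_example_fct(struct kvm_vcpu *vcpu, void *ctx)
{
	struct kvmi_example_ctx *c = ctx;

	kvm_debug("introspection job for vCPU %u, value %u\n",
		  vcpu->vcpu_id, c->value);
}

static void kvmi_example_free(void *ctx)
{
	kfree(ctx);
}

static int kvmi_queue_example(struct kvm_vcpu *vcpu, u32 value)
{
	struct kvmi_example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	ctx->value = value;

	return kvmi_add_job(vcpu, kvmi_example_fct, ctx, kvmi_example_free);
}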

Co-developed-by: Nicușor Cîțu <ncitu@bitdefender.com>
Signed-off-by: Nicușor Cîțu <ncitu@bitdefender.com>
Signed-off-by: Adalbert Lazăr <alazar@bitdefender.com>
---
 arch/x86/include/asm/kvm_host.h |   1 +
 virt/kvm/kvmi.c                 | 102 +++++++++++++++++++++++++++++++-
 virt/kvm/kvmi_int.h             |   9 +++
 3 files changed, 111 insertions(+), 1 deletion(-)

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 180373360e34..67ed934ca124 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -78,6 +78,7 @@ 
 #define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
 #define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
 #define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)
+#define KVM_REQ_INTROSPECTION		KVM_ARCH_REQ(25)
 
 #define CR0_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
diff --git a/virt/kvm/kvmi.c b/virt/kvm/kvmi.c
index 860574039221..07ebd1c629b0 100644
--- a/virt/kvm/kvmi.c
+++ b/virt/kvm/kvmi.c
@@ -11,6 +11,9 @@ 
 #include <linux/bitmap.h>
 
 static struct kmem_cache *msg_cache;
+static struct kmem_cache *job_cache;
+
+static void kvmi_abort_events(struct kvm *kvm);
 
 void *kvmi_msg_alloc(void)
 {
@@ -34,14 +37,19 @@  static void kvmi_cache_destroy(void)
 {
 	kmem_cache_destroy(msg_cache);
 	msg_cache = NULL;
+	kmem_cache_destroy(job_cache);
+	job_cache = NULL;
 }
 
 static int kvmi_cache_create(void)
 {
+	job_cache = kmem_cache_create("kvmi_job",
+				      sizeof(struct kvmi_job),
+				      0, SLAB_ACCOUNT, NULL);
 	msg_cache = kmem_cache_create("kvmi_msg", KVMI_MSG_SIZE_ALLOC,
 				      4096, SLAB_ACCOUNT, NULL);
 
-	if (!msg_cache) {
+	if (!msg_cache || !job_cache) {
 		kvmi_cache_destroy();
 
 		return -1;
@@ -80,6 +88,53 @@  static bool alloc_kvmi(struct kvm *kvm, const struct kvm_introspection *qemu)
 	return true;
 }
 
+static int __kvmi_add_job(struct kvm_vcpu *vcpu,
+			  void (*fct)(struct kvm_vcpu *vcpu, void *ctx),
+			  void *ctx, void (*free_fct)(void *ctx))
+{
+	struct kvmi_vcpu *ivcpu = IVCPU(vcpu);
+	struct kvmi_job *job;
+
+	job = kmem_cache_zalloc(job_cache, GFP_KERNEL);
+	if (unlikely(!job))
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&job->link);
+	job->fct = fct;
+	job->ctx = ctx;
+	job->free_fct = free_fct;
+
+	spin_lock(&ivcpu->job_lock);
+	list_add_tail(&job->link, &ivcpu->job_list);
+	spin_unlock(&ivcpu->job_lock);
+
+	return 0;
+}
+
+int kvmi_add_job(struct kvm_vcpu *vcpu,
+		 void (*fct)(struct kvm_vcpu *vcpu, void *ctx),
+		 void *ctx, void (*free_fct)(void *ctx))
+{
+	int err;
+
+	err = __kvmi_add_job(vcpu, fct, ctx, free_fct);
+
+	if (!err) {
+		kvm_make_request(KVM_REQ_INTROSPECTION, vcpu);
+		kvm_vcpu_kick(vcpu);
+	}
+
+	return err;
+}
+
+static void kvmi_free_job(struct kvmi_job *job)
+{
+	if (job->free_fct)
+		job->free_fct(job->ctx);
+
+	kmem_cache_free(job_cache, job);
+}
+
 static bool alloc_ivcpu(struct kvm_vcpu *vcpu)
 {
 	struct kvmi_vcpu *ivcpu;
@@ -88,6 +143,9 @@  static bool alloc_ivcpu(struct kvm_vcpu *vcpu)
 	if (!ivcpu)
 		return false;
 
+	INIT_LIST_HEAD(&ivcpu->job_list);
+	spin_lock_init(&ivcpu->job_lock);
+
 	vcpu->kvmi = ivcpu;
 
 	return true;
@@ -101,6 +159,27 @@  struct kvmi * __must_check kvmi_get(struct kvm *kvm)
 	return NULL;
 }
 
+static void kvmi_clear_vcpu_jobs(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+	struct kvmi_job *cur, *next;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct kvmi_vcpu *ivcpu = IVCPU(vcpu);
+
+		if (!ivcpu)
+			continue;
+
+		spin_lock(&ivcpu->job_lock);
+		list_for_each_entry_safe(cur, next, &ivcpu->job_list, link) {
+			list_del(&cur->link);
+			kvmi_free_job(cur);
+		}
+		spin_unlock(&ivcpu->job_lock);
+	}
+}
+
 static void kvmi_destroy(struct kvm *kvm)
 {
 	struct kvm_vcpu *vcpu;
@@ -118,6 +197,7 @@  static void kvmi_destroy(struct kvm *kvm)
 static void kvmi_release(struct kvm *kvm)
 {
 	kvmi_sock_put(IKVM(kvm));
+	kvmi_clear_vcpu_jobs(kvm);
 	kvmi_destroy(kvm);
 
 	complete(&kvm->kvmi_completed);
@@ -179,6 +259,13 @@  static void kvmi_end_introspection(struct kvmi *ikvm)
 	/* Signal QEMU which is waiting for POLLHUP. */
 	kvmi_sock_shutdown(ikvm);
 
+	/*
+	 * Trigger all the VCPUs out of waiting for replies. Although the
+	 * introspection is still enabled, sending additional events will
+	 * fail because the socket is shut down. Waiting will not be possible.
+	 */
+	kvmi_abort_events(kvm);
+
 	/*
 	 * At this moment the socket is shut down, no more commands will come
 	 * from the introspector, and the only way into the introspection is
@@ -420,6 +507,19 @@  int kvmi_cmd_control_vm_events(struct kvmi *ikvm, unsigned int event_id,
 	return 0;
 }
 
+static void kvmi_job_abort(struct kvm_vcpu *vcpu, void *ctx)
+{
+}
+
+static void kvmi_abort_events(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvmi_add_job(vcpu, kvmi_job_abort, NULL, NULL);
+}
+
 int kvmi_ioctl_unhook(struct kvm *kvm, bool force_reset)
 {
 	struct kvmi *ikvm;
diff --git a/virt/kvm/kvmi_int.h b/virt/kvm/kvmi_int.h
index 8739a3435893..97f91a568096 100644
--- a/virt/kvm/kvmi_int.h
+++ b/virt/kvm/kvmi_int.h
@@ -75,7 +75,16 @@ 
 
 #define KVMI_NUM_COMMANDS KVMI_NEXT_AVAILABLE_COMMAND
 
+struct kvmi_job {
+	struct list_head link;
+	void *ctx;
+	void (*fct)(struct kvm_vcpu *vcpu, void *ctx);
+	void (*free_fct)(void *ctx);
+};
+
 struct kvmi_vcpu {
+	struct list_head job_list;
+	spinlock_t job_lock;
 };
 
 #define IKVM(kvm) ((struct kvmi *)((kvm)->kvmi))