@@ -513,3 +513,26 @@ pause/stop/migrate the guest (see **Unhooking**) and the introspection
has been enabled for this event (see **KVMI_CONTROL_VM_EVENTS**).
The introspection tool has a chance to unhook and close the KVMI channel
(signaling that the operation can proceed).
+
+2. KVMI_EVENT_CREATE_VCPU
+-------------------------
+
+:Architectures: all
+:Versions: >= 1
+:Actions: CONTINUE, CRASH
+:Parameters:
+
+::
+
+ struct kvmi_event;
+
+:Returns:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_event_reply;
+
+This event is sent when a new vCPU is created and the introspection has
+been enabled for this event (see **KVMI_CONTROL_VM_EVENTS**).
+
@@ -13,6 +13,7 @@
static struct kmem_cache *msg_cache;
static struct kmem_cache *job_cache;
+static bool kvmi_create_vcpu_event(struct kvm_vcpu *vcpu);
static void kvmi_abort_events(struct kvm *kvm);
void *kvmi_msg_alloc(void)
@@ -150,6 +151,11 @@ static struct kvmi_job *kvmi_pull_job(struct kvmi_vcpu *ivcpu)
return job;
}
+/*
+ * Per-vCPU job callback: deliver the KVMI_EVENT_CREATE_VCPU event from
+ * the vCPU's own context.  The job context pointer is unused.
+ */
+static void kvmi_job_create_vcpu(struct kvm_vcpu *vcpu, void *ctx)
+{
+	kvmi_create_vcpu_event(vcpu);
+}
+
static bool alloc_ivcpu(struct kvm_vcpu *vcpu)
{
struct kvmi_vcpu *ivcpu;
@@ -245,6 +251,9 @@ int kvmi_vcpu_init(struct kvm_vcpu *vcpu)
goto out;
}
+ if (kvmi_add_job(vcpu, kvmi_job_create_vcpu, NULL, NULL))
+ ret = -ENOMEM;
+
out:
kvmi_put(vcpu->kvm);
@@ -330,6 +339,10 @@ int kvmi_hook(struct kvm *kvm, const struct kvm_introspection *qemu)
err = -ENOMEM;
goto err_alloc;
}
+ if (kvmi_add_job(vcpu, kvmi_job_create_vcpu, NULL, NULL)) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
}
/* interact with other kernel components after structure allocation */
@@ -551,6 +564,40 @@ void kvmi_handle_common_event_actions(struct kvm_vcpu *vcpu, u32 action,
}
}
+/*
+ * Send the CREATE_VCPU event to the introspection tool and act on the
+ * reply.  Returns true when the tool answered CONTINUE; any other
+ * action (e.g. CRASH) is delegated to the common handler and false is
+ * returned.
+ */
+static bool __kvmi_create_vcpu_event(struct kvm_vcpu *vcpu)
+{
+	u32 action;
+	bool ret = false;
+
+	action = kvmi_msg_send_create_vcpu(vcpu);
+	switch (action) {
+	case KVMI_EVENT_ACTION_CONTINUE:
+		ret = true;
+		break;
+	default:
+		/* CRASH and any unknown action share the common path. */
+		kvmi_handle_common_event_actions(vcpu, action, "CREATE");
+	}
+
+	return ret;
+}
+
+/*
+ * Fire KVMI_EVENT_CREATE_VCPU if the VM is introspected and this event
+ * has been enabled in vm_ev_mask (see KVMI_CONTROL_VM_EVENTS).  Takes a
+ * reference on the introspection context for the duration of the event.
+ * Returns true when the vCPU may continue (including when introspection
+ * is not active or the event is not enabled).
+ */
+static bool kvmi_create_vcpu_event(struct kvm_vcpu *vcpu)
+{
+	struct kvmi *ikvm;
+	bool ret = true;
+
+	ikvm = kvmi_get(vcpu->kvm);
+	if (!ikvm)
+		return true;
+
+	if (test_bit(KVMI_EVENT_CREATE_VCPU, ikvm->vm_ev_mask))
+		ret = __kvmi_create_vcpu_event(vcpu);
+
+	kvmi_put(vcpu->kvm);
+
+	return ret;
+}
+
void kvmi_run_jobs(struct kvm_vcpu *vcpu)
{
struct kvmi_vcpu *ivcpu = IVCPU(vcpu);
@@ -123,6 +123,7 @@ bool kvmi_sock_get(struct kvmi *ikvm, int fd);
void kvmi_sock_shutdown(struct kvmi *ikvm);
void kvmi_sock_put(struct kvmi *ikvm);
bool kvmi_msg_process(struct kvmi *ikvm);
+u32 kvmi_msg_send_create_vcpu(struct kvm_vcpu *vcpu);
int kvmi_msg_send_unhook(struct kvmi *ikvm);
/* kvmi.c */
@@ -725,3 +725,15 @@ int kvmi_msg_send_unhook(struct kvmi *ikvm)
return kvmi_sock_write(ikvm, vec, n, msg_size);
}
+
+/*
+ * Send a KVMI_EVENT_CREATE_VCPU message (no event-specific payload, no
+ * extra reply data) and wait for the introspection tool's action.  On a
+ * send/receive error, default to ACTION_CONTINUE so the vCPU is not
+ * stalled by a broken channel.
+ */
+u32 kvmi_msg_send_create_vcpu(struct kvm_vcpu *vcpu)
+{
+	int err, action;
+
+	err = kvmi_send_event(vcpu, KVMI_EVENT_CREATE_VCPU, NULL, 0,
+			      NULL, 0, &action);
+	if (err)
+		return KVMI_EVENT_ACTION_CONTINUE;
+
+	return action;
+}