@@ -816,6 +816,9 @@ struct kvm_vcpu_arch {
/* #PF translated error code from EPT/NPT exit reason */
u64 error_code;
+
+ /* Controls the interception of MSRs, CRs, breakpoints etc. */
+ struct kvmi_interception *kvmi;
};
struct kvm_lpage_info {
@@ -4,6 +4,10 @@
#include <asm/kvmi.h>
+struct kvmi_interception {
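+ /* Set when the vCPU has to drop the intercepts installed for introspection */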
+ bool restore_interception;
+};
+
struct kvm_vcpu_arch_introspection {
struct kvm_regs delayed_regs;
bool have_delayed_regs;
@@ -210,3 +210,52 @@ void kvmi_arch_breakpoint_event(struct kvm_vcpu *vcpu, u64 gva, u8 insn_len)
kvmi_handle_common_event_actions(vcpu, action);
}
}
+
+static void kvmi_arch_restore_interception(struct kvm_vcpu *vcpu)
+{
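+ /* Nothing to restore yet; this patch installs no intercepts */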
+}
+
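+/*
+ * Called from the vCPU thread (see kvmi_handle_requests()). Returns true
+ * when the intercepts were restored and vcpu->arch.kvmi can be freed.
+ */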
+bool kvmi_arch_clean_up_interception(struct kvm_vcpu *vcpu)
+{
+ struct kvmi_interception *arch_vcpui = vcpu->arch.kvmi;
+
+ if (!arch_vcpui)
+ return false;
+
+ if (!arch_vcpui->restore_interception)
+ return false;
+
+ kvmi_arch_restore_interception(vcpu);
+
+ return true;
+}
+
+bool kvmi_arch_vcpu_alloc_interception(struct kvm_vcpu *vcpu)
+{
+ struct kvmi_interception *arch_vcpui;
+
+ arch_vcpui = kzalloc(sizeof(*arch_vcpui), GFP_KERNEL);
+ if (!arch_vcpui)
+ return false;
+
+ /* Publish the pointer; readers use READ_ONCE() on vcpu->arch.kvmi */
+ WRITE_ONCE(vcpu->arch.kvmi, arch_vcpui);
+
+ return true;
+}
+
+void kvmi_arch_vcpu_free_interception(struct kvm_vcpu *vcpu)
+{
+ kfree(vcpu->arch.kvmi);
+ WRITE_ONCE(vcpu->arch.kvmi, NULL);
+}
+
+bool kvmi_arch_vcpu_introspected(struct kvm_vcpu *vcpu)
+{
+ return !!READ_ONCE(vcpu->arch.kvmi);
+}
+
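+/*
+ * Only marks the vCPU; the intercepts are restored later, from the vCPU
+ * thread, via kvmi_arch_clean_up_interception().
+ */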
+void kvmi_arch_request_interception_cleanup(struct kvm_vcpu *vcpu)
+{
+ struct kvmi_interception *arch_vcpui = READ_ONCE(vcpu->arch.kvmi);
+
+ if (arch_vcpui)
+ arch_vcpui->restore_interception = true;
+}
@@ -206,7 +206,7 @@ static bool kvmi_alloc_vcpui(struct kvm_vcpu *vcpu)
vcpu->kvmi = vcpui;
- return true;
+ return kvmi_arch_vcpu_alloc_interception(vcpu);
}
static int kvmi_create_vcpui(struct kvm_vcpu *vcpu)
@@ -240,6 +240,9 @@ static void kvmi_free_vcpui(struct kvm_vcpu *vcpu)
kfree(vcpui);
vcpu->kvmi = NULL;
+
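+ /* Let the vCPU thread restore the intercepts before vcpu->arch.kvmi is freed */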
+ kvmi_arch_request_interception_cleanup(vcpu);
+ kvmi_make_request(vcpu, false);
}
static void kvmi_free(struct kvm *kvm)
@@ -262,6 +265,7 @@ void kvmi_vcpu_uninit(struct kvm_vcpu *vcpu)
{
mutex_lock(&vcpu->kvm->kvmi_lock);
kvmi_free_vcpui(vcpu);
+ kvmi_arch_vcpu_free_interception(vcpu);
mutex_unlock(&vcpu->kvm->kvmi_lock);
}
@@ -410,6 +414,21 @@ static int kvmi_recv_thread(void *arg)
return 0;
}
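+/*
+ * Hooking is refused while the VM is already introspected or while any vCPU
+ * still carries interception state from a previous session.
+ */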
+static bool ready_to_hook(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ if (kvm->kvmi)
+ return false;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ if (kvmi_arch_vcpu_introspected(vcpu))
+ return false;
+
+ return true;
+}
+
int kvmi_hook(struct kvm *kvm, const struct kvm_introspection_hook *hook)
{
struct kvm_introspection *kvmi;
@@ -417,7 +436,7 @@ int kvmi_hook(struct kvm *kvm, const struct kvm_introspection_hook *hook)
mutex_lock(&kvm->kvmi_lock);
- if (kvm->kvmi) {
+ if (!ready_to_hook(kvm)) {
err = -EEXIST;
goto out;
}
@@ -814,7 +833,7 @@ void kvmi_handle_requests(struct kvm_vcpu *vcpu)
kvmi = kvmi_get(vcpu->kvm);
if (!kvmi)
- return;
+ goto out;
for (;;) {
kvmi_run_jobs(vcpu);
@@ -826,6 +845,13 @@ void kvmi_handle_requests(struct kvm_vcpu *vcpu)
}
kvmi_put(vcpu->kvm);
+
+out:
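+ /*
+  * Free the arch interception state from the vCPU thread once the
+  * intercepts were restored, even if the introspection channel is gone.
+  */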
+ if (kvmi_arch_clean_up_interception(vcpu)) {
+ mutex_lock(&vcpu->kvm->kvmi_lock);
+ kvmi_arch_vcpu_free_interception(vcpu);
+ mutex_unlock(&vcpu->kvm->kvmi_lock);
+ }
}
int kvmi_cmd_vcpu_pause(struct kvm_vcpu *vcpu, bool wait)
@@ -78,6 +78,11 @@ void kvmi_arch_init_vcpu_events_mask(unsigned long *supported);
kvmi_vcpu_msg_job_fct kvmi_arch_vcpu_msg_handler(u16 id);
void kvmi_arch_setup_vcpu_event(struct kvm_vcpu *vcpu,
struct kvmi_vcpu_event *ev);
+bool kvmi_arch_vcpu_alloc_interception(struct kvm_vcpu *vcpu);
+void kvmi_arch_vcpu_free_interception(struct kvm_vcpu *vcpu);
+bool kvmi_arch_vcpu_introspected(struct kvm_vcpu *vcpu);
+void kvmi_arch_request_interception_cleanup(struct kvm_vcpu *vcpu);
+bool kvmi_arch_clean_up_interception(struct kvm_vcpu *vcpu);
void kvmi_arch_post_reply(struct kvm_vcpu *vcpu);
bool kvmi_arch_is_agent_hypercall(struct kvm_vcpu *vcpu);
void kvmi_arch_breakpoint_event(struct kvm_vcpu *vcpu, u64 gva, u8 insn_len);