@@ -4,8 +4,15 @@
#include <asm/kvmi.h>
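+/*
+ * kvmi_intercepted and kvm_intercepted record which side (the
+ * introspection tool or userspace) has enabled the interception;
+ * monitor_fct points to the routine that attributes an interception
+ * change to the side currently making it (see monitor_bp_fct_kvmi()
+ * and monitor_bp_fct_kvm()).
+ */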
+struct kvmi_monitor_interception {
+ bool kvmi_intercepted;
+ bool kvm_intercepted;
+ bool (*monitor_fct)(struct kvm_vcpu *vcpu, bool enable);
+};
+
struct kvmi_interception {
bool restore_interception;
+ struct kvmi_monitor_interception breakpoint;
};
struct kvm_vcpu_arch_introspection {
@@ -16,4 +23,15 @@ struct kvm_vcpu_arch_introspection {
struct kvm_arch_introspection {
};
+#ifdef CONFIG_KVM_INTROSPECTION
+
+bool kvmi_monitor_bp_intercept(struct kvm_vcpu *vcpu, u32 dbg);
+
+#else /* CONFIG_KVM_INTROSPECTION */
+
+static inline bool kvmi_monitor_bp_intercept(struct kvm_vcpu *vcpu, u32 dbg)
+ { return false; }
+
+#endif /* CONFIG_KVM_INTROSPECTION */
+
#endif /* _ASM_X86_KVMI_HOST_H */
@@ -162,19 +162,72 @@ bool kvmi_arch_is_agent_hypercall(struct kvm_vcpu *vcpu)
&& subfunc2 == 0);
}
+/*
+ * Returns true if one side (kvm or kvmi) tries to enable/disable the breakpoint
+ * interception while the other side is still tracking it.
+ */
+bool kvmi_monitor_bp_intercept(struct kvm_vcpu *vcpu, u32 dbg)
+{
+ struct kvmi_interception *arch_vcpui = READ_ONCE(vcpu->arch.kvmi);
+ u32 bp_mask = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
+ bool enable = false;
+
+ if ((dbg & bp_mask) == bp_mask)
+ enable = true;
+
+ return arch_vcpui && arch_vcpui->breakpoint.monitor_fct(vcpu, enable);
+}
+EXPORT_SYMBOL(kvmi_monitor_bp_intercept);
+
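+/*
+ * Installed as monitor_fct while KVMI itself toggles the interception
+ * (see kvmi_control_bp_intercept()): a request that conflicts with the
+ * current state is rejected with 'true', otherwise the new state is
+ * recorded in kvmi_intercepted.
+ */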
+static bool monitor_bp_fct_kvmi(struct kvm_vcpu *vcpu, bool enable)
+{
+ if (enable) {
+ if (kvm_x86_ops.bp_intercepted(vcpu))
+ return true;
+ } else if (!vcpu->arch.kvmi->breakpoint.kvmi_intercepted)
+ return true;
+
+ vcpu->arch.kvmi->breakpoint.kvmi_intercepted = enable;
+
+ return false;
+}
+
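+/*
+ * The default monitor_fct: a change requested from the KVM/userspace
+ * side is rejected if it conflicts with the current state (e.g. the
+ * interception was enabled by KVMI), otherwise it is recorded in
+ * kvm_intercepted.
+ */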
+static bool monitor_bp_fct_kvm(struct kvm_vcpu *vcpu, bool enable)
+{
+ if (enable) {
+ if (kvm_x86_ops.bp_intercepted(vcpu))
+ return true;
+ } else if (!vcpu->arch.kvmi->breakpoint.kvm_intercepted)
+ return true;
+
+ vcpu->arch.kvmi->breakpoint.kvm_intercepted = enable;
+
+ return false;
+}
+
static int kvmi_control_bp_intercept(struct kvm_vcpu *vcpu, bool enable)
{
struct kvm_guest_debug dbg = {};
int err = 0;
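+ /*
+ * Attribute the guest-debug change below to KVMI, then switch
+ * monitor_fct back so that later changes are tracked as userspace's.
+ */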
+ vcpu->arch.kvmi->breakpoint.monitor_fct = monitor_bp_fct_kvmi;
if (enable)
dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
err = kvm_arch_vcpu_set_guest_debug(vcpu, &dbg);
+ vcpu->arch.kvmi->breakpoint.monitor_fct = monitor_bp_fct_kvm;
return err;
}
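+/*
+ * Disables the interception on KVMI's behalf and resets the tracking
+ * for both sides; used when the interception is being restored at
+ * unhook (see kvmi_arch_restore_interception()).
+ */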
+static void kvmi_arch_disable_bp_intercept(struct kvm_vcpu *vcpu)
+{
+ kvmi_control_bp_intercept(vcpu, false);
+
+ vcpu->arch.kvmi->breakpoint.kvmi_intercepted = false;
+ vcpu->arch.kvmi->breakpoint.kvm_intercepted = false;
+}
+
int kvmi_arch_cmd_control_intercept(struct kvm_vcpu *vcpu,
unsigned int event_id, bool enable)
{
@@ -213,6 +266,7 @@ void kvmi_arch_breakpoint_event(struct kvm_vcpu *vcpu, u64 gva, u8 insn_len)
static void kvmi_arch_restore_interception(struct kvm_vcpu *vcpu)
{
+ kvmi_arch_disable_bp_intercept(vcpu);
}
bool kvmi_arch_clean_up_interception(struct kvm_vcpu *vcpu)
@@ -238,6 +292,12 @@ bool kvmi_arch_vcpu_alloc_interception(struct kvm_vcpu *vcpu)
if (!arch_vcpui)
return false;
+ arch_vcpui->breakpoint.monitor_fct = monitor_bp_fct_kvm;
+
+ /* pairs with the READ_ONCE() in kvmi_monitor_bp_intercept() */
+ smp_wmb();
+ WRITE_ONCE(vcpu->arch.kvmi, arch_vcpui);
+
return true;
}
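
Note: the monitor helpers above rely on a kvm_x86_ops.bp_intercepted() callback that is presumably introduced earlier in the series. As a rough, assumed sketch of the VMX side of such a callback (the exception-bitmap test below is illustrative, not necessarily the series' exact code):

	static bool vmx_bp_intercepted(struct kvm_vcpu *vcpu)
	{
		/* #BP is intercepted iff its bit is set in the exception bitmap */
		return !!(vmcs_read32(EXCEPTION_BITMAP) & (1u << BP_VECTOR));
	}
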
@@ -9756,6 +9756,11 @@ int kvm_arch_vcpu_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_queue_exception(vcpu, BP_VECTOR);
}
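+ /*
+ * Give the introspection side a chance to refuse the change if it
+ * conflicts with the breakpoint interception state it tracks.
+ */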
+ if (kvmi_monitor_bp_intercept(vcpu, dbg->control)) {
+ r = -EBUSY;
+ goto out;
+ }
+
/*
* Read rflags as long as potentially injected trace flags are still
* filtered out.
@@ -63,6 +63,9 @@ enum {
#define HOST_SEND_TEST(uc) (uc.cmd == UCALL_SYNC && uc.args[1] == 0)
#define HOST_TEST_DONE(uc) (uc.cmd == UCALL_SYNC && uc.args[1] == 1)
+static pthread_t start_vcpu_worker(struct vcpu_worker_data *data);
+static void wait_vcpu_worker(pthread_t vcpu_thread);
+
static int guest_test_id(void)
{
GUEST_REQUEST_TEST();
@@ -172,13 +175,24 @@ static void allow_command(struct kvm_vm *vm, __s32 id)
static void hook_introspection(struct kvm_vm *vm)
{
+ struct vcpu_worker_data data = { .vm = vm, .vcpu_id = VCPU_ID };
__u32 allow = 1, disallow = 0, allow_inval = 2;
+ pthread_t vcpu_thread;
__s32 all_IDs = -1;
set_command_perm(vm, all_IDs, allow, EFAULT);
set_event_perm(vm, all_IDs, allow, EFAULT);
do_hook_ioctl(vm, -1, EINVAL);
+
+ /*
+ * The last call failed "too late": the ioctl was rejected, but only
+ * after the introspection structures had already been set up. We have
+ * to let the vCPU run and clean up those structures, otherwise the
+ * next hook call will fail with EEXIST.
+ */
+ vcpu_thread = start_vcpu_worker(&data);
+ wait_vcpu_worker(vcpu_thread);
+
do_hook_ioctl(vm, Kvm_socket, 0);
do_hook_ioctl(vm, Kvm_socket, EEXIST);