@@ -547,6 +547,7 @@ the following events::
KVMI_EVENT_HYPERCALL
KVMI_EVENT_MSR
KVMI_EVENT_PF
+ KVMI_EVENT_SINGLESTEP
KVMI_EVENT_TRAP
KVMI_EVENT_XSETBV
@@ -1352,3 +1353,30 @@ The *CONTINUE* action will continue the page fault handling via emulation.
The *RETRY* action is used by the introspection tool to retry the
execution of the current instruction, usually because it changed the
instruction pointer or the page restrictions.
+
+11. KVMI_EVENT_SINGLESTEP
+-------------------------
+
+:Architectures: x86
+:Versions: >= 1
+:Actions: CONTINUE, CRASH
+:Parameters:
+
+::
+
+	struct kvmi_event;
+	struct kvmi_event_singlestep {
+		__u8 failed;
+		__u8 padding[7];
+	};
+
+:Returns:
+
+::
+
+	struct kvmi_vcpu_hdr;
+	struct kvmi_event_reply;
+
+This event is sent after the current instruction has been executed (or
+when single-stepping has failed) and the introspection tool has enabled
+this event (see **KVMI_VCPU_CONTROL_EVENTS**).
@@ -5439,6 +5439,7 @@ static int handle_invalid_op(struct kvm_vcpu *vcpu)
static int handle_monitor_trap(struct kvm_vcpu *vcpu)
{
+ kvmi_singlestep_done(vcpu);
return 1;
}
@@ -5994,6 +5995,11 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
}
}
+ if (kvmi_vcpu_running_singlestep(vcpu) &&
+ exit_reason != EXIT_REASON_EPT_VIOLATION &&
+ exit_reason != EXIT_REASON_MONITOR_TRAP_FLAG)
+ kvmi_singlestep_failed(vcpu);
+
if (exit_reason < kvm_vmx_max_exit_handlers
&& kvm_vmx_exit_handlers[exit_reason]) {
#ifdef CONFIG_RETPOLINE
@@ -95,6 +95,8 @@ bool kvmi_hypercall_event(struct kvm_vcpu *vcpu);
bool kvmi_breakpoint_event(struct kvm_vcpu *vcpu, u64 gva, u8 insn_len);
bool kvmi_enter_guest(struct kvm_vcpu *vcpu);
bool kvmi_vcpu_running_singlestep(struct kvm_vcpu *vcpu);
+void kvmi_singlestep_done(struct kvm_vcpu *vcpu);
+void kvmi_singlestep_failed(struct kvm_vcpu *vcpu);
#else
@@ -113,6 +115,8 @@ static inline bool kvmi_enter_guest(struct kvm_vcpu *vcpu)
{ return true; }
static inline bool kvmi_vcpu_running_singlestep(struct kvm_vcpu *vcpu)
{ return false; }
+/* Stubs for !CONFIG_KVM_INTROSPECTION; 'inline' matches the sibling
+ * stubs above and avoids -Wunused-function in including TUs. */
+static inline void kvmi_singlestep_done(struct kvm_vcpu *vcpu) { }
+static inline void kvmi_singlestep_failed(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_INTROSPECTION */
@@ -59,6 +59,7 @@ enum {
KVMI_EVENT_DESCRIPTOR = 7,
KVMI_EVENT_MSR = 8,
KVMI_EVENT_PF = 9,
+ KVMI_EVENT_SINGLESTEP = 10,
KVMI_NUM_EVENTS
};
@@ -199,4 +200,9 @@ struct kvmi_event_pf {
__u32 padding3;
};
+struct kvmi_event_singlestep {
+ __u8 failed;
+ __u8 padding[7];
+};
+
#endif /* _UAPI__LINUX_KVMI_H */
@@ -738,6 +738,14 @@ static void stop_vcpu_worker(pthread_t vcpu_thread,
wait_vcpu_worker(vcpu_thread);
}
+/*
+ * Send a vCPU introspection command and wait for its reply.
+ * Unlike do_vcpu_command(), the caller is responsible for having a vCPU
+ * worker thread running so the command can be processed.
+ * 'vm' is unused here but kept for signature symmetry with its wrapper.
+ */
+static int __do_vcpu_command(struct kvm_vm *vm, int cmd_id,
+			     struct kvmi_msg_hdr *req, size_t req_size,
+			     void *rpl, size_t rpl_size)
+{
+	send_message(cmd_id, req, req_size);
+	return receive_cmd_reply(req, rpl, rpl_size);
+}
+
static int do_vcpu_command(struct kvm_vm *vm, int cmd_id,
struct kvmi_msg_hdr *req, size_t req_size,
void *rpl, size_t rpl_size)
@@ -748,13 +756,24 @@ static int do_vcpu_command(struct kvm_vm *vm, int cmd_id,
vcpu_thread = start_vcpu_worker(&data);
- send_message(cmd_id, req, req_size);
- r = receive_cmd_reply(req, rpl, rpl_size);
+ r = __do_vcpu_command(vm, cmd_id, req, req_size, rpl, rpl_size);
stop_vcpu_worker(vcpu_thread, &data);
return r;
}
+/*
+ * Target a vCPU command at vCPU 0 and send it, without spawning a vCPU
+ * worker (the caller must ensure one is running).
+ *
+ * NOTE: the kvmi_vcpu_hdr sits immediately *after* the message header in
+ * every request struct (see the anonymous request structs in the tests),
+ * so the cast must be on (req + 1) — casting req itself would write the
+ * vcpu index over kvmi_msg_hdr and leave the real field untouched.
+ */
+static int __do_vcpu0_command(struct kvm_vm *vm, int cmd_id,
+			      struct kvmi_msg_hdr *req, size_t req_size,
+			      void *rpl, size_t rpl_size)
+{
+	struct kvmi_vcpu_hdr *vcpu_hdr = (struct kvmi_vcpu_hdr *)(req + 1);
+
+	vcpu_hdr->vcpu = 0;
+
+	send_message(cmd_id, req, req_size);
+	return receive_cmd_reply(req, rpl, rpl_size);
+}
+
static int do_vcpu0_command(struct kvm_vm *vm, int cmd_id,
struct kvmi_msg_hdr *req, size_t req_size,
void *rpl, size_t rpl_size)
@@ -1681,24 +1700,67 @@ static void test_event_pf(struct kvm_vm *vm)
test_pf(vm, cbk_test_event_pf);
}
-static void test_cmd_vcpu_control_singlestep(struct kvm_vm *vm)
+/* Send KVMI_VCPU_CONTROL_SINGLESTEP to vCPU0, enabling or disabling
+ * single-stepping, and assert that the command succeeds. */
+static void control_singlestep(struct kvm_vm *vm, bool enable)
{
	struct {
		struct kvmi_msg_hdr hdr;
		struct kvmi_vcpu_hdr vcpu_hdr;
		struct kvmi_vcpu_control_singlestep cmd;
	} req = {};
+	int r;
+
+	req.cmd.enable = enable;
+	r = __do_vcpu0_command(vm, KVMI_VCPU_CONTROL_SINGLESTEP,
+			       &req.hdr, sizeof(req), NULL, 0);
+	TEST_ASSERT(r == 0,
+		    "KVMI_VCPU_CONTROL_SINGLESTEP failed, error %d (%s)\n",
+		    -r, kvm_strerror(-r));
+}
+
+/* Convenience wrapper: turn single-stepping on for vCPU0. */
+static void enable_singlestep(struct kvm_vm *vm)
+{
+	control_singlestep(vm, true);
+}
+
+/* Convenience wrapper: turn single-stepping off for vCPU0. */
+static void disable_singlestep(struct kvm_vm *vm)
+{
+	control_singlestep(vm, false);
+}
+
+/*
+ * Subscribe to KVMI_EVENT_SINGLESTEP, enable single-stepping while the
+ * vCPU worker runs, consume one singlestep event, then disable both
+ * single-stepping and the event subscription.
+ */
+static void test_cmd_vcpu_control_singlestep(struct kvm_vm *vm)
+{
+	struct vcpu_worker_data data = { .vm = vm, .vcpu_id = VCPU_ID };
+	struct {
+		struct kvmi_event common;
+		struct kvmi_event_singlestep singlestep;
+	} ev;
+	__u16 event_id = KVMI_EVENT_SINGLESTEP;
+	struct vcpu_reply rpl = {};
+	struct kvmi_msg_hdr hdr;
+	pthread_t vcpu_thread;
	if (!features.singlestep)
		return;
-	req.cmd.enable = true;
-	test_vcpu0_command(vm, KVMI_VCPU_CONTROL_SINGLESTEP,
-			   &req.hdr, sizeof(req), NULL, 0);
+	enable_vcpu_event(vm, event_id);
+
+	vcpu_thread = start_vcpu_worker(&data);
+
+	enable_singlestep(vm);
+
+	receive_event(&hdr, &ev.common, sizeof(ev), event_id);
+
+	DEBUG("SINGLESTEP event, rip 0x%llx success %d\n",
+	      ev.common.arch.regs.rip, !ev.singlestep.failed);
+
+	/* Disable before replying so the vCPU doesn't keep stepping. */
+	disable_singlestep(vm);
+
+	reply_to_event(&hdr, &ev.common, KVMI_EVENT_ACTION_CONTINUE,
+		       &rpl, sizeof(rpl));
+
+	stop_vcpu_worker(vcpu_thread, &data);
-	req.cmd.enable = false;
-	test_vcpu0_command(vm, KVMI_VCPU_CONTROL_SINGLESTEP,
-			   &req.hdr, sizeof(req), NULL, 0);
+	disable_vcpu_event(vm, event_id);
}
static void test_introspection(struct kvm_vm *vm)
@@ -1319,3 +1319,64 @@ bool kvmi_vcpu_running_singlestep(struct kvm_vcpu *vcpu)
return ret;
}
EXPORT_SYMBOL(kvmi_vcpu_running_singlestep);
+
+/*
+ * Send the KVMI_EVENT_SINGLESTEP vCPU event to the introspection tool
+ * and return the requested action. Falls back to
+ * KVMI_EVENT_ACTION_CONTINUE if the event could not be delivered.
+ */
+static u32 kvmi_send_singlestep(struct kvm_vcpu *vcpu, bool success)
+{
+	struct kvmi_event_singlestep e;
+	int err, action;
+
+	/* Zero the struct so the padding bytes don't leak kernel stack. */
+	memset(&e, 0, sizeof(e));
+	e.failed = success ? 0 : 1;
+
+	err = kvmi_send_event(vcpu, KVMI_EVENT_SINGLESTEP, &e, sizeof(e),
+			      NULL, 0, &action);
+	if (err)
+		return KVMI_EVENT_ACTION_CONTINUE;
+
+	return action;
+}
+
+/*
+ * Report a single-step outcome, but only while a KVMI-initiated
+ * single-step is actually in progress for this vCPU.
+ */
+static void __kvmi_singlestep_event(struct kvm_vcpu *vcpu, bool success)
+{
+	struct kvm_vcpu_introspection *vcpui = VCPUI(vcpu);
+	u32 action;
+
+	if (!vcpui->singlestep.loop)
+		return;
+
+	action = kvmi_send_singlestep(vcpu, success);
+	switch (action) {
+	case KVMI_EVENT_ACTION_CONTINUE:
+		break;
+	default:
+		/* CRASH and any unknown actions are handled centrally. */
+		kvmi_handle_common_event_actions(vcpu->kvm, action,
+						 "SINGLESTEP");
+	}
+}
+
+/*
+ * Forward a single-step completion/failure to the introspection channel,
+ * if introspection is attached and the tool subscribed to the event.
+ */
+static void kvmi_singlestep_event(struct kvm_vcpu *vcpu, bool success)
+{
+	struct kvm_introspection *kvmi;
+	struct kvm *kvm = vcpu->kvm;
+
+	/* Pin the introspection context; it may have been detached. */
+	kvmi = kvmi_get(kvm);
+	if (!kvmi)
+		return;
+
+	if (is_event_enabled(vcpu, KVMI_EVENT_SINGLESTEP))
+		__kvmi_singlestep_event(vcpu, success);
+
+	kvmi_put(kvm);
+}
+
+/* Called by KVM when a single-stepped instruction completed (MTF exit). */
+void kvmi_singlestep_done(struct kvm_vcpu *vcpu)
+{
+	kvmi_singlestep_event(vcpu, true);
+}
+EXPORT_SYMBOL(kvmi_singlestep_done);
+
+/* Called by KVM when the single-step was interrupted by another exit. */
+void kvmi_singlestep_failed(struct kvm_vcpu *vcpu)
+{
+	kvmi_singlestep_event(vcpu, false);
+}
+EXPORT_SYMBOL(kvmi_singlestep_failed);
@@ -30,6 +30,7 @@
| BIT(KVMI_EVENT_TRAP) \
| BIT(KVMI_EVENT_PAUSE_VCPU) \
| BIT(KVMI_EVENT_PF) \
+ | BIT(KVMI_EVENT_SINGLESTEP) \
| BIT(KVMI_EVENT_XSETBV) \
)