@@ -1049,6 +1049,38 @@ In order to 'forget' an address, all three bits ('rwx') must be set.
* -KVM_EAGAIN - the selected vCPU can't be introspected yet
* -KVM_ENOMEM - there is not enough memory to add the page tracking structures
+24. KVMI_VCPU_CONTROL_SINGLESTEP
+--------------------------------
+
+:Architectures: x86 (vmx)
+:Versions: >= 1
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_vcpu_control_singlestep {
+ __u8 enable;
+ __u8 padding[7];
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_error_code;
+
+Enables/disables singlestep for the selected vCPU.
+
+The introspection tool should use *KVMI_GET_VERSION* to check
+if the hardware supports singlestep (see the **kvmi_features** reply field).
+
+:Errors:
+
+* -KVM_EOPNOTSUPP - the hardware doesn't support singlestep
+* -KVM_EINVAL - 'enable' is not 1/0 or the padding is not zero
+* -KVM_EAGAIN - the selected vCPU can't be introspected yet
+
Events
======
@@ -1355,3 +1355,21 @@ void kvmi_arch_features(struct kvmi_features *feat)
{
feat->singlestep = !!kvm_x86_ops.control_singlestep;
}
+
+bool kvmi_arch_start_singlestep(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_x86_ops.control_singlestep)
+ return false;
+
+ kvm_x86_ops.control_singlestep(vcpu, true);
+ return true;
+}
+
+bool kvmi_arch_stop_singlestep(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_x86_ops.control_singlestep)
+ return false;
+
+ kvm_x86_ops.control_singlestep(vcpu, false);
+ return true;
+}
@@ -8515,9 +8515,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
- inject_pending_event(vcpu, &req_immediate_exit);
- if (req_int_win)
- kvm_x86_ops.enable_irq_window(vcpu);
+ if (!kvmi_vcpu_running_singlestep(vcpu)) {
+ /*
+ * Event injection is skipped while the vCPU is
+ * single-stepping; it is retried on a later entry.
+ */
+ inject_pending_event(vcpu, &req_immediate_exit);
+ if (req_int_win)
+ kvm_x86_ops.enable_irq_window(vcpu);
+ }
if (kvm_lapic_enabled(vcpu)) {
update_cr8_intercept(vcpu);
@@ -45,6 +45,10 @@ struct kvm_vcpu_introspection {
bool pending;
bool send_event;
} exception;
+
+ struct {
+ bool loop;
+ } singlestep;
};
struct kvm_introspection {
@@ -89,6 +93,7 @@ void kvmi_handle_requests(struct kvm_vcpu *vcpu);
bool kvmi_hypercall_event(struct kvm_vcpu *vcpu);
bool kvmi_breakpoint_event(struct kvm_vcpu *vcpu, u64 gva, u8 insn_len);
bool kvmi_enter_guest(struct kvm_vcpu *vcpu);
+bool kvmi_vcpu_running_singlestep(struct kvm_vcpu *vcpu);
#else
@@ -105,6 +110,8 @@ static inline bool kvmi_breakpoint_event(struct kvm_vcpu *vcpu, u64 gva,
{ return true; }
static inline bool kvmi_enter_guest(struct kvm_vcpu *vcpu)
{ return true; }
+static inline bool kvmi_vcpu_running_singlestep(struct kvm_vcpu *vcpu)
+ { return false; }
#endif /* CONFIG_KVM_INTROSPECTION */
@@ -47,6 +47,8 @@ enum {
KVMI_VM_SET_PAGE_ACCESS = 23,
+ KVMI_VCPU_CONTROL_SINGLESTEP = 24,
+
KVMI_NUM_MESSAGES
};
@@ -189,6 +191,11 @@ struct kvmi_vm_set_page_access {
struct kvmi_page_access_entry entries[0];
};
+struct kvmi_vcpu_control_singlestep {
+ __u8 enable;
+ __u8 padding[7];
+};
+
struct kvmi_event {
__u16 size;
__u16 vcpu;
@@ -1940,6 +1940,51 @@ static void test_event_pf(struct kvm_vm *vm)
test_pf(vm, cbk_test_event_pf);
}
+static void cmd_vcpu_singlestep(struct kvm_vm *vm, __u8 enable, __u8 padding,
+ int expected_err)
+{
+ struct {
+ struct kvmi_msg_hdr hdr;
+ struct kvmi_vcpu_hdr vcpu_hdr;
+ struct kvmi_vcpu_control_singlestep cmd;
+ } req = {};
+ int r;
+
+ req.cmd.enable = enable;
+ req.cmd.padding[6] = padding;
+
+ r = do_vcpu0_command(vm, KVMI_VCPU_CONTROL_SINGLESTEP,
+ &req.hdr, sizeof(req), NULL, 0);
+ TEST_ASSERT(r == expected_err,
+ "KVMI_VCPU_CONTROL_SINGLESTEP failed, error %d(%s), expected error %d\n",
+ -r, kvm_strerror(-r), expected_err);
+}
+
+static void test_supported_singlestep(struct kvm_vm *vm)
+{
+ __u8 disable = 0, enable = 1, enable_inval = 2;
+ __u8 padding = 1, no_padding = 0;
+
+ cmd_vcpu_singlestep(vm, enable, no_padding, 0);
+ cmd_vcpu_singlestep(vm, disable, no_padding, 0);
+
+ cmd_vcpu_singlestep(vm, enable, padding, -KVM_EINVAL);
+ cmd_vcpu_singlestep(vm, enable_inval, no_padding, -KVM_EINVAL);
+}
+
+static void test_unsupported_singlestep(struct kvm_vm *vm)
+{
+ cmd_vcpu_singlestep(vm, 1, 0, -KVM_EOPNOTSUPP);
+}
+
+static void test_cmd_vcpu_control_singlestep(struct kvm_vm *vm)
+{
+ if (features.singlestep)
+ test_supported_singlestep(vm);
+ else
+ test_unsupported_singlestep(vm);
+}
+
static void test_introspection(struct kvm_vm *vm)
{
srandom(time(0));
@@ -1974,6 +2019,7 @@ static void test_introspection(struct kvm_vm *vm)
test_cmd_vcpu_control_msr(vm);
test_cmd_vm_set_page_access(vm);
test_event_pf(vm);
+ test_cmd_vcpu_control_singlestep(vm);
unhook_introspection(vm);
}
@@ -429,6 +429,11 @@ static void kvmi_job_release_vcpu(struct kvm_vcpu *vcpu, void *ctx)
atomic_set(&vcpui->pause_requests, 0);
vcpui->waiting_for_reply = false;
+
+ if (vcpui->singlestep.loop) {
+ kvmi_arch_stop_singlestep(vcpu);
+ vcpui->singlestep.loop = false;
+ }
}
static void kvmi_release_vcpus(struct kvm *kvm)
@@ -1047,7 +1052,9 @@ bool kvmi_enter_guest(struct kvm_vcpu *vcpu)
vcpui = VCPUI(vcpu);
- if (vcpui->exception.pending) {
+ if (vcpui->singlestep.loop) {
+ kvmi_arch_start_singlestep(vcpu);
+ } else if (vcpui->exception.pending) {
kvmi_inject_pending_exception(vcpu);
r = false;
}
@@ -1297,3 +1304,20 @@ void kvmi_remove_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
+
+bool kvmi_vcpu_running_singlestep(struct kvm_vcpu *vcpu)
+{
+ struct kvm_introspection *kvmi;
+ bool ret;
+
+ kvmi = kvmi_get(vcpu->kvm);
+ if (!kvmi)
+ return false;
+
+ ret = VCPUI(vcpu)->singlestep.loop;
+
+ kvmi_put(vcpu->kvm);
+
+ return ret;
+}
+EXPORT_SYMBOL(kvmi_vcpu_running_singlestep);
@@ -139,5 +139,7 @@ void kvmi_arch_update_page_tracking(struct kvm *kvm,
void kvmi_arch_hook(struct kvm *kvm);
void kvmi_arch_unhook(struct kvm *kvm);
void kvmi_arch_features(struct kvmi_features *feat);
+bool kvmi_arch_start_singlestep(struct kvm_vcpu *vcpu);
+bool kvmi_arch_stop_singlestep(struct kvm_vcpu *vcpu);
#endif
@@ -609,6 +609,39 @@ static int handle_vcpu_control_msr(const struct kvmi_vcpu_msg_job *job,
return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
}
+static int handle_vcpu_control_singlestep(const struct kvmi_vcpu_msg_job *job,
+ const struct kvmi_msg_hdr *msg,
+ const void *_req)
+{
+ const struct kvmi_vcpu_control_singlestep *req = _req;
+ struct kvm_vcpu *vcpu = job->vcpu;
+ int ec = -KVM_EINVAL;
+ bool done;
+ int i;
+
+ if (req->enable > 1)
+ goto reply;
+
+ for (i = 0; i < sizeof(req->padding); i++)
+ if (req->padding[i])
+ goto reply;
+
+ if (req->enable)
+ done = kvmi_arch_start_singlestep(vcpu);
+ else
+ done = kvmi_arch_stop_singlestep(vcpu);
+
+ if (done) {
+ ec = 0;
+ VCPUI(vcpu)->singlestep.loop = !!req->enable;
+ } else {
+ ec = -KVM_EOPNOTSUPP;
+ }
+
+reply:
+ return kvmi_msg_vcpu_reply(job, msg, ec, NULL, 0);
+}
+
/*
* These functions are executed from the vCPU thread. The receiving thread
* passes the messages using a newly allocated 'struct kvmi_vcpu_msg_job'
@@ -617,19 +650,20 @@ static int handle_vcpu_control_msr(const struct kvmi_vcpu_msg_job *job,
*/
static int(*const msg_vcpu[])(const struct kvmi_vcpu_msg_job *,
const struct kvmi_msg_hdr *, const void *) = {
- [KVMI_EVENT] = handle_vcpu_event_reply,
- [KVMI_VCPU_CONTROL_CR] = handle_vcpu_control_cr,
- [KVMI_VCPU_CONTROL_EVENTS] = handle_vcpu_control_events,
- [KVMI_VCPU_CONTROL_MSR] = handle_vcpu_control_msr,
- [KVMI_VCPU_GET_CPUID] = handle_vcpu_get_cpuid,
- [KVMI_VCPU_GET_INFO] = handle_vcpu_get_info,
- [KVMI_VCPU_GET_MTRR_TYPE] = handle_vcpu_get_mtrr_type,
- [KVMI_VCPU_GET_REGISTERS] = handle_vcpu_get_registers,
- [KVMI_VCPU_GET_XCR] = handle_vcpu_get_xcr,
- [KVMI_VCPU_GET_XSAVE] = handle_vcpu_get_xsave,
- [KVMI_VCPU_INJECT_EXCEPTION] = handle_vcpu_inject_exception,
- [KVMI_VCPU_SET_REGISTERS] = handle_vcpu_set_registers,
- [KVMI_VCPU_SET_XSAVE] = handle_vcpu_set_xsave,
+ [KVMI_EVENT] = handle_vcpu_event_reply,
+ [KVMI_VCPU_CONTROL_CR] = handle_vcpu_control_cr,
+ [KVMI_VCPU_CONTROL_EVENTS] = handle_vcpu_control_events,
+ [KVMI_VCPU_CONTROL_MSR] = handle_vcpu_control_msr,
+ [KVMI_VCPU_CONTROL_SINGLESTEP] = handle_vcpu_control_singlestep,
+ [KVMI_VCPU_GET_CPUID] = handle_vcpu_get_cpuid,
+ [KVMI_VCPU_GET_INFO] = handle_vcpu_get_info,
+ [KVMI_VCPU_GET_MTRR_TYPE] = handle_vcpu_get_mtrr_type,
+ [KVMI_VCPU_GET_REGISTERS] = handle_vcpu_get_registers,
+ [KVMI_VCPU_GET_XCR] = handle_vcpu_get_xcr,
+ [KVMI_VCPU_GET_XSAVE] = handle_vcpu_get_xsave,
+ [KVMI_VCPU_INJECT_EXCEPTION] = handle_vcpu_inject_exception,
+ [KVMI_VCPU_SET_REGISTERS] = handle_vcpu_set_registers,
+ [KVMI_VCPU_SET_XSAVE] = handle_vcpu_set_xsave,
};
static bool is_vcpu_command(u16 id)