@@ -549,6 +549,7 @@ the following events::
KVMI_EVENT_BREAKPOINT
KVMI_EVENT_CR
KVMI_EVENT_HYPERCALL
+ KVMI_EVENT_XSETBV
When an event is enabled, the introspection tool is notified and
must reply with: continue, retry, crash, etc. (see **Events** below).
@@ -1019,3 +1020,27 @@ other vCPU introspection event.
(``error_code``) and CR2 are sent to the introspection tool,
which should check if its exception has been injected or overridden.
+7. KVMI_EVENT_XSETBV
+--------------------
+
+:Architectures: x86
+:Versions: >= 1
+:Actions: CONTINUE, CRASH
+:Parameters:
+
+::
+
+ struct kvmi_event;
+
+:Returns:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_event_reply;
+
+This event is sent when the extended control register XCR0 has been
+changed and the introspection tool has enabled this event
+(see *KVMI_VCPU_CONTROL_EVENTS*).
+
+``kvmi_event`` is sent to the introspection tool.
@@ -30,6 +30,7 @@ bool kvmi_cr_event(struct kvm_vcpu *vcpu, unsigned int cr,
unsigned long old_value, unsigned long *new_value);
bool kvmi_cr3_intercepted(struct kvm_vcpu *vcpu);
bool kvmi_monitor_cr3w_intercept(struct kvm_vcpu *vcpu, bool enable);
+void kvmi_xsetbv_event(struct kvm_vcpu *vcpu);
#else /* CONFIG_KVM_INTROSPECTION */
@@ -41,6 +42,7 @@ static inline bool kvmi_cr_event(struct kvm_vcpu *vcpu, unsigned int cr,
static inline bool kvmi_cr3_intercepted(struct kvm_vcpu *vcpu) { return false; }
static inline bool kvmi_monitor_cr3w_intercept(struct kvm_vcpu *vcpu,
bool enable) { return false; }
+static inline void kvmi_xsetbv_event(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_INTROSPECTION */
@@ -662,3 +662,54 @@ void kvmi_arch_trap_event(struct kvm_vcpu *vcpu)
 		kvmi_handle_common_event_actions(vcpu->kvm, action, "TRAP");
 	}
 }
+
+/*
+ * Send the KVMI_EVENT_XSETBV event to the introspection tool.
+ *
+ * Returns the action requested by the tool, or ACTION_CONTINUE if
+ * the event could not be delivered, so the vCPU is never stalled by
+ * a dead introspection channel.
+ */
+static u32 kvmi_send_xsetbv(struct kvm_vcpu *vcpu)
+{
+	int err, action;
+
+	err = kvmi_send_event(vcpu, KVMI_EVENT_XSETBV, NULL, 0,
+			      NULL, 0, &action);
+	if (err)
+		return KVMI_EVENT_ACTION_CONTINUE;
+
+	return action;
+}
+
+static void __kvmi_xsetbv_event(struct kvm_vcpu *vcpu)
+{
+	u32 action;
+
+	action = kvmi_send_xsetbv(vcpu);
+	switch (action) {
+	case KVMI_EVENT_ACTION_CONTINUE:
+		break;
+	default:
+		kvmi_handle_common_event_actions(vcpu->kvm, action, "XSETBV");
+	}
+}
+
+/*
+ * kvmi_xsetbv_event() - notify the introspection tool of an XCR0 change.
+ *
+ * Called from __kvm_set_xcr().  The event is sent only while
+ * introspection is active and the tool subscribed to KVMI_EVENT_XSETBV.
+ */
+void kvmi_xsetbv_event(struct kvm_vcpu *vcpu)
+{
+	struct kvm_introspection *kvmi;
+
+	kvmi = kvmi_get(vcpu->kvm);
+	if (!kvmi)
+		return;
+
+	if (is_event_enabled(vcpu, KVMI_EVENT_XSETBV))
+		__kvmi_xsetbv_event(vcpu);
+
+	kvmi_put(vcpu->kvm);
+}
@@ -865,6 +865,12 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 	}
 	vcpu->arch.xcr0 = xcr0;
+#ifdef CONFIG_KVM_INTROSPECTION
+	/* Notify the introspection tool only on an actual XCR0 change. */
+	if (index == 0 && xcr0 != old_xcr0)
+		kvmi_xsetbv_event(vcpu);
+#endif /* CONFIG_KVM_INTROSPECTION */
+
 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
 		kvm_update_cpuid(vcpu);
 	return 0;
@@ -46,6 +46,7 @@ enum {
KVMI_EVENT_BREAKPOINT = 3,
KVMI_EVENT_CR = 4,
KVMI_EVENT_TRAP = 5,
+ KVMI_EVENT_XSETBV = 6,
KVMI_NUM_EVENTS
};
@@ -22,6 +22,8 @@
#define VCPU_ID 5
+#define X86_FEATURE_XSAVE (1<<26)
+
static int socket_pair[2];
#define Kvm_socket socket_pair[0]
#define Userspace_socket socket_pair[1]
@@ -54,6 +56,7 @@ enum {
GUEST_TEST_BP,
GUEST_TEST_CR,
GUEST_TEST_HYPERCALL,
+ GUEST_TEST_XSETBV,
};
#define GUEST_REQUEST_TEST() GUEST_SYNC(0)
@@ -85,6 +88,47 @@ static void guest_hypercall_test(void)
 	asm volatile(".byte 0x0f,0x01,0xc1");
 }
+/* from fpu/internal.h */
+static u64 xgetbv(u32 index)
+{
+	u32 eax, edx;
+
+	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
+		     : "=a" (eax), "=d" (edx)
+		     : "c" (index));
+	return eax + ((u64)edx << 32);
+}
+
+/* from fpu/internal.h */
+static void xsetbv(u32 index, u64 value)
+{
+	u32 eax = value;
+	u32 edx = value >> 32;
+
+	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
+		     : : "a" (eax), "d" (edx), "c" (index));
+}
+
+/* Change XCR0 from inside the guest to trigger a KVMI_EVENT_XSETBV. */
+static void guest_xsetbv_test(void)
+{
+	const int SSE_BIT = 1 << 1;
+	const int AVX_BIT = 1 << 2;
+	u64 xcr0;
+
+	/* avoid #UD */
+	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
+
+	/* Toggle AVX so the written value always differs from XCR0. */
+	xcr0 = xgetbv(0);
+	if (xcr0 & AVX_BIT)
+		xcr0 &= ~AVX_BIT;
+	else
+		xcr0 |= (AVX_BIT | SSE_BIT);
+
+	xsetbv(0, xcr0);
+}
+
 static void guest_code(void)
 {
 	while (true) {
@@ -100,6 +142,9 @@ static void guest_code(void)
case GUEST_TEST_HYPERCALL:
guest_hypercall_test();
break;
+ case GUEST_TEST_XSETBV:
+ guest_xsetbv_test();
+ break;
}
GUEST_SIGNAL_TEST_DONE();
}
@@ -1243,6 +1288,45 @@ static void test_cmd_vm_get_max_gfn(void)
 	DEBUG("max_gfn: 0x%llx\n", rpl.gfn);
 }
+static void test_event_xsetbv(struct kvm_vm *vm)
+{
+	struct vcpu_worker_data data = {
+		.vm = vm,
+		.vcpu_id = VCPU_ID,
+		.test_id = GUEST_TEST_XSETBV,
+	};
+	__u16 event_id = KVMI_EVENT_XSETBV;
+	struct kvm_cpuid_entry2 *entry;
+	struct vcpu_reply rpl = {};
+	struct kvmi_msg_hdr hdr;
+	pthread_t vcpu_thread;
+	struct kvmi_event ev;
+
+	/* The guest test toggles XCR0, which requires XSAVE support. */
+	entry = kvm_get_supported_cpuid_entry(1);
+	if (!(entry->ecx & X86_FEATURE_XSAVE)) {
+		DEBUG("XSAVE is not supported, ecx 0x%x, skipping xsetbv test\n",
+			entry->ecx);
+		return;
+	}
+
+	enable_vcpu_event(vm, event_id);
+
+	vcpu_thread = start_vcpu_worker(&data);
+
+	/* Expect exactly one event: the guest changes XCR0 once. */
+	receive_event(&hdr, &ev, sizeof(ev), event_id);
+
+	DEBUG("XSETBV event, rip 0x%llx\n", ev.arch.regs.rip);
+
+	reply_to_event(&hdr, &ev, KVMI_EVENT_ACTION_CONTINUE,
+			&rpl, sizeof(rpl));
+
+	stop_vcpu_worker(vcpu_thread, &data);
+
+	disable_vcpu_event(vm, event_id);
+}
+
 static void test_introspection(struct kvm_vm *vm)
 {
 	srandom(time(0));
@@ -1268,6 +1350,7 @@ static void test_introspection(struct kvm_vm *vm)
test_cmd_vcpu_control_cr(vm);
test_cmd_vcpu_inject_exception(vm);
test_cmd_vm_get_max_gfn();
+ test_event_xsetbv(vm);
unhook_introspection(vm);
}
@@ -83,6 +83,7 @@ static void setup_known_events(void)
set_bit(KVMI_EVENT_HYPERCALL, Kvmi_known_vcpu_events);
set_bit(KVMI_EVENT_PAUSE_VCPU, Kvmi_known_vcpu_events);
set_bit(KVMI_EVENT_TRAP, Kvmi_known_vcpu_events);
+ set_bit(KVMI_EVENT_XSETBV, Kvmi_known_vcpu_events);
bitmap_or(Kvmi_known_events, Kvmi_known_vm_events,
Kvmi_known_vcpu_events, KVMI_NUM_EVENTS);