@@ -541,6 +541,7 @@ the following events::
KVMI_VCPU_EVENT_BREAKPOINT
KVMI_VCPU_EVENT_CR
KVMI_VCPU_EVENT_HYPERCALL
+ KVMI_VCPU_EVENT_XSETBV
When an event is enabled, the introspection tool is notified and
must reply with: continue, retry, crash, etc. (see **Events** below).
@@ -1061,3 +1062,52 @@ other vCPU introspection event.
(``nr``), exception code (``error_code``) and ``address`` are sent to
the introspection tool, which should check if its exception has been
injected or overridden.
+
+7. KVMI_VCPU_EVENT_XSETBV
+-------------------------
+
+:Architectures: x86
+:Versions: >= 1
+:Actions: CONTINUE, CRASH
+:Parameters:
+
+::
+
+ struct kvmi_vcpu_event;
+ struct kvmi_vcpu_event_xsetbv {
+ __u8 xcr;
+ __u8 padding[7];
+ __u64 old_value;
+ __u64 new_value;
+ };
+
+:Returns:
+
+::
+
+ struct kvmi_vcpu_hdr;
+ struct kvmi_vcpu_event_reply;
+
+This event is sent when an extended control register (XCR) is about
+to be changed, if the introspection tool has enabled this kind of
+event (see *KVMI_VCPU_CONTROL_EVENTS*).
+
+``kvmi_vcpu_event`` (with the vCPU state), the extended control register
+number (``xcr``), the old value (``old_value``) and the new value
+(``new_value``) are sent to the introspection tool.
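+
+As a sketch, the tool-side handling could look like the snippet
+below, where ``read_event()`` and ``send_reply()`` are hypothetical
+helpers, not part of the API: the former reads the event message
+(headers stripped) from the introspection socket, the latter sends
+back ``struct kvmi_vcpu_hdr`` plus ``struct kvmi_vcpu_event_reply``
+with the chosen action::
+
+	struct {
+		struct kvmi_vcpu_event common;
+		struct kvmi_vcpu_event_xsetbv xsetbv;
+	} ev;
+
+	read_event(fd, &ev, sizeof(ev));
+	printf("XCR%u changed: 0x%llx -> 0x%llx\n", ev.xsetbv.xcr,
+	       ev.xsetbv.old_value, ev.xsetbv.new_value);
+	send_reply(fd, KVMI_EVENT_ACTION_CONTINUE);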
@@ -46,6 +46,8 @@ bool kvmi_cr_event(struct kvm_vcpu *vcpu, unsigned int cr,
bool kvmi_cr3_intercepted(struct kvm_vcpu *vcpu);
bool kvmi_monitor_cr3w_intercept(struct kvm_vcpu *vcpu, bool enable);
void kvmi_enter_guest(struct kvm_vcpu *vcpu);
+void kvmi_xsetbv_event(struct kvm_vcpu *vcpu, u8 xcr,
+ u64 old_value, u64 new_value);
#else /* CONFIG_KVM_INTROSPECTION */
@@ -59,6 +61,8 @@ static inline bool kvmi_cr3_intercepted(struct kvm_vcpu *vcpu) { return false; }
static inline bool kvmi_monitor_cr3w_intercept(struct kvm_vcpu *vcpu,
bool enable) { return false; }
static inline void kvmi_enter_guest(struct kvm_vcpu *vcpu) { }
+static inline void kvmi_xsetbv_event(struct kvm_vcpu *vcpu, u8 xcr,
+ u64 old_value, u64 new_value) { }
#endif /* CONFIG_KVM_INTROSPECTION */
@@ -95,4 +95,11 @@ struct kvmi_vcpu_inject_exception {
__u64 address;
};
+struct kvmi_vcpu_event_xsetbv {
+ __u8 xcr;
+ __u8 padding[7];
+ __u64 old_value;
+ __u64 new_value;
+};
+
#endif /* _UAPI_ASM_X86_KVMI_H */
@@ -16,6 +16,7 @@ void kvmi_arch_init_vcpu_events_mask(unsigned long *supported)
set_bit(KVMI_VCPU_EVENT_CR, supported);
set_bit(KVMI_VCPU_EVENT_HYPERCALL, supported);
set_bit(KVMI_VCPU_EVENT_TRAP, supported);
+ set_bit(KVMI_VCPU_EVENT_XSETBV, supported);
}
static unsigned int kvmi_vcpu_mode(const struct kvm_vcpu *vcpu,
@@ -567,3 +568,33 @@ void kvmi_arch_send_pending_event(struct kvm_vcpu *vcpu)
kvmi_send_trap_event(vcpu);
}
}
+
+static void __kvmi_xsetbv_event(struct kvm_vcpu *vcpu, u8 xcr,
+ u64 old_value, u64 new_value)
+{
+ u32 action;
+
+ action = kvmi_msg_send_vcpu_xsetbv(vcpu, xcr, old_value, new_value);
+ switch (action) {
+ case KVMI_EVENT_ACTION_CONTINUE:
+ break;
+ default:
+ kvmi_handle_common_event_actions(vcpu, action);
+ }
+}
+
+void kvmi_xsetbv_event(struct kvm_vcpu *vcpu, u8 xcr,
+ u64 old_value, u64 new_value)
+{
+ struct kvm_introspection *kvmi;
+
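+	/* kvmi_get() returns NULL if the VM is not introspected */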
+ kvmi = kvmi_get(vcpu->kvm);
+ if (!kvmi)
+ return;
+
+ if (is_vcpu_event_enabled(vcpu, KVMI_VCPU_EVENT_XSETBV))
+ __kvmi_xsetbv_event(vcpu, xcr, old_value, new_value);
+
+ kvmi_put(vcpu->kvm);
+}
@@ -14,5 +14,7 @@ int kvmi_arch_cmd_vcpu_inject_exception(struct kvm_vcpu *vcpu,
u32 kvmi_msg_send_vcpu_cr(struct kvm_vcpu *vcpu, u32 cr, u64 old_value,
u64 new_value, u64 *ret_value);
u32 kvmi_msg_send_vcpu_trap(struct kvm_vcpu *vcpu);
+u32 kvmi_msg_send_vcpu_xsetbv(struct kvm_vcpu *vcpu, u8 xcr,
+ u64 old_value, u64 new_value);
#endif
@@ -232,3 +232,23 @@ u32 kvmi_msg_send_vcpu_trap(struct kvm_vcpu *vcpu)
return action;
}
+
+u32 kvmi_msg_send_vcpu_xsetbv(struct kvm_vcpu *vcpu, u8 xcr,
+ u64 old_value, u64 new_value)
+{
+ struct kvmi_vcpu_event_xsetbv e;
+ u32 action;
+ int err;
+
+ memset(&e, 0, sizeof(e));
+ e.xcr = xcr;
+ e.old_value = old_value;
+ e.new_value = new_value;
+
+ err = kvmi_send_vcpu_event(vcpu, KVMI_VCPU_EVENT_XSETBV,
+ &e, sizeof(e), NULL, 0, &action);
+ if (err)
+ action = KVMI_EVENT_ACTION_CONTINUE;
+
+ return action;
+}
@@ -955,6 +955,12 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
}
vcpu->arch.xcr0 = xcr0;
+#ifdef CONFIG_KVM_INTROSPECTION
+	/* notify the introspection tool only if XCR0 actually changed */
+	if (index == 0 && xcr0 != old_xcr0)
+		kvmi_xsetbv_event(vcpu, 0, old_xcr0, xcr0);
+#endif /* CONFIG_KVM_INTROSPECTION */
+
if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
kvm_update_cpuid_runtime(vcpu);
return 0;
@@ -63,6 +63,7 @@ enum {
KVMI_VCPU_EVENT_BREAKPOINT = KVMI_VCPU_EVENT_ID(2),
KVMI_VCPU_EVENT_CR = KVMI_VCPU_EVENT_ID(3),
KVMI_VCPU_EVENT_TRAP = KVMI_VCPU_EVENT_ID(4),
+ KVMI_VCPU_EVENT_XSETBV = KVMI_VCPU_EVENT_ID(5),
KVMI_NEXT_VCPU_EVENT
};
@@ -23,6 +23,8 @@
#define VCPU_ID 1
+#define X86_FEATURE_XSAVE (1 << 26) /* CPUID.1:ECX.XSAVE (bit 26) */
+
static int socket_pair[2];
#define Kvm_socket socket_pair[0]
#define Userspace_socket socket_pair[1]
@@ -57,6 +59,7 @@ enum {
GUEST_TEST_BP,
GUEST_TEST_CR,
GUEST_TEST_HYPERCALL,
+ GUEST_TEST_XSETBV,
};
#define GUEST_REQUEST_TEST() GUEST_SYNC(0)
@@ -92,6 +95,46 @@ static void guest_hypercall_test(void)
asm volatile(".byte 0x0f,0x01,0xc1");
}
+/* from fpu/internal.h */
+static u64 xgetbv(u32 index)
+{
+ u32 eax, edx;
+
+ asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
+ : "=a" (eax), "=d" (edx)
+ : "c" (index));
+ return eax + ((u64)edx << 32);
+}
+
+/* from fpu/internal.h */
+static void xsetbv(u32 index, u64 value)
+{
+ u32 eax = value;
+ u32 edx = value >> 32;
+
+ asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
+ : : "a" (eax), "d" (edx), "c" (index));
+}
+
+static void guest_xsetbv_test(void)
+{
+ const int SSE_BIT = 1 << 1;
+ const int AVX_BIT = 1 << 2;
+ u64 xcr0;
+
+	/* set CR4.OSXSAVE first, otherwise xgetbv/xsetbv raise #UD */
+	set_cr4(get_cr4() | X86_CR4_OSXSAVE);
+
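+	/* toggle XCR0.AVX to guarantee a change; AVX requires SSE to be set */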
+ xcr0 = xgetbv(0);
+ if (xcr0 & AVX_BIT)
+ xcr0 &= ~AVX_BIT;
+ else
+ xcr0 |= (AVX_BIT | SSE_BIT);
+
+ xsetbv(0, xcr0);
+}
+
static void guest_code(void)
{
while (true) {
@@ -107,6 +149,9 @@ static void guest_code(void)
case GUEST_TEST_HYPERCALL:
guest_hypercall_test();
break;
+ case GUEST_TEST_XSETBV:
+ guest_xsetbv_test();
+ break;
}
GUEST_SIGNAL_TEST_DONE();
}
@@ -1333,6 +1378,44 @@ static void test_cmd_vm_get_max_gfn(void)
pr_debug("max_gfn: 0x%llx\n", rpl.gfn);
}
+static void test_event_xsetbv(struct kvm_vm *vm)
+{
+ struct vcpu_worker_data data = {
+ .vm = vm,
+ .vcpu_id = VCPU_ID,
+ .test_id = GUEST_TEST_XSETBV,
+ };
+ __u16 event_id = KVMI_VCPU_EVENT_XSETBV;
+ struct kvm_cpuid_entry2 *entry;
+ struct vcpu_reply rpl = {};
+ struct kvmi_msg_hdr hdr;
+ pthread_t vcpu_thread;
+ struct {
+ struct vcpu_event vcpu_ev;
+ struct kvmi_vcpu_event_xsetbv xsetbv;
+ } ev;
+
+ entry = kvm_get_supported_cpuid_entry(1);
+ if (!(entry->ecx & X86_FEATURE_XSAVE)) {
+ print_skip("XSAVE not supported, ecx 0x%x", entry->ecx);
+ return;
+ }
+
+ enable_vcpu_event(vm, event_id);
+ vcpu_thread = start_vcpu_worker(&data);
+
+ receive_vcpu_event(&hdr, &ev.vcpu_ev, sizeof(ev), event_id);
+
+ pr_debug("XSETBV event, XCR%u, old 0x%llx, new 0x%llx\n",
+ ev.xsetbv.xcr, ev.xsetbv.old_value, ev.xsetbv.new_value);
+
+ reply_to_event(&hdr, &ev.vcpu_ev, KVMI_EVENT_ACTION_CONTINUE,
+ &rpl, sizeof(rpl));
+
+ wait_vcpu_worker(vcpu_thread);
+ disable_vcpu_event(vm, event_id);
+}
+
static void test_introspection(struct kvm_vm *vm)
{
srandom(time(0));
@@ -1359,6 +1442,7 @@ static void test_introspection(struct kvm_vm *vm)
test_cmd_vcpu_control_cr(vm);
test_cmd_vcpu_inject_exception(vm);
test_cmd_vm_get_max_gfn();
+ test_event_xsetbv(vm);
unhook_introspection(vm);
}