diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1543,6 +1543,10 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
return -ENOTSUPP;
}
+#define __KVM_WANT_PERF_CALLBACKS
+#define kvm_arch_pmi_in_guest(vcpu) \
+ ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
+
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8264,43 +8264,12 @@ static void kvm_timer_init(void)
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}
-static inline bool kvm_pmi_in_guest(struct kvm_vcpu *vcpu)
-{
- return vcpu && vcpu->arch.handling_intr_from_guest;
-}
-
-static unsigned int kvm_guest_state(void)
-{
- struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
- unsigned int state;
-
- if (!kvm_pmi_in_guest(vcpu))
- return 0;
-
- state = PERF_GUEST_ACTIVE;
- if (static_call(kvm_x86_get_cpl)(vcpu))
- state |= PERF_GUEST_USER;
-
- return state;
-}
-
-static unsigned long kvm_guest_get_ip(void)
-{
- struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
- /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
- if (WARN_ON_ONCE(!kvm_pmi_in_guest(vcpu)))
- return 0;
-
- return kvm_rip_read(vcpu);
-}
-
static unsigned int kvm_handle_intel_pt_intr(void)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
/* '0' on failure so that the !PT case can use a RET0 static call. */
- if (!kvm_pmi_in_guest(vcpu))
+ if (!kvm_arch_pmi_in_guest(vcpu))
return 0;
kvm_make_request(KVM_REQ_PMI, vcpu);
@@ -8309,12 +8278,6 @@ static unsigned int kvm_handle_intel_pt_intr(void)
return 1;
}
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .state = kvm_guest_state,
- .get_ip = kvm_guest_get_ip,
- .handle_intel_pt_intr = NULL,
-};
-
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
@@ -11068,9 +11031,11 @@ int kvm_arch_hardware_setup(void *opaque)
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
kvm_ops_static_call_update();
+ /* Temporary ugliness. */
if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
- kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
- perf_register_guest_info_callbacks(&kvm_guest_cbs);
+ kvm_register_perf_callbacks(kvm_handle_intel_pt_intr);
+ else
+ kvm_register_perf_callbacks(NULL);
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
supported_xss = 0;
@@ -11099,8 +11064,7 @@ int kvm_arch_hardware_setup(void *opaque)
void kvm_arch_hardware_unsetup(void)
{
- perf_unregister_guest_info_callbacks();
- kvm_guest_cbs.handle_intel_pt_intr = NULL;
+ kvm_unregister_perf_callbacks();
static_call(kvm_x86_hardware_unsetup)();
}
@@ -11727,6 +11691,15 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
-	return vcpu->arch.preempted_in_kernel;
+	/* For the running vCPU, check the actual CPL; preempted_in_kernel is stale. */
+	if (vcpu != kvm_get_running_vcpu())
+		return vcpu->arch.preempted_in_kernel;
+
+	return static_call(kvm_x86_get_cpl)(vcpu) == 0;
 }
 
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+	return kvm_rip_read(vcpu);
+}
+
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1163,6 +1163,17 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
 }
 #endif
 
+#ifdef __KVM_WANT_PERF_CALLBACKS
+
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+static inline void kvm_unregister_perf_callbacks(void)
+{
+ perf_unregister_guest_info_callbacks();
+}
+#endif
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5460,6 +5460,46 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
return &kvm_running_vcpu;
}
+#ifdef __KVM_WANT_PERF_CALLBACKS
+static unsigned int kvm_guest_state(void)
+{
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+ unsigned int state;
+
+ if (!kvm_arch_pmi_in_guest(vcpu))
+ return 0;
+
+ state = PERF_GUEST_ACTIVE;
+ if (!kvm_arch_vcpu_in_kernel(vcpu))
+ state |= PERF_GUEST_USER;
+
+ return state;
+}
+
+static unsigned long kvm_guest_get_ip(void)
+{
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+ /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
+ if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
+ return 0;
+
+ return kvm_arch_vcpu_get_ip(vcpu);
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+ .state = kvm_guest_state,
+ .get_ip = kvm_guest_get_ip,
+ .handle_intel_pt_intr = NULL,
+};
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
+{
+ kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
+ perf_register_guest_info_callbacks(&kvm_guest_cbs);
+}
+#endif
+
struct kvm_cpu_compat_check {
void *opaque;
int *ret;
Move x86's perf guest callbacks into common KVM, as they are semantically
identical to arm64's callbacks (the only other such KVM callbacks).  arm64
will convert to the common versions in a future patch.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm_host.h |  4 +++
 arch/x86/kvm/x86.c              | 53 +++++++--------------------------
 include/linux/kvm_host.h        | 12 ++++++++
 virt/kvm/kvm_main.c             | 40 +++++++++++++++++++++++++
 4 files changed, 67 insertions(+), 42 deletions(-)
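
For reviewers, the opt-in contract this patch establishes is small: an
architecture defines __KVM_WANT_PERF_CALLBACKS and kvm_arch_pmi_in_guest()
in its asm/kvm_host.h, implements kvm_arch_vcpu_get_ip(), and brackets
hardware setup/teardown with kvm_register_perf_callbacks() and
kvm_unregister_perf_callbacks().  Below is a minimal sketch of what that
looks like for a hypothetical architecture; the "foo" names and the
vcpu->arch fields are illustrative only and are not part of this patch:

/* arch/foo/include/asm/kvm_host.h (hypothetical) */
#define __KVM_WANT_PERF_CALLBACKS

/* Non-NULL vCPU + PMI taken from guest context => guest-owned sample. */
#define kvm_arch_pmi_in_guest(vcpu) \
	((vcpu) && (vcpu)->arch.pmi_from_guest)

/* arch/foo/kvm/main.c (hypothetical) */
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	/* Guest PC snapshotted on the most recent exit. */
	return vcpu->arch.guest_pc;
}

int kvm_arch_hardware_setup(void *opaque)
{
	/* No Intel PT equivalent on this arch, so no PT interrupt handler. */
	kvm_register_perf_callbacks(NULL);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_unregister_perf_callbacks();
}

With those hooks in place, perf resolves guest vs. host state and the guest
IP through the common kvm_guest_cbs, exactly as x86 does after this patch.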