diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -36,6 +36,7 @@
#include <asm/hyperv-tlfs.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
+#define __KVM_WANT_PERF_CALLBACKS
#define KVM_MAX_VCPUS 288
#define KVM_SOFT_MAX_VCPUS 240
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8264,32 +8264,6 @@ static void kvm_timer_init(void)
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}
-static int kvm_is_in_guest(void)
-{
- /* x86's callbacks are registered only when handling a guest NMI. */
- return true;
-}
-
-static int kvm_is_user_mode(void)
-{
- struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
- if (WARN_ON_ONCE(!vcpu))
- return 0;
-
- return static_call(kvm_x86_get_cpl)(vcpu) != 0;
-}
-
-static unsigned long kvm_get_guest_ip(void)
-{
- struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
- if (WARN_ON_ONCE(!vcpu))
- return 0;
-
- return kvm_rip_read(vcpu);
-}
-
static void kvm_handle_intel_pt_intr(void)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
@@ -8302,19 +8276,6 @@ static void kvm_handle_intel_pt_intr(void)
(unsigned long *)&vcpu->arch.pmu.global_status);
}
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .is_in_guest = kvm_is_in_guest,
- .is_user_mode = kvm_is_user_mode,
- .get_guest_ip = kvm_get_guest_ip,
- .handle_intel_pt_intr = NULL,
-};
-
-void kvm_register_perf_callbacks(void)
-{
- __perf_register_guest_info_callbacks(&kvm_guest_cbs);
-}
-EXPORT_SYMBOL_GPL(kvm_register_perf_callbacks);
-
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
@@ -11069,7 +11030,7 @@ int kvm_arch_hardware_setup(void *opaque)
kvm_ops_static_call_update();
if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
- kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
+ kvm_set_intel_pt_intr_handler(kvm_handle_intel_pt_intr);
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
supported_xss = 0;
@@ -11098,7 +11059,7 @@ int kvm_arch_hardware_setup(void *opaque)
void kvm_arch_hardware_unsetup(void)
{
- kvm_guest_cbs.handle_intel_pt_intr = NULL;
+ kvm_set_intel_pt_intr_handler(NULL);
static_call(kvm_x86_hardware_unsetup)();
}
@@ -11725,6 +11686,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu->arch.preempted_in_kernel;
}
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return kvm_rip_read(vcpu);
+}
+
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -387,12 +387,6 @@ static inline bool kvm_cstate_in_guest(struct kvm *kvm)
return kvm->arch.cstate_in_guest;
}
-void kvm_register_perf_callbacks(void);
-static inline void kvm_unregister_perf_callbacks(void)
-{
- __perf_unregister_guest_info_callbacks();
-}
-
static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu, bool is_nmi)
{
WRITE_ONCE(vcpu->arch.handling_nmi_from_guest, is_nmi);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1163,6 +1163,18 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
}
#endif
+#ifdef __KVM_WANT_PERF_CALLBACKS
+
+void kvm_set_intel_pt_intr_handler(void (*handler)(void));
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(void);
+static inline void kvm_unregister_perf_callbacks(void)
+{
+ __perf_unregister_guest_info_callbacks();
+}
+#endif
+
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5460,6 +5460,52 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
return &kvm_running_vcpu;
}
+#ifdef __KVM_WANT_PERF_CALLBACKS
+static int kvm_is_in_guest(void)
+{
+ /* Registration of KVM's callback signifies "in guest". */
+ return true;
+}
+
+static int kvm_is_user_mode(void)
+{
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+ if (WARN_ON_ONCE(!vcpu))
+ return 0;
+
+ return !kvm_arch_vcpu_in_kernel(vcpu);
+}
+
+static unsigned long kvm_get_guest_ip(void)
+{
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+ if (WARN_ON_ONCE(!vcpu))
+ return 0;
+
+ return kvm_arch_vcpu_get_ip(vcpu);
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+ .is_in_guest = kvm_is_in_guest,
+ .is_user_mode = kvm_is_user_mode,
+ .get_guest_ip = kvm_get_guest_ip,
+ .handle_intel_pt_intr = NULL,
+};
+
+void kvm_set_intel_pt_intr_handler(void (*handler)(void))
+{
+ kvm_guest_cbs.handle_intel_pt_intr = handler;
+}
+
+void kvm_register_perf_callbacks(void)
+{
+ __perf_register_guest_info_callbacks(&kvm_guest_cbs);
+}
+EXPORT_SYMBOL_GPL(kvm_register_perf_callbacks);
+#endif
+
struct kvm_cpu_compat_check {
void *opaque;
int *ret;
Move x86's perf guest callbacks into common KVM, as they are semantically
identical to arm64's callbacks (the only other such KVM callbacks).  arm64
will convert to the common versions in a future patch.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/x86.c              | 48 +++++----------------
 arch/x86/kvm/x86.h              |  6 -----
 include/linux/kvm_host.h        | 12 +++++++++
 virt/kvm/kvm_main.c             | 46 +++++++++++++++++++++++++++++++
 5 files changed, 66 insertions(+), 47 deletions(-)
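
For reviewers, below is a minimal sketch (not part of the patch) of how another
architecture could opt in to the common callbacks once this lands.  Only
kvm_arch_vcpu_get_ip(), kvm_set_intel_pt_intr_handler(),
kvm_register_perf_callbacks() and kvm_unregister_perf_callbacks() come from
this patch; everything prefixed with "example_" is a hypothetical placeholder,
and when/where to register is an arch decision (x86 registers its callbacks
only while handling a guest NMI).

/* arch/<arch>/include/asm/kvm_host.h: opt in to the common callbacks. */
#define __KVM_WANT_PERF_CALLBACKS

/* arch/<arch>/kvm/example.c -- illustrative sketch only. */
#include <linux/kvm_host.h>

/* Hypothetical arch helpers, shown for completeness. */
unsigned long example_read_guest_pc(struct kvm_vcpu *vcpu);
void example_handle_intel_pt_intr(void);

/* Common KVM's kvm_get_guest_ip() calls back into this arch hook. */
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	/* x86 returns kvm_rip_read(vcpu) here. */
	return example_read_guest_pc(vcpu);
}

/* Called from the arch's hardware setup path (placement is arch-specific). */
void example_arch_perf_setup(void)
{
	/* Optional: only x86 wires up an Intel PT PMI handler today. */
	kvm_set_intel_pt_intr_handler(example_handle_intel_pt_intr);

	/*
	 * Hand the shared perf_guest_info_callbacks to perf.  When to do
	 * this is up to the architecture.
	 */
	kvm_register_perf_callbacks();
}

/* Called from the arch's hardware unsetup path. */
void example_arch_perf_teardown(void)
{
	kvm_unregister_perf_callbacks();
	kvm_set_intel_pt_intr_handler(NULL);
}

Beyond kvm_arch_vcpu_get_ip(), the only arch behavior the common callbacks rely
on is an accurate kvm_arch_vcpu_in_kernel() (used by kvm_is_user_mode()); the
Intel PT handler remains optional.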