@@ -548,7 +548,7 @@ void xen_pmu_init(int cpu)
per_cpu(xenpmu_shared, cpu).flags = 0;
if (cpu == 0) {
- perf_register_guest_info_callbacks(&xen_guest_cbs);
+ perf_register_guest_info_callbacks_all_cpus(&xen_guest_cbs);
xen_pmu_arch_init();
}
@@ -1171,7 +1171,7 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
void kvm_register_perf_callbacks(void);
static inline void kvm_unregister_perf_callbacks(void)
{
- __perf_unregister_guest_info_callbacks();
+ perf_unregister_guest_info_callbacks();
}
#endif
@@ -1238,10 +1238,9 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
#ifdef CONFIG_HAVE_GUEST_PERF_EVENTS
DECLARE_PER_CPU(struct perf_guest_info_callbacks *, perf_guest_cbs);
-extern void __perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
-extern void __perf_unregister_guest_info_callbacks(void);
-extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(void);
+extern void perf_register_guest_info_callbacks_all_cpus(struct perf_guest_info_callbacks *cbs);
#endif /* CONFIG_HAVE_GUEST_PERF_EVENTS */
extern void perf_event_exec(void);
@@ -1486,9 +1485,7 @@ static inline void
perf_bp_event(struct perf_event *event, void *data) { }
#ifdef CONFIG_HAVE_GUEST_PERF_EVENTS
-static inline void perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { }
-static inline void perf_unregister_guest_info_callbacks(void) { }
+static inline void perf_register_guest_info_callbacks_all_cpus(struct perf_guest_info_callbacks *cbs) { }
#endif
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
@@ -6485,35 +6485,26 @@ static void perf_pending_event(struct irq_work *entry)
#ifdef CONFIG_HAVE_GUEST_PERF_EVENTS
DEFINE_PER_CPU(struct perf_guest_info_callbacks *, perf_guest_cbs);
-void __perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
-{
- __this_cpu_write(perf_guest_cbs, cbs);
-}
-EXPORT_SYMBOL_GPL(__perf_register_guest_info_callbacks);
-
-void __perf_unregister_guest_info_callbacks(void)
-{
- __this_cpu_write(perf_guest_cbs, NULL);
-}
-EXPORT_SYMBOL_GPL(__perf_unregister_guest_info_callbacks);
-
void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(perf_guest_cbs, cpu) = cbs;
+ __this_cpu_write(perf_guest_cbs, cbs);
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
void perf_unregister_guest_info_callbacks(void)
{
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(perf_guest_cbs, cpu) = NULL;
+ __this_cpu_write(perf_guest_cbs, NULL);
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
+
+void perf_register_guest_info_callbacks_all_cpus(struct perf_guest_info_callbacks *cbs)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(perf_guest_cbs, cpu) = cbs;
+}
+EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks_all_cpus);
#endif
static void
@@ -5502,7 +5502,7 @@ EXPORT_SYMBOL_GPL(kvm_set_intel_pt_intr_handler);
void kvm_register_perf_callbacks(void)
{
- __perf_register_guest_info_callbacks(&kvm_guest_cbs);
+ perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
EXPORT_SYMBOL_GPL(kvm_register_perf_callbacks);
#endif
Drop the helper that allows bulk unregistering of the per-CPU callbacks
now that KVM, the only entity that actually unregisters callbacks, uses
the per-CPU helpers.  Bulk unregistering is inherently unsafe as there
are no protections against nullifying a pointer for a CPU that is using
said pointer in a PMI handler.

Opportunistically tweak names to better reflect reality.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/xen/pmu.c         |  2 +-
 include/linux/kvm_host.h   |  2 +-
 include/linux/perf_event.h |  9 +++------
 kernel/events/core.c       | 31 +++++++++++--------------------
 virt/kvm/kvm_main.c        |  2 +-
 5 files changed, 17 insertions(+), 29 deletions(-)