
[v3,15/16] KVM: arm64: Drop perf.c and fold its tiny bits of code into arm.c / pmu.c

Message ID 20210922000533.713300-16-seanjc@google.com (mailing list archive)
State New, archived
Series: perf: KVM: Fix, optimize, and clean up callbacks

Commit Message

Sean Christopherson Sept. 22, 2021, 12:05 a.m. UTC
Call KVM's (un)register perf callbacks helpers directly from arm.c, and
move the PMU bits into pmu.c and rename the related helper accordingly.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/arm64/include/asm/kvm_host.h |  3 ---
 arch/arm64/kvm/Makefile           |  2 +-
 arch/arm64/kvm/arm.c              |  6 ++++--
 arch/arm64/kvm/perf.c             | 27 ---------------------------
 arch/arm64/kvm/pmu.c              |  8 ++++++++
 include/kvm/arm_pmu.h             |  1 +
 6 files changed, 14 insertions(+), 33 deletions(-)
 delete mode 100644 arch/arm64/kvm/perf.c

Comments

Marc Zyngier Oct. 11, 2021, 9:44 a.m. UTC | #1
On Wed, 22 Sep 2021 01:05:32 +0100,
Sean Christopherson <seanjc@google.com> wrote:
> 
> Call KVM's (un)register perf callbacks helpers directly from arm.c, and
> move the PMU bits into pmu.c and rename the related helper accordingly.
> 
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/arm64/include/asm/kvm_host.h |  3 ---
>  arch/arm64/kvm/Makefile           |  2 +-
>  arch/arm64/kvm/arm.c              |  6 ++++--
>  arch/arm64/kvm/perf.c             | 27 ---------------------------
>  arch/arm64/kvm/pmu.c              |  8 ++++++++
>  include/kvm/arm_pmu.h             |  1 +
>  6 files changed, 14 insertions(+), 33 deletions(-)
>  delete mode 100644 arch/arm64/kvm/perf.c
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 828b6eaa2c56..f141ac65f4f1 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -670,9 +670,6 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
>  int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
>  int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
>  
> -void kvm_perf_init(void);
> -void kvm_perf_teardown(void);
> -
>  #ifdef CONFIG_GUEST_PERF_EVENTS
>  static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
>  {
> diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
> index 989bb5dad2c8..0bcc378b7961 100644
> --- a/arch/arm64/kvm/Makefile
> +++ b/arch/arm64/kvm/Makefile
> @@ -12,7 +12,7 @@ obj-$(CONFIG_KVM) += hyp/
>  
>  kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
>  	 $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o \
> -	 arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
> +	 arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
>  	 inject_fault.o va_layout.o handle_exit.o \
>  	 guest.o debug.o reset.o sys_regs.o \
>  	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 2b542fdc237e..48f89d80f464 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -1744,7 +1744,9 @@ static int init_subsystems(void)
>  	if (err)
>  		goto out;
>  
> -	kvm_perf_init();
> +	kvm_pmu_init();
> +	kvm_register_perf_callbacks(NULL);
> +
>  	kvm_sys_reg_table_init();
>  
>  out:
> @@ -2160,7 +2162,7 @@ int kvm_arch_init(void *opaque)
>  /* NOP: Compiling as a module not supported */
>  void kvm_arch_exit(void)
>  {
> -	kvm_perf_teardown();
> +	kvm_unregister_perf_callbacks();
>  }
>  
>  static int __init early_kvm_mode_cfg(char *arg)
> diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
> deleted file mode 100644
> index 0b902e0d5b5d..000000000000
> --- a/arch/arm64/kvm/perf.c
> +++ /dev/null
> @@ -1,27 +0,0 @@
> -// SPDX-License-Identifier: GPL-2.0-only
> -/*
> - * Based on the x86 implementation.
> - *
> - * Copyright (C) 2012 ARM Ltd.
> - * Author: Marc Zyngier <marc.zyngier@arm.com>
> - */
> -
> -#include <linux/perf_event.h>
> -#include <linux/kvm_host.h>
> -
> -#include <asm/kvm_emulate.h>
> -
> -DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
> -
> -void kvm_perf_init(void)
> -{
> -	if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
> -		static_branch_enable(&kvm_arm_pmu_available);
> -
> -	kvm_register_perf_callbacks(NULL);
> -}
> -
> -void kvm_perf_teardown(void)
> -{
> -	kvm_unregister_perf_callbacks();
> -}
> diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
> index 03a6c1f4a09a..d98b57a17043 100644
> --- a/arch/arm64/kvm/pmu.c
> +++ b/arch/arm64/kvm/pmu.c
> @@ -7,6 +7,14 @@
>  #include <linux/perf_event.h>
>  #include <asm/kvm_hyp.h>
>  
> +DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
> +
> +void kvm_pmu_init(void)
> +{
> +	if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
> +		static_branch_enable(&kvm_arm_pmu_available);
> +}
> +
>  /*
>   * Given the perf event attributes and system type, determine
>   * if we are going to need to switch counters at guest entry/exit.
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 864b9997efb2..42270676498d 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -14,6 +14,7 @@
>  #define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
>  
>  DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
> +void kvm_pmu_init(void);
>  
>  static __always_inline bool kvm_arm_support_pmu_v3(void)
>  {

Note that this patch is now conflicting with e840f42a4992 ("KVM:
arm64: Fix PMU probe ordering"), which was merged in -rc4. Moving the
static key definition to arch/arm64/kvm/pmu-emul.c and getting rid of
kvm_pmu_init() altogether should be enough to resolve it.

With that,

Reviewed-by: Marc Zyngier <maz@kernel.org>

	M.
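
A minimal sketch of the resolution described above, assuming the probe-time
enabling of the static key added by e840f42a4992 is kept as-is (illustrative
only, not the resolved patch):

/*
 * Sketch only: with e840f42a4992 enabling kvm_arm_pmu_available from the
 * host PMU driver's probe path, kvm_pmu_init() has nothing left to do and
 * can be dropped entirely; only the key's definition needs a new home,
 * e.g. at the top of arch/arm64/kvm/pmu-emul.c:
 */
DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);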
Sean Christopherson Nov. 9, 2021, 11:16 p.m. UTC | #2
On Mon, Oct 11, 2021, Marc Zyngier wrote:
> On Wed, 22 Sep 2021 01:05:32 +0100,
> Sean Christopherson <seanjc@google.com> wrote:
> > diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> > index 864b9997efb2..42270676498d 100644
> > --- a/include/kvm/arm_pmu.h
> > +++ b/include/kvm/arm_pmu.h
> > @@ -14,6 +14,7 @@
> >  #define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
> >  
> >  DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
> > +void kvm_pmu_init(void);
> >  
> >  static __always_inline bool kvm_arm_support_pmu_v3(void)
> >  {
> 
> Note that this patch is now conflicting with e840f42a4992 ("KVM:
> arm64: Fix PMU probe ordering"), which was merged in -rc4. Moving the
> static key definition to arch/arm64/kvm/pmu-emul.c and getting rid of
> kvm_pmu_init() altogether should be enough to resolve it.

Defining kvm_arm_pmu_available in pmu-emul.c doesn't work as-is because pmu-emul.c
depends on CONFIG_HW_PERF_EVENTS=y.  Since pmu-emul.c is the only path that enables
the key, my plan is to add a prep patch to bury kvm_arm_pmu_available behind the
existing #ifdef CONFIG_HW_PERF_EVENTS in arm_pmu.h and add a stub
for kvm_arm_support_pmu_v3().  The only ugly part is that the KVM_NVHE_ALIAS() also
gains an #ifdef, but that doesn't seem too bad.
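
For illustration, the arm_pmu.h arrangement being described might look roughly
like the following (a sketch of the stated plan, not the actual prep patch; the
enabled-case body is assumed to stay as it is today):

/* include/kvm/arm_pmu.h -- sketch only, not the actual prep patch */
#ifdef CONFIG_HW_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#else /* !CONFIG_HW_PERF_EVENTS */

/* Stub so callers need not care whether PMU emulation is built in. */
static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#endif /* CONFIG_HW_PERF_EVENTS */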

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 828b6eaa2c56..f141ac65f4f1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -670,9 +670,6 @@  unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
 int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
 
-void kvm_perf_init(void);
-void kvm_perf_teardown(void);
-
 #ifdef CONFIG_GUEST_PERF_EVENTS
 static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 989bb5dad2c8..0bcc378b7961 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -12,7 +12,7 @@  obj-$(CONFIG_KVM) += hyp/
 
 kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
 	 $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o \
-	 arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
+	 arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
 	 inject_fault.o va_layout.o handle_exit.o \
 	 guest.o debug.o reset.o sys_regs.o \
 	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 2b542fdc237e..48f89d80f464 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1744,7 +1744,9 @@  static int init_subsystems(void)
 	if (err)
 		goto out;
 
-	kvm_perf_init();
+	kvm_pmu_init();
+	kvm_register_perf_callbacks(NULL);
+
 	kvm_sys_reg_table_init();
 
 out:
@@ -2160,7 +2162,7 @@  int kvm_arch_init(void *opaque)
 /* NOP: Compiling as a module not supported */
 void kvm_arch_exit(void)
 {
-	kvm_perf_teardown();
+	kvm_unregister_perf_callbacks();
 }
 
 static int __init early_kvm_mode_cfg(char *arg)
diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
deleted file mode 100644
index 0b902e0d5b5d..000000000000
--- a/arch/arm64/kvm/perf.c
+++ /dev/null
@@ -1,27 +0,0 @@ 
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Based on the x86 implementation.
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/perf_event.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_emulate.h>
-
-DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
-void kvm_perf_init(void)
-{
-	if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
-		static_branch_enable(&kvm_arm_pmu_available);
-
-	kvm_register_perf_callbacks(NULL);
-}
-
-void kvm_perf_teardown(void)
-{
-	kvm_unregister_perf_callbacks();
-}
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 03a6c1f4a09a..d98b57a17043 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -7,6 +7,14 @@ 
 #include <linux/perf_event.h>
 #include <asm/kvm_hyp.h>
 
+DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
+void kvm_pmu_init(void)
+{
+	if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
+		static_branch_enable(&kvm_arm_pmu_available);
+}
+
 /*
  * Given the perf event attributes and system type, determine
  * if we are going to need to switch counters at guest entry/exit.
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 864b9997efb2..42270676498d 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -14,6 +14,7 @@ 
 #define ARMV8_PMU_MAX_COUNTER_PAIRS	((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
 
 DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+void kvm_pmu_init(void);
 
 static __always_inline bool kvm_arm_support_pmu_v3(void)
 {