diff mbox series

[09/18] KVM: arm64: Drop kvm_arm_pmu_available static key

Message ID 20241217212048.3709204-10-oliver.upton@linux.dev (mailing list archive)
State New
Headers show
Series KVM: arm64: Support FEAT_PMUv3 on Apple hardware | expand

Commit Message

Oliver Upton Dec. 17, 2024, 9:20 p.m. UTC
With the PMUv3 cpucap, kvm_arm_pmu_available is no longer used in the
hot path of guest entry/exit. On top of that, guest support for PMUv3
may not correlate with host support for the feature, e.g. on IMPDEF
hardware.

Throw out the static key and just inspect the list of PMUs to determine
if PMUv3 is supported for KVM guests.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 arch/arm64/kernel/image-vars.h |  5 -----
 arch/arm64/kvm/arm.c           |  4 ++--
 arch/arm64/kvm/pmu-emul.c      | 11 ++++++-----
 include/kvm/arm_pmu.h          | 13 +------------
 4 files changed, 9 insertions(+), 24 deletions(-)

Comments

kernel test robot Dec. 18, 2024, 11:23 p.m. UTC | #1
Hi Oliver,

kernel test robot noticed the following build errors:

[auto build test ERROR on 78d4f34e2115b517bcbfe7ec0d018bbbb6f9b0b8]

url:    https://github.com/intel-lab-lkp/linux/commits/Oliver-Upton/drivers-perf-apple_m1-Refactor-event-select-filter-configuration/20241218-054416
base:   78d4f34e2115b517bcbfe7ec0d018bbbb6f9b0b8
patch link:    https://lore.kernel.org/r/20241217212048.3709204-10-oliver.upton%40linux.dev
patch subject: [PATCH 09/18] KVM: arm64: Drop kvm_arm_pmu_available static key
config: arm64-randconfig-001-20241219 (https://download.01.org/0day-ci/archive/20241219/202412190743.1bLEb7Ps-lkp@intel.com/config)
compiler: clang version 16.0.6 (https://github.com/llvm/llvm-project 7cbf1a2591520c2491aa35339f227775f4d3adf6)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20241219/202412190743.1bLEb7Ps-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202412190743.1bLEb7Ps-lkp@intel.com/

All errors (new ones prefixed by >>):

>> arch/arm64/kvm/arm.c:394:7: error: call to undeclared function 'kvm_supports_guest_pmuv3'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
                   r = kvm_supports_guest_pmuv3();
                       ^
   arch/arm64/kvm/arm.c:1400:7: error: call to undeclared function 'kvm_supports_guest_pmuv3'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
           if (!kvm_supports_guest_pmuv3())
                ^
   2 errors generated.


vim +/kvm_supports_guest_pmuv3 +394 arch/arm64/kvm/arm.c

   309	
   310	int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
   311	{
   312		int r;
   313	
   314		if (kvm && kvm_vm_is_protected(kvm) && !pkvm_ext_allowed(kvm, ext))
   315			return 0;
   316	
   317		switch (ext) {
   318		case KVM_CAP_IRQCHIP:
   319			r = vgic_present;
   320			break;
   321		case KVM_CAP_IOEVENTFD:
   322		case KVM_CAP_USER_MEMORY:
   323		case KVM_CAP_SYNC_MMU:
   324		case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
   325		case KVM_CAP_ONE_REG:
   326		case KVM_CAP_ARM_PSCI:
   327		case KVM_CAP_ARM_PSCI_0_2:
   328		case KVM_CAP_READONLY_MEM:
   329		case KVM_CAP_MP_STATE:
   330		case KVM_CAP_IMMEDIATE_EXIT:
   331		case KVM_CAP_VCPU_EVENTS:
   332		case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
   333		case KVM_CAP_ARM_NISV_TO_USER:
   334		case KVM_CAP_ARM_INJECT_EXT_DABT:
   335		case KVM_CAP_SET_GUEST_DEBUG:
   336		case KVM_CAP_VCPU_ATTRIBUTES:
   337		case KVM_CAP_PTP_KVM:
   338		case KVM_CAP_ARM_SYSTEM_SUSPEND:
   339		case KVM_CAP_IRQFD_RESAMPLE:
   340		case KVM_CAP_COUNTER_OFFSET:
   341			r = 1;
   342			break;
   343		case KVM_CAP_SET_GUEST_DEBUG2:
   344			return KVM_GUESTDBG_VALID_MASK;
   345		case KVM_CAP_ARM_SET_DEVICE_ADDR:
   346			r = 1;
   347			break;
   348		case KVM_CAP_NR_VCPUS:
   349			/*
   350		 * ARM64 treats KVM_CAP_NR_VCPUS differently from all other
   351			 * architectures, as it does not always bound it to
   352			 * KVM_CAP_MAX_VCPUS. It should not matter much because
   353			 * this is just an advisory value.
   354			 */
   355			r = min_t(unsigned int, num_online_cpus(),
   356				  kvm_arm_default_max_vcpus());
   357			break;
   358		case KVM_CAP_MAX_VCPUS:
   359		case KVM_CAP_MAX_VCPU_ID:
   360			if (kvm)
   361				r = kvm->max_vcpus;
   362			else
   363				r = kvm_arm_default_max_vcpus();
   364			break;
   365		case KVM_CAP_MSI_DEVID:
   366			if (!kvm)
   367				r = -EINVAL;
   368			else
   369				r = kvm->arch.vgic.msis_require_devid;
   370			break;
   371		case KVM_CAP_ARM_USER_IRQ:
   372			/*
   373			 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
   374			 * (bump this number if adding more devices)
   375			 */
   376			r = 1;
   377			break;
   378		case KVM_CAP_ARM_MTE:
   379			r = system_supports_mte();
   380			break;
   381		case KVM_CAP_STEAL_TIME:
   382			r = kvm_arm_pvtime_supported();
   383			break;
   384		case KVM_CAP_ARM_EL1_32BIT:
   385			r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
   386			break;
   387		case KVM_CAP_GUEST_DEBUG_HW_BPS:
   388			r = get_num_brps();
   389			break;
   390		case KVM_CAP_GUEST_DEBUG_HW_WPS:
   391			r = get_num_wrps();
   392			break;
   393		case KVM_CAP_ARM_PMU_V3:
 > 394			r = kvm_supports_guest_pmuv3();
   395			break;
   396		case KVM_CAP_ARM_INJECT_SERROR_ESR:
   397			r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
   398			break;
   399		case KVM_CAP_ARM_VM_IPA_SIZE:
   400			r = get_kvm_ipa_limit();
   401			break;
   402		case KVM_CAP_ARM_SVE:
   403			r = system_supports_sve();
   404			break;
   405		case KVM_CAP_ARM_PTRAUTH_ADDRESS:
   406		case KVM_CAP_ARM_PTRAUTH_GENERIC:
   407			r = kvm_has_full_ptr_auth();
   408			break;
   409		case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
   410			if (kvm)
   411				r = kvm->arch.mmu.split_page_chunk_size;
   412			else
   413				r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
   414			break;
   415		case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
   416			r = kvm_supported_block_sizes();
   417			break;
   418		case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
   419			r = BIT(0);
   420			break;
   421		default:
   422			r = 0;
   423		}
   424	
   425		return r;
   426	}
   427
diff mbox series

Patch

diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 8f5422ed1b75..5919320bc802 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -109,11 +109,6 @@  KVM_NVHE_ALIAS(vgic_v3_cpuif_trap);
 KVM_NVHE_ALIAS(__start___kvm_ex_table);
 KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 
-/* PMU available static key */
-#ifdef CONFIG_HW_PERF_EVENTS
-KVM_NVHE_ALIAS(kvm_arm_pmu_available);
-#endif
-
 /* Position-independent library routines */
 KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page);
 KVM_NVHE_ALIAS_HYP(copy_page, __pi_copy_page);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index a102c3aebdbc..081e638c674f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -391,7 +391,7 @@  int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		r = get_num_wrps();
 		break;
 	case KVM_CAP_ARM_PMU_V3:
-		r = kvm_arm_support_pmu_v3();
+		r = kvm_supports_guest_pmuv3();
 		break;
 	case KVM_CAP_ARM_INJECT_SERROR_ESR:
 		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
@@ -1397,7 +1397,7 @@  static unsigned long system_supported_vcpu_features(void)
 	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
 		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);
 
-	if (!kvm_arm_support_pmu_v3())
+	if (!kvm_supports_guest_pmuv3())
 		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);
 
 	if (!system_supports_sve())
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 6d7fc0051ad8..33cd694c754f 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -17,14 +17,18 @@ 
 
 #define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)
 
-DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
 static LIST_HEAD(arm_pmus);
 static DEFINE_MUTEX(arm_pmus_lock);
 
 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
 
+bool kvm_supports_guest_pmuv3(void)
+{
+	guard(mutex)(&arm_pmus_lock);
+	return !list_empty(&arm_pmus);
+}
+
 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc)
 {
 	return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]);
@@ -826,9 +830,6 @@  void kvm_host_pmu_init(struct arm_pmu *pmu)
 	entry->arm_pmu = pmu;
 	list_add_tail(&entry->entry, &arm_pmus);
 
-	if (list_is_singular(&arm_pmus))
-		static_branch_enable(&kvm_arm_pmu_available);
-
 out_unlock:
 	mutex_unlock(&arm_pmus_lock);
 }
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 7ef9eb3cede5..d3dcf5438315 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -37,13 +37,7 @@  struct arm_pmu_entry {
 	struct arm_pmu *arm_pmu;
 };
 
-DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
-static __always_inline bool kvm_arm_support_pmu_v3(void)
-{
-	return static_branch_likely(&kvm_arm_pmu_available);
-}
-
+bool kvm_supports_guest_pmuv3(void);
 #define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
 u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
 void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
@@ -103,11 +97,6 @@  void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu);
 struct kvm_pmu {
 };
 
-static inline bool kvm_arm_support_pmu_v3(void)
-{
-	return false;
-}
-
 #define kvm_arm_pmu_irq_initialized(v)	(false)
 static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
 					    u64 select_idx)