[v8,05/26] KVM: x86/pmu: Get eventsel for fixed counters from perf

Message ID 20231110021306.1269082-6-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: x86/pmu: selftests: Fixes and new tests

Commit Message

Sean Christopherson Nov. 10, 2023, 2:12 a.m. UTC
Get the event selectors used to effectively request fixed counters for
perf events from perf itself instead of hardcoding them in KVM and hoping
that they match the underlying hardware.  While fixed counters 0 and 1 use
architectural events, as of ffbe4ab0beda ("perf/x86/intel: Extend the
ref-cycles event to GP counters") fixed counter 2 (reference TSC cycles)
may use a software-defined pseudo-encoding or a real hardware-defined
encoding.

Reported-by: Kan Liang <kan.liang@linux.intel.com>
Closes: https://lkml.kernel.org/r/4281eee7-6423-4ec8-bb18-c6aeee1faf2c%40linux.intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/vmx/pmu_intel.c | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)
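
For context, and not part of the patch itself: perf's event_map() historically
translates PERF_COUNT_HW_REF_CPU_CYCLES to the pseudo-encoding 0x0300 (the
{0x00, 0x03} pair in the old table below), which only fixed counter 2 can
count, whereas parts covered by ffbe4ab0beda report a real general purpose
encoding instead. The perf-side helper the patch consumes,
perf_get_hw_event_config(), looks roughly like this sketch:

	/*
	 * Sketch of perf's helper (arch/x86/events/core.c); returns 0 when
	 * the active PMU has no encoding for the generic hardware event.
	 */
	u64 perf_get_hw_event_config(int hw_event)
	{
		int max = x86_pmu.max_events;

		if (hw_event < max)
			return x86_pmu.event_map(array_index_nospec(hw_event, max));

		return 0;
	}

Because the helper returns 0 for events perf has no encoding for, KVM can key
its WARN below on a zero eventsel instead of second-guessing the hardware.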

Comments

Liang, Kan Nov. 10, 2023, 1:48 p.m. UTC | #1
On 2023-11-09 9:12 p.m., Sean Christopherson wrote:
> Get the event selectors used to effectively request fixed counters for
> perf events from perf itself instead of hardcoding them in KVM and hoping
> that they match the underlying hardware.  While fixed counters 0 and 1 use
> architectural events, as of ffbe4ab0beda ("perf/x86/intel: Extend the
> ref-cycles event to GP counters") fixed counter 2 (reference TSC cycles)
> may use a software-defined pseudo-encoding or a real hardware-defined
> encoding.
> 
> Reported-by: Kan Liang <kan.liang@linux.intel.com>
> Closes: https://lkml.kernel.org/r/4281eee7-6423-4ec8-bb18-c6aeee1faf2c%40linux.intel.com
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>

Thanks,
Kan

Patch

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index c9df139efc0c..3bac3b32b485 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -406,24 +406,28 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
  * result is the same (ignoring the fact that using a general purpose counter
  * will likely exacerbate counter contention).
  *
- * Note, reference cycles is counted using a perf-defined "psuedo-encoding",
- * as there is no architectural general purpose encoding for reference cycles.
+ * Forcibly inlined to allow asserting on @index at build time, and there should
+ * never be more than one user.
  */
-static u64 intel_get_fixed_pmc_eventsel(int index)
+static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
 {
-	const struct {
-		u8 event;
-		u8 unit_mask;
-	} fixed_pmc_events[] = {
-		[0] = { 0xc0, 0x00 }, /* Instruction Retired / PERF_COUNT_HW_INSTRUCTIONS. */
-		[1] = { 0x3c, 0x00 }, /* CPU Cycles/ PERF_COUNT_HW_CPU_CYCLES. */
-		[2] = { 0x00, 0x03 }, /* Reference Cycles / PERF_COUNT_HW_REF_CPU_CYCLES*/
+	const enum perf_hw_id fixed_pmc_perf_ids[] = {
+		[0] = PERF_COUNT_HW_INSTRUCTIONS,
+		[1] = PERF_COUNT_HW_CPU_CYCLES,
+		[2] = PERF_COUNT_HW_REF_CPU_CYCLES,
 	};
+	u64 eventsel;
 
-	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);
+	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_PMC_MAX_FIXED);
+	BUILD_BUG_ON(index >= KVM_PMC_MAX_FIXED);
 
-	return (fixed_pmc_events[index].unit_mask << 8) |
-		fixed_pmc_events[index].event;
+	/*
+	 * Yell if perf reports support for a fixed counter but perf doesn't
+	 * have a known encoding for the associated general purpose event.
+	 */
+	eventsel = perf_get_hw_event_config(fixed_pmc_perf_ids[index]);
+	WARN_ON_ONCE(!eventsel && index < kvm_pmu_cap.num_counters_fixed);
+	return eventsel;
 }
 
 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
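
A subtlety in the new code: BUILD_BUG_ON(index >= KVM_PMC_MAX_FIXED) asserts
on a function parameter, which only compiles because the helper is
__always_inline and its single caller passes values the compiler can resolve
to constants. A hedged sketch of such a caller, assuming KVM's init-time loop
over the fixed counters (names approximate):

	/*
	 * With a compile-time loop bound and the helper forcibly inlined,
	 * the compiler unrolls the loop and sees a constant index in each
	 * inlined instance, allowing BUILD_BUG_ON() to be evaluated at
	 * build time.
	 */
	for (i = 0; i < KVM_PMC_MAX_FIXED; i++)
		pmu->fixed_counters[i].eventsel = intel_get_fixed_pmc_eventsel(i);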