Message ID | 20221111102645.82001-2-likexu@tencent.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: x86: Add AMD Guest PerfMonV2 PMU support | expand |
On Fri, Nov 11, 2022, Like Xu wrote: > From: Like Xu <likexu@tencent.com> > > The name of function pmc_is_enabled() is a bit misleading. A PMC can > be disabled either by PERF_GLOBAL_CTRL or by its corresponding EVTSEL. > Add the global semantic to its name. > > Suggested-by: Jim Mattson <jmattson@google.com> > Signed-off-by: Like Xu <likexu@tencent.com> > --- ... > diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c > index 684393c22105..e57f707fb940 100644 > --- a/arch/x86/kvm/pmu.c > +++ b/arch/x86/kvm/pmu.c > @@ -83,7 +83,7 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops) > #undef __KVM_X86_PMU_OP > } > > -static inline bool pmc_is_enabled(struct kvm_pmc *pmc) > +static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc) > { > return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc); This doesn't compile. v3, and I'm getting pings, and the very first patch doesn't compile.
On 27/1/2023 10:03 am, Sean Christopherson wrote: > On Fri, Nov 11, 2022, Like Xu wrote: >> From: Like Xu <likexu@tencent.com> >> >> The name of function pmc_is_enabled() is a bit misleading. A PMC can >> be disabled either by PERF_GLOBAL_CTRL or by its corresponding EVTSEL. >> Add the global semantic to its name. >> >> Suggested-by: Jim Mattson <jmattson@google.com> >> Signed-off-by: Like Xu <likexu@tencent.com> >> --- > ... > >> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c >> index 684393c22105..e57f707fb940 100644 >> --- a/arch/x86/kvm/pmu.c >> +++ b/arch/x86/kvm/pmu.c >> @@ -83,7 +83,7 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops) >> #undef __KVM_X86_PMU_OP >> } >> >> -static inline bool pmc_is_enabled(struct kvm_pmc *pmc) >> +static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc) >> { >> return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc); > This doesn't compile. v3, and I'm getting pings, and the very first patch doesn't > compile. > Oops, I'm very sorry for breaking git-bisectability; it's my fault for splitting the code diff incorrectly (it only compiles cleanly together with the 4th patch). I will improve my process before sending any more patches to you. Thank you for taking the time to review the rest of the patches in detail, as you always do. The new version is under construction; apologies again.
diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h index c17e3e96fc1d..86a3fb01e103 100644 --- a/arch/x86/include/asm/kvm-x86-pmu-ops.h +++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h @@ -13,7 +13,7 @@ BUILD_BUG_ON(1) * at the call sites. */ KVM_X86_PMU_OP(hw_event_available) -KVM_X86_PMU_OP(pmc_is_enabled) +KVM_X86_PMU_OP(pmc_is_globally_enabled) KVM_X86_PMU_OP(pmc_idx_to_pmc) KVM_X86_PMU_OP(rdpmc_ecx_to_pmc) KVM_X86_PMU_OP(msr_idx_to_pmc) diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 684393c22105..e57f707fb940 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -83,7 +83,7 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops) #undef __KVM_X86_PMU_OP } -static inline bool pmc_is_enabled(struct kvm_pmc *pmc) +static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc) { return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc); } @@ -306,7 +306,7 @@ static void reprogram_counter(struct kvm_pmc *pmc) pmc_pause_counter(pmc); - if (!pmc_speculative_in_use(pmc) || !pmc_is_enabled(pmc)) + if (!pmc_speculative_in_use(pmc) || !pmc_is_globally_enabled(pmc)) goto reprogram_complete; if (!check_pmu_event_filter(pmc)) @@ -581,7 +581,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id) for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) { pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i); - if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc)) + if (!pmc || !pmc_is_globally_enabled(pmc) || !pmc_speculative_in_use(pmc)) continue; /* Ignore checks for edge detect, pin control, invert and CMASK bits */ diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 85ff3c0588ba..2b5376ba66ea 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -26,7 +26,7 @@ struct kvm_event_hw_type_mapping { struct kvm_pmu_ops { bool (*hw_event_available)(struct kvm_pmc *pmc); - bool (*pmc_is_enabled)(struct kvm_pmc *pmc); + bool (*pmc_is_globally_enabled)(struct kvm_pmc *pmc); struct 
kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx); struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu, unsigned int idx, u64 *mask); diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c index 0e313fbae055..7958a983b760 100644 --- a/arch/x86/kvm/svm/pmu.c +++ b/arch/x86/kvm/svm/pmu.c @@ -218,7 +218,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu) struct kvm_pmu_ops amd_pmu_ops __initdata = { .hw_event_available = amd_hw_event_available, - .pmc_is_enabled = amd_pmc_is_enabled, + .pmc_is_globally_enabled = amd_pmc_is_enabled, .pmc_idx_to_pmc = amd_pmc_idx_to_pmc, .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc, .msr_idx_to_pmc = amd_msr_idx_to_pmc, diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c index e5cec07ca8d9..f81cf54a245f 100644 --- a/arch/x86/kvm/vmx/pmu_intel.c +++ b/arch/x86/kvm/vmx/pmu_intel.c @@ -797,7 +797,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu) struct kvm_pmu_ops intel_pmu_ops __initdata = { .hw_event_available = intel_hw_event_available, - .pmc_is_enabled = intel_pmc_is_enabled, + .pmc_is_globally_enabled = intel_pmc_is_enabled, .pmc_idx_to_pmc = intel_pmc_idx_to_pmc, .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc, .msr_idx_to_pmc = intel_msr_idx_to_pmc,