Message ID | 20211108111032.24457-7-likexu@tencent.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Use static_call for kvm_pmu_ops | expand |
On Mon, Nov 08, 2021, Like Xu wrote: > From: Like Xu <likexu@tencent.com> > > Use static calls to improve kvm_pmu_ops performance. Introduce the > definitions that will be used by a subsequent patch to actualize the > savings. Add a new kvm-x86-pmu-ops.h header that can be used for the > definition of static calls. This header is also intended to be > used to simplify the definition of amd_pmu_ops and intel_pmu_ops. > > Like what we did for kvm_x86_ops, 'pmu_ops' can be covered by > static calls in a similar manner for insignificant but not > negligible performance impact, especially on older models. > > Signed-off-by: Like Xu <likexu@tencent.com> > --- This absolutely shouldn't be separated from patch 7/7. By _defining_ the static calls but not providing the logic to actually _update_ the calls, it's entirely possible to add static_call() invocations that will compile cleanly without any chance of doing the right thing at runtime. diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index 0236c1a953d0..804f98b5552e 100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -99,7 +99,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc) static inline bool pmc_is_enabled(struct kvm_pmc *pmc) { - return kvm_pmu_ops.pmc_is_enabled(pmc); + return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc); } static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, > @@ -0,0 +1,32 @@ > +/* SPDX-License-Identifier: GPL-2.0 */ > +#if !defined(KVM_X86_PMU_OP) || !defined(KVM_X86_PMU_OP_NULL) > +BUILD_BUG_ON(1) > +#endif > + > +/* > + * KVM_X86_PMU_OP() and KVM_X86_PMU_OP_NULL() are used to Please use all 80 chars. > + * help generate "static_call()"s. They are also intended for use when defining > + * the amd/intel KVM_X86_PMU_OPs. KVM_X86_PMU_OP() can be used AMD/Intel since this is referring to the vendor and not to function names (like the below reference). > + * for those functions that follow the [amd|intel]_func_name convention. 
> + * KVM_X86_PMU_OP_NULL() can leave a NULL definition for the As below, please drop the _NULL() variant. > + * case where there is no definition or a function name that > + * doesn't match the typical naming convention is supplied. > + */ ... > diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c > index 353989bf0102..bfdd9f2bc0fa 100644 > --- a/arch/x86/kvm/pmu.c > +++ b/arch/x86/kvm/pmu.c > @@ -50,6 +50,12 @@ > struct kvm_pmu_ops kvm_pmu_ops __read_mostly; > EXPORT_SYMBOL_GPL(kvm_pmu_ops); > > +#define KVM_X86_PMU_OP(func) \ > + DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func, \ > + *(((struct kvm_pmu_ops *)0)->func)) > +#define KVM_X86_PMU_OP_NULL KVM_X86_PMU_OP > +#include <asm/kvm-x86-pmu-ops.h> > + > static void kvm_pmi_trigger_fn(struct irq_work *irq_work) > { > struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work); > diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h > index b2fe135d395a..40e0b523637b 100644 > --- a/arch/x86/kvm/pmu.h > +++ b/arch/x86/kvm/pmu.h > @@ -45,6 +45,11 @@ struct kvm_pmu_ops { > void (*cleanup)(struct kvm_vcpu *vcpu); > }; > > +#define KVM_X86_PMU_OP(func) \ > + DECLARE_STATIC_CALL(kvm_x86_pmu_##func, *(((struct kvm_pmu_ops *)0)->func)) > +#define KVM_X86_PMU_OP_NULL KVM_X86_PMU_OP I don't want to proliferate the pointless and bitrot-prone KVM_X86_OP_NULL macro, just omit this. I'll send a patch to drop KVM_X86_OP_NULL. > +#include <asm/kvm-x86-pmu-ops.h> > + > static inline u64 pmc_bitmask(struct kvm_pmc *pmc) > { > struct kvm_pmu *pmu = pmc_to_pmu(pmc); > -- > 2.33.0 >
Hi Sean, On 9/12/2021 2:35 am, Sean Christopherson wrote: > On Mon, Nov 08, 2021, Like Xu wrote: >> From: Like Xu <likexu@tencent.com> >> >> Use static calls to improve kvm_pmu_ops performance. Introduce the >> definitions that will be used by a subsequent patch to actualize the >> savings. Add a new kvm-x86-pmu-ops.h header that can be used for the >> definition of static calls. This header is also intended to be >> used to simplify the defition of amd_pmu_ops and intel_pmu_ops. >> >> Like what we did for kvm_x86_ops, 'pmu_ops' can be covered by >> static calls in a simlilar manner for insignificant but not >> negligible performance impact, especially on older models. >> >> Signed-off-by: Like Xu <likexu@tencent.com> >> --- > > This absolutely shouldn't be separated from patch 7/7. By _defining_ the static > calls but not providing the logic to actually _update_ the calls, it's entirely > possible to add static_call() invocations that will compile cleanly without any > chance of doing the right thing at runtime. > > diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h > index 0236c1a953d0..804f98b5552e 100644 > --- a/arch/x86/kvm/pmu.h > +++ b/arch/x86/kvm/pmu.h > @@ -99,7 +99,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc) > > static inline bool pmc_is_enabled(struct kvm_pmc *pmc) > { > - return kvm_pmu_ops.pmc_is_enabled(pmc); > + return static_call(kvm_x86_pmu_pmc_is_enabled)(pmc); > } > > static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu, > >> @@ -0,0 +1,32 @@ >> +/* SPDX-License-Identifier: GPL-2.0 */ >> +#if !defined(KVM_X86_PMU_OP) || !defined(KVM_X86_PMU_OP_NULL) >> +BUILD_BUG_ON(1) >> +#endif >> + >> +/* >> + * KVM_X86_PMU_OP() and KVM_X86_PMU_OP_NULL() are used to > > Please use all 80 chars. > >> + * help generate "static_call()"s. They are also intended for use when defining >> + * the amd/intel KVM_X86_PMU_OPs. 
KVM_X86_PMU_OP() can be used > > AMD/Intel since this is referring to the vendor and not to function names (like > the below reference). > >> + * for those functions that follow the [amd|intel]_func_name convention. >> + * KVM_X86_PMU_OP_NULL() can leave a NULL definition for the > > As below, please drop the _NULL() variant. > >> + * case where there is no definition or a function name that >> + * doesn't match the typical naming convention is supplied. >> + */ > > ... > >> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c >> index 353989bf0102..bfdd9f2bc0fa 100644 >> --- a/arch/x86/kvm/pmu.c >> +++ b/arch/x86/kvm/pmu.c >> @@ -50,6 +50,12 @@ >> struct kvm_pmu_ops kvm_pmu_ops __read_mostly; >> EXPORT_SYMBOL_GPL(kvm_pmu_ops); >> >> +#define KVM_X86_PMU_OP(func) \ >> + DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func, \ >> + *(((struct kvm_pmu_ops *)0)->func)) >> +#define KVM_X86_PMU_OP_NULL KVM_X86_PMU_OP >> +#include <asm/kvm-x86-pmu-ops.h> >> + >> static void kvm_pmi_trigger_fn(struct irq_work *irq_work) >> { >> struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work); >> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h >> index b2fe135d395a..40e0b523637b 100644 >> --- a/arch/x86/kvm/pmu.h >> +++ b/arch/x86/kvm/pmu.h >> @@ -45,6 +45,11 @@ struct kvm_pmu_ops { >> void (*cleanup)(struct kvm_vcpu *vcpu); >> }; >> >> +#define KVM_X86_PMU_OP(func) \ >> + DECLARE_STATIC_CALL(kvm_x86_pmu_##func, *(((struct kvm_pmu_ops *)0)->func)) >> +#define KVM_X86_PMU_OP_NULL KVM_X86_PMU_OP > > I don't want to proliferate the pointless and bitrot-prone KVM_X86_OP_NULL macro, > just omit this. I'll send a patch to drop KVM_X86_OP_NULL. Thanks for your clear comments on this patch set. I will send out V3 once KVM_X86_OP_NULL is dropped as well as the comment in arch/x86/include/asm/kvm-x86-ops.h is updated. > >> +#include <asm/kvm-x86-pmu-ops.h> >> + >> static inline u64 pmc_bitmask(struct kvm_pmc *pmc) >> { >> struct kvm_pmu *pmu = pmc_to_pmu(pmc); >> -- >> 2.33.0 >> >
diff --git a/arch/x86/include/asm/kvm-x86-pmu-ops.h b/arch/x86/include/asm/kvm-x86-pmu-ops.h new file mode 100644 index 000000000000..b7713b16d21d --- /dev/null +++ b/arch/x86/include/asm/kvm-x86-pmu-ops.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(KVM_X86_PMU_OP) || !defined(KVM_X86_PMU_OP_NULL) +BUILD_BUG_ON(1) +#endif + +/* + * KVM_X86_PMU_OP() and KVM_X86_PMU_OP_NULL() are used to + * help generate "static_call()"s. They are also intended for use when defining + * the amd/intel KVM_X86_PMU_OPs. KVM_X86_PMU_OP() can be used + * for those functions that follow the [amd|intel]_func_name convention. + * KVM_X86_PMU_OP_NULL() can leave a NULL definition for the + * case where there is no definition or a function name that + * doesn't match the typical naming convention is supplied. + */ +KVM_X86_PMU_OP(find_arch_event); +KVM_X86_PMU_OP(find_fixed_event); +KVM_X86_PMU_OP(pmc_is_enabled); +KVM_X86_PMU_OP(pmc_idx_to_pmc); +KVM_X86_PMU_OP(rdpmc_ecx_to_pmc); +KVM_X86_PMU_OP(msr_idx_to_pmc); +KVM_X86_PMU_OP(is_valid_rdpmc_ecx); +KVM_X86_PMU_OP(is_valid_msr); +KVM_X86_PMU_OP(get_msr); +KVM_X86_PMU_OP(set_msr); +KVM_X86_PMU_OP(refresh); +KVM_X86_PMU_OP(init); +KVM_X86_PMU_OP(reset); +KVM_X86_PMU_OP_NULL(deliver_pmi); +KVM_X86_PMU_OP_NULL(cleanup); + +#undef KVM_X86_PMU_OP +#undef KVM_X86_PMU_OP_NULL diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index 353989bf0102..bfdd9f2bc0fa 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c @@ -50,6 +50,12 @@ struct kvm_pmu_ops kvm_pmu_ops __read_mostly; EXPORT_SYMBOL_GPL(kvm_pmu_ops); +#define KVM_X86_PMU_OP(func) \ + DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func, \ + *(((struct kvm_pmu_ops *)0)->func)) +#define KVM_X86_PMU_OP_NULL KVM_X86_PMU_OP +#include <asm/kvm-x86-pmu-ops.h> + static void kvm_pmi_trigger_fn(struct irq_work *irq_work) { struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work); diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h index b2fe135d395a..40e0b523637b 
100644 --- a/arch/x86/kvm/pmu.h +++ b/arch/x86/kvm/pmu.h @@ -45,6 +45,11 @@ struct kvm_pmu_ops { void (*cleanup)(struct kvm_vcpu *vcpu); }; +#define KVM_X86_PMU_OP(func) \ + DECLARE_STATIC_CALL(kvm_x86_pmu_##func, *(((struct kvm_pmu_ops *)0)->func)) +#define KVM_X86_PMU_OP_NULL KVM_X86_PMU_OP +#include <asm/kvm-x86-pmu-ops.h> + static inline u64 pmc_bitmask(struct kvm_pmc *pmc) { struct kvm_pmu *pmu = pmc_to_pmu(pmc);