| Message ID | 20220415161206.706185092@linutronix.de (mailing list archive) |
| --- | --- |
| State | Handled Elsewhere, archived |
| Series | x86/cpu: Consolidate APERF/MPERF code |
On Fri, Apr 15, 2022 at 9:20 PM Thomas Gleixner <tglx@linutronix.de> wrote:
>
> Preparation for sharing code with the CPU frequency portion of the
> aperf/mperf code.
>
> No functional change.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

All good AFAICS:

Reviewed-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

> ---
>  arch/x86/kernel/cpu/aperfmperf.c |   36 +++++++++++++++++++++---------------
>  1 file changed, 21 insertions(+), 15 deletions(-)
>
> --- a/arch/x86/kernel/cpu/aperfmperf.c
> +++ b/arch/x86/kernel/cpu/aperfmperf.c
> @@ -477,22 +477,9 @@ static DECLARE_WORK(disable_freq_invaria
>
>  DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
>
> -void arch_scale_freq_tick(void)
> +static void scale_freq_tick(u64 acnt, u64 mcnt)
>  {
> -	struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
> -	u64 aperf, mperf, acnt, mcnt, freq_scale;
> -
> -	if (!arch_scale_freq_invariant())
> -		return;
> -
> -	rdmsrl(MSR_IA32_APERF, aperf);
> -	rdmsrl(MSR_IA32_MPERF, mperf);
> -
> -	acnt = aperf - s->aperf;
> -	mcnt = mperf - s->mperf;
> -
> -	s->aperf = aperf;
> -	s->mperf = mperf;
> +	u64 freq_scale;
>
>  	if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
>  		goto error;
> @@ -514,4 +501,23 @@ void arch_scale_freq_tick(void)
>  	pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
>  	schedule_work(&disable_freq_invariance_work);
>  }
> +
> +void arch_scale_freq_tick(void)
> +{
> +	struct aperfmperf *s = this_cpu_ptr(&cpu_samples);
> +	u64 acnt, mcnt, aperf, mperf;
> +
> +	if (!arch_scale_freq_invariant())
> +		return;
> +
> +	rdmsrl(MSR_IA32_APERF, aperf);
> +	rdmsrl(MSR_IA32_MPERF, mperf);
> +	acnt = aperf - s->aperf;
> +	mcnt = mperf - s->mperf;
> +
> +	s->aperf = aperf;
> +	s->mperf = mperf;
> +
> +	scale_freq_tick(acnt, mcnt);
> +}
>  #endif /* CONFIG_X86_64 && CONFIG_SMP */
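For readers following the diff: the arithmetic kept in the new scale_freq_tick() helper boils down to pre-shifting the APERF delta so the APERF/MPERF division yields a value in SCHED_CAPACITY_SCALE units, with guards against shift overflow and a zero divisor. Below is a minimal user-space C sketch of that math, not the kernel code: shl_overflows() is a stand-in for the kernel's check_shl_overflow(), max_freq_ratio is assumed to mirror arch_max_freq_ratio (the max-to-base frequency ratio pre-scaled by SCHED_CAPACITY_SCALE), and on error it simply falls back to full capacity rather than scheduling disable_freq_invariance_work as the kernel does.

```c
/*
 * User-space sketch of the scaling math in scale_freq_tick().
 * Illustration only, not the kernel implementation: shl_overflows()
 * stands in for check_shl_overflow(), max_freq_ratio is assumed to
 * mirror arch_max_freq_ratio.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

/* Stand-in for check_shl_overflow(): returns true if v << shift overflows. */
static bool shl_overflows(uint64_t v, unsigned int shift, uint64_t *res)
{
	if (shift >= 64)
		return true;
	*res = v << shift;
	return (*res >> shift) != v;
}

/* acnt/mcnt are the per-tick deltas of MSR_IA32_APERF / MSR_IA32_MPERF. */
static uint64_t compute_freq_scale(uint64_t acnt, uint64_t mcnt,
				   uint64_t max_freq_ratio)
{
	uint64_t freq_scale;

	/* Pre-shift acnt so the division below lands in capacity units. */
	if (shl_overflows(acnt, 2 * SCHED_CAPACITY_SHIFT, &acnt))
		return SCHED_CAPACITY_SCALE;	/* kernel would disable invariance */

	mcnt *= max_freq_ratio;
	if (!mcnt)
		return SCHED_CAPACITY_SCALE;

	freq_scale = acnt / mcnt;
	return freq_scale > SCHED_CAPACITY_SCALE ? SCHED_CAPACITY_SCALE
						 : freq_scale;
}

int main(void)
{
	/* Example: CPU averaged 75% of its maximum frequency since last tick. */
	printf("freq_scale = %" PRIu64 "\n",
	       compute_freq_scale(750, 1000, SCHED_CAPACITY_SCALE));
	return 0;
}
```

With acnt=750, mcnt=1000 and no turbo headroom (ratio equal to SCHED_CAPACITY_SCALE), this prints freq_scale = 768, i.e. 75% of SCHED_CAPACITY_SCALE, which is the kind of value the tick publishes into the per-CPU arch_freq_scale.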
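For completeness, the rdmsrl(MSR_IA32_APERF)/rdmsrl(MSR_IA32_MPERF) sampling that the patch moves into arch_scale_freq_tick() can be approximated from user space through the msr driver, which exposes each CPU's MSRs via /dev/cpu/N/msr with the MSR address as the file offset. A rough sketch, assuming the msr module is loaded and root privileges (CPU affinity pinning is omitted for brevity):

```c
/*
 * User-space approximation of the APERF/MPERF delta sampling done in
 * arch_scale_freq_tick(), using the msr driver (/dev/cpu/N/msr).
 * Assumes "modprobe msr" and root; illustration only, not kernel code.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MPERF	0xE7
#define MSR_IA32_APERF	0xE8

/* The msr driver uses the MSR address as the read offset. */
static uint64_t rdmsr_fd(int fd, off_t msr)
{
	uint64_t val = 0;

	if (pread(fd, &val, sizeof(val), msr) != sizeof(val))
		perror("pread");
	return val;
}

int main(void)
{
	uint64_t aperf, mperf, acnt, mcnt;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}

	aperf = rdmsr_fd(fd, MSR_IA32_APERF);
	mperf = rdmsr_fd(fd, MSR_IA32_MPERF);
	sleep(1);
	acnt = rdmsr_fd(fd, MSR_IA32_APERF) - aperf;
	mcnt = rdmsr_fd(fd, MSR_IA32_MPERF) - mperf;

	/* APERF/MPERF delta ratio ~ effective vs. base frequency over 1s. */
	printf("acnt=%" PRIu64 " mcnt=%" PRIu64 " ratio*1000=%" PRIu64 "\n",
	       acnt, mcnt, mcnt ? acnt * 1000 / mcnt : 0);
	close(fd);
	return 0;
}
```

Both counters stop in deep C-states, so the ratio reflects frequency while the CPU is active: under full load with turbo it rises above 1000, and at low P-states it falls below.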