@@ -22,13 +22,6 @@ static unsigned long arch_timer_read_counter_long(void)
return arch_timer_read_counter();
}
-static u32 sched_clock_mult __read_mostly;
-
-static unsigned long long notrace arch_timer_sched_clock(void)
-{
- return arch_timer_read_counter() * sched_clock_mult;
-}
-
static struct delay_timer arch_delay_timer;
static void __init arch_timer_delay_timer_register(void)
@@ -48,11 +41,8 @@ int __init arch_timer_arch_init(void)
arch_timer_delay_timer_register();
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
- sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
- sched_clock_func = arch_timer_sched_clock;
- pr_info("sched_clock: ARM arch timer >56 bits at %ukHz, resolution %uns\n",
- arch_timer_rate / 1000, sched_clock_mult);
+ /* 56 bits minimum, so we assume worst case rollover */
+ sched_clock_setup(arch_timer_read_counter, 56, arch_timer_rate);
return 0;
}
@@ -17,6 +17,4 @@ static inline void sched_clock_postinit(void) { }
extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate);
extern void sched_clock_setup(u64 (*read)(void), int bits, unsigned long rate);
-extern unsigned long long (*sched_clock_func)(void);
-
#endif
@@ -173,20 +173,15 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
sched_clock_setup(read_sched_clock_32_wrapper, bits, rate);
}
-static unsigned long long notrace sched_clock_32(void)
-{
- u64 cyc = read_sched_clock();
- return cyc_to_sched_clock(cyc, sched_clock_mask);
-}
-
-unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
-
unsigned long long notrace sched_clock(void)
{
+ u64 cyc;
+
if (cd.suspended)
return cd.epoch_ns;
- return sched_clock_func();
+ cyc = read_sched_clock();
+ return cyc_to_sched_clock(cyc, sched_clock_mask);
}
void __init sched_clock_postinit(void)
Register with the generic sched_clock framework now that it supports 64 bits. This fixes two problems with the current sched_clock support for machines using the architected timers. First off, we don't subtract the start value from subsequent sched_clock calls so we can potentially start off with sched_clock returning gigantic numbers. Second, there is no support for suspend/resume handling so problems such as those discussed in 6a4dae5 (ARM: 7565/1: sched: stop sched_clock() during suspend, 2012-10-23) can happen without this patch. Signed-off-by: Stephen Boyd <sboyd@codeaurora.org> --- arch/arm/kernel/arch_timer.c | 14 ++------------ include/linux/sched_clock.h | 2 -- kernel/time/sched_clock.c | 13 ++++--------- 3 files changed, 6 insertions(+), 23 deletions(-)