
[v2,01/11] timekeeper: change interface of clocksource reading functions

Message ID 1500651918-14156-2-git-send-email-dplotnikov@virtuozzo.com (mailing list archive)
State New, archived

Commit Message

Denis Plotnikov July 21, 2017, 3:45 p.m. UTC
When using the timekeeping API, it is sometimes useful to also return
the cycles stamp value that was used in calculating the time, so that
the stamp can be reused for other purposes
(e.g. in the KVM master clock).
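
A minimal sketch of the resulting convention (illustrative only; the
helper names below are hypothetical and not part of this patch): a
->read() implementation that derives its value from a raw counter
stores that counter through the pointer, while callers that do not
need the stamp pass NULL, as the hunks below do.

    static u64 example_clock_read(struct clocksource *cs, u64 *cycles_stamp)
    {
    	u64 raw = example_read_counter();	/* hypothetical raw counter read */
    	u64 ns  = example_cycles_to_ns(raw);	/* hypothetical scaling step */

    	if (cycles_stamp)
    		*cycles_stamp = raw;		/* expose the stamp used for the result */

    	return ns;
    }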

Signed-off-by: Denis Plotnikov <dplotnikov@virtuozzo.com>
---
 arch/x86/hyperv/hv_init.c      |  4 ++--
 arch/x86/include/asm/pvclock.h |  3 ++-
 arch/x86/kernel/hpet.c         |  4 ++--
 arch/x86/kernel/kvmclock.c     |  4 ++--
 arch/x86/kernel/pvclock.c      |  6 ++++--
 arch/x86/kernel/tsc.c          |  2 +-
 arch/x86/lguest/boot.c         |  2 +-
 arch/x86/platform/uv/uv_time.c | 10 +++++-----
 arch/x86/xen/time.c            |  4 ++--
 drivers/char/hpet.c            |  2 +-
 drivers/clocksource/acpi_pm.c  | 13 +++++++------
 drivers/hv/hv_util.c           |  6 +++---
 include/linux/clocksource.h    |  7 +++++--
 kernel/time/clocksource.c      |  4 ++--
 kernel/time/jiffies.c          |  2 +-
 kernel/time/timekeeping.c      | 26 +++++++++++++-------------
 16 files changed, 53 insertions(+), 46 deletions(-)

Comments

kernel test robot July 23, 2017, 4:24 a.m. UTC | #1
Hi Denis,

[auto build test ERROR on tip/x86/core]
[cannot apply to v4.13-rc1 next-20170721]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Denis-Plotnikov/make-L2-s-kvm-clock-stable-get-rid-of-pvclock_gtod/20170723-113103
config: sh-allmodconfig (attached as .config)
compiler: sh4-linux-gnu-gcc (Debian 6.1.1-9) 6.1.1 20160705
reproduce:
        wget https://raw.githubusercontent.com/01org/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=sh 

All errors (new ones prefixed by >>):

   drivers/clocksource/timer-sun5i.c: In function 'sun5i_setup_clocksource':
>> drivers/clocksource/timer-sun5i.c:223:18: error: assignment from incompatible pointer type [-Werror=incompatible-pointer-types]
     cs->clksrc.read = sun5i_clksrc_read;
                     ^
   cc1: some warnings being treated as errors

vim +223 drivers/clocksource/timer-sun5i.c

3071efa4 Maxime Ripard 2015-03-31  185  
4a59058f Maxime Ripard 2015-03-31  186  static int __init sun5i_setup_clocksource(struct device_node *node,
4a59058f Maxime Ripard 2015-03-31  187  					  void __iomem *base,
4a59058f Maxime Ripard 2015-03-31  188  					  struct clk *clk, int irq)
4a59058f Maxime Ripard 2015-03-31  189  {
4a59058f Maxime Ripard 2015-03-31  190  	struct sun5i_timer_clksrc *cs;
4a59058f Maxime Ripard 2015-03-31  191  	unsigned long rate;
4a59058f Maxime Ripard 2015-03-31  192  	int ret;
4a59058f Maxime Ripard 2015-03-31  193  
4a59058f Maxime Ripard 2015-03-31  194  	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
4a59058f Maxime Ripard 2015-03-31  195  	if (!cs)
4a59058f Maxime Ripard 2015-03-31  196  		return -ENOMEM;
4a59058f Maxime Ripard 2015-03-31  197  
4a59058f Maxime Ripard 2015-03-31  198  	ret = clk_prepare_enable(clk);
4a59058f Maxime Ripard 2015-03-31  199  	if (ret) {
4a59058f Maxime Ripard 2015-03-31  200  		pr_err("Couldn't enable parent clock\n");
4a59058f Maxime Ripard 2015-03-31  201  		goto err_free;
4a59058f Maxime Ripard 2015-03-31  202  	}
4a59058f Maxime Ripard 2015-03-31  203  
4a59058f Maxime Ripard 2015-03-31  204  	rate = clk_get_rate(clk);
4a59058f Maxime Ripard 2015-03-31  205  
4a59058f Maxime Ripard 2015-03-31  206  	cs->timer.base = base;
4a59058f Maxime Ripard 2015-03-31  207  	cs->timer.clk = clk;
3071efa4 Maxime Ripard 2015-03-31  208  	cs->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clksrc;
3071efa4 Maxime Ripard 2015-03-31  209  	cs->timer.clk_rate_cb.next = NULL;
3071efa4 Maxime Ripard 2015-03-31  210  
3071efa4 Maxime Ripard 2015-03-31  211  	ret = clk_notifier_register(clk, &cs->timer.clk_rate_cb);
3071efa4 Maxime Ripard 2015-03-31  212  	if (ret) {
3071efa4 Maxime Ripard 2015-03-31  213  		pr_err("Unable to register clock notifier.\n");
3071efa4 Maxime Ripard 2015-03-31  214  		goto err_disable_clk;
3071efa4 Maxime Ripard 2015-03-31  215  	}
4a59058f Maxime Ripard 2015-03-31  216  
4a59058f Maxime Ripard 2015-03-31  217  	writel(~0, base + TIMER_INTVAL_LO_REG(1));
4a59058f Maxime Ripard 2015-03-31  218  	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
4a59058f Maxime Ripard 2015-03-31  219  	       base + TIMER_CTL_REG(1));
4a59058f Maxime Ripard 2015-03-31  220  
59387683 Chen-Yu Tsai  2016-10-18  221  	cs->clksrc.name = node->name;
59387683 Chen-Yu Tsai  2016-10-18  222  	cs->clksrc.rating = 340;
59387683 Chen-Yu Tsai  2016-10-18 @223  	cs->clksrc.read = sun5i_clksrc_read;
59387683 Chen-Yu Tsai  2016-10-18  224  	cs->clksrc.mask = CLOCKSOURCE_MASK(32);
59387683 Chen-Yu Tsai  2016-10-18  225  	cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
59387683 Chen-Yu Tsai  2016-10-18  226  
59387683 Chen-Yu Tsai  2016-10-18  227  	ret = clocksource_register_hz(&cs->clksrc, rate);
4a59058f Maxime Ripard 2015-03-31  228  	if (ret) {
4a59058f Maxime Ripard 2015-03-31  229  		pr_err("Couldn't register clock source.\n");
3071efa4 Maxime Ripard 2015-03-31  230  		goto err_remove_notifier;
4a59058f Maxime Ripard 2015-03-31  231  	}
4a59058f Maxime Ripard 2015-03-31  232  
4a59058f Maxime Ripard 2015-03-31  233  	return 0;
4a59058f Maxime Ripard 2015-03-31  234  
3071efa4 Maxime Ripard 2015-03-31  235  err_remove_notifier:
3071efa4 Maxime Ripard 2015-03-31  236  	clk_notifier_unregister(clk, &cs->timer.clk_rate_cb);
4a59058f Maxime Ripard 2015-03-31  237  err_disable_clk:
4a59058f Maxime Ripard 2015-03-31  238  	clk_disable_unprepare(clk);
4a59058f Maxime Ripard 2015-03-31  239  err_free:
4a59058f Maxime Ripard 2015-03-31  240  	kfree(cs);
4a59058f Maxime Ripard 2015-03-31  241  	return ret;
4a59058f Maxime Ripard 2015-03-31  242  }
4a59058f Maxime Ripard 2015-03-31  243  

:::::: The code at line 223 was first introduced by commit
:::::: 593876838826914a7e4e05fbbcb728be6fbc4d89 Revert "clocksource/drivers/timer_sun5i: Replace code by clocksource_mmio_init"

:::::: TO: Chen-Yu Tsai <wens@csie.org>
:::::: CC: Thomas Gleixner <tglx@linutronix.de>
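
The error above indicates that drivers/clocksource/timer-sun5i.c still
defines its read callback with the old single-argument prototype, so the
assignment at line 223 no longer matches the new clocksource::read
signature. A hypothetical follow-up fix (not part of the posted series;
the body is assumed to keep the driver's existing counter read) would
simply adopt the extra parameter and leave it untouched:

    static u64 sun5i_clksrc_read(struct clocksource *clksrc, u64 *cycles_stamp)
    {
    	struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);

    	/* no separate cycle stamp here, so *cycles_stamp is left untouched */
    	return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
    }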

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Patch

diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 5b882cc..43ed8c2 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -36,7 +36,7 @@  struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
 	return tsc_pg;
 }
 
-static u64 read_hv_clock_tsc(struct clocksource *arg)
+static u64 read_hv_clock_tsc(struct clocksource *arg, u64 *cycle_stamp)
 {
 	u64 current_tick = hv_read_tsc_page(tsc_pg);
 
@@ -55,7 +55,7 @@  static struct clocksource hyperv_cs_tsc = {
 };
 #endif
 
-static u64 read_hv_clock_msr(struct clocksource *arg)
+static u64 read_hv_clock_msr(struct clocksource *arg, u64 *cycles_stamp)
 {
 	u64 current_tick;
 	/*
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 448cfe1..1095ad6 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -14,7 +14,8 @@  static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
 #endif
 
 /* some helper functions for xen and kvm pv clock sources */
-u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src,
+				u64 *cycles_stamp);
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
 void pvclock_set_flags(u8 flags);
 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 89ff7af..091ef2f 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -792,7 +792,7 @@  static union hpet_lock hpet __cacheline_aligned = {
 	{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
 };
 
-static u64 read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs, u64 *cycles_stamp)
 {
 	unsigned long flags;
 	union hpet_lock old, new;
@@ -850,7 +850,7 @@  static u64 read_hpet(struct clocksource *cs)
 /*
  * For UP or 32-bit.
  */
-static u64 read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return (u64)hpet_readl(HPET_COUNTER);
 }
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d889676..177f2f4 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -91,12 +91,12 @@  static u64 kvm_clock_read(void)
 	preempt_disable_notrace();
 	cpu = smp_processor_id();
 	src = &hv_clock[cpu].pvti;
-	ret = pvclock_clocksource_read(src);
+	ret = pvclock_clocksource_read(src, NULL);
 	preempt_enable_notrace();
 	return ret;
 }
 
-static u64 kvm_clock_get_cycles(struct clocksource *cs)
+static u64 kvm_clock_get_cycles(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return kvm_clock_read();
 }
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 5c3f6d6..1a0d86a 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -73,7 +73,8 @@  u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 	return flags & valid_flags;
 }
 
-u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src,
+				u64 *cycles_stamp)
 {
 	unsigned version;
 	u64 ret;
@@ -136,7 +137,8 @@  void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
 		rmb();		/* fetch time before checking version */
 	} while ((wall_clock->version & 1) || (version != wall_clock->version));
 
-	delta = pvclock_clocksource_read(vcpu_time);	/* time since system boot */
+	/* time since system boot */
+	delta = pvclock_clocksource_read(vcpu_time, NULL);
 	delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;
 
 	now.tv_nsec = do_div(delta, NSEC_PER_SEC);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 714dfba..b475f6c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1110,7 +1110,7 @@  static void tsc_resume(struct clocksource *cs)
  * checking the result of read_tsc() - cycle_last for being negative.
  * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
  */
-static u64 read_tsc(struct clocksource *cs)
+static u64 read_tsc(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return (u64)rdtsc_ordered();
 }
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 9947269..9109cdc 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -916,7 +916,7 @@  static unsigned long lguest_tsc_khz(void)
  * If we can't use the TSC, the kernel falls back to our lower-priority
  * "lguest_clock", where we read the time value given to us by the Host.
  */
-static u64 lguest_clock_read(struct clocksource *cs)
+static u64 lguest_clock_read(struct clocksource *cs, u64 *cycles_stamp)
 {
 	unsigned long sec, nsec;
 
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index b082d71..4dddc4c 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -30,7 +30,7 @@ 
 
 #define RTC_NAME		"sgi_rtc"
 
-static u64 uv_read_rtc(struct clocksource *cs);
+static u64 uv_read_rtc(struct clocksource *cs, u64 *cycles_stamp);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static int uv_rtc_shutdown(struct clock_event_device *evt);
 
@@ -133,7 +133,7 @@  static int uv_setup_intr(int cpu, u64 expires)
 	/* Initialize comparator value */
 	uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
 
-	if (uv_read_rtc(NULL) <= expires)
+	if (uv_read_rtc(NULL, NULL) <= expires)
 		return 0;
 
 	return !uv_intr_pending(pnode);
@@ -269,7 +269,7 @@  static int uv_rtc_unset_timer(int cpu, int force)
 
 	spin_lock_irqsave(&head->lock, flags);
 
-	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
+	if ((head->next_cpu == bcpu && uv_read_rtc(NULL, NULL) >= *t) || force)
 		rc = 1;
 
 	if (rc) {
@@ -296,7 +296,7 @@  static int uv_rtc_unset_timer(int cpu, int force)
  * cachelines of it's own page.  This allows faster simultaneous reads
  * from a given socket.
  */
-static u64 uv_read_rtc(struct clocksource *cs)
+static u64 uv_read_rtc(struct clocksource *cs, u64 *cycles_stamp)
 {
 	unsigned long offset;
 
@@ -316,7 +316,7 @@  static int uv_rtc_next_event(unsigned long delta,
 {
 	int ced_cpu = cpumask_first(ced->cpumask);
 
-	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
+	return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL, NULL));
 }
 
 /*
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index a1895a8..aafbe6d 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -46,12 +46,12 @@  u64 xen_clocksource_read(void)
 
 	preempt_disable_notrace();
 	src = &__this_cpu_read(xen_vcpu)->time;
-	ret = pvclock_clocksource_read(src);
+	ret = pvclock_clocksource_read(src, NULL);
 	preempt_enable_notrace();
 	return ret;
 }
 
-static u64 xen_clocksource_get_cycles(struct clocksource *cs)
+static u64 xen_clocksource_get_cycles(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return xen_clocksource_read();
 }
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index b941e6d..4702207 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -70,7 +70,7 @@  static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
 #ifdef CONFIG_IA64
 static void __iomem *hpet_mctr;
 
-static u64 read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return (u64)read_counter((void __iomem *)hpet_mctr);
 }
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 1961e35..c7420b2 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -58,7 +58,7 @@  u32 acpi_pm_read_verified(void)
 	return v2;
 }
 
-static u64 acpi_pm_read(struct clocksource *cs)
+static u64 acpi_pm_read(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return (u64)read_pmtmr();
 }
@@ -81,7 +81,7 @@  static int __init acpi_pm_good_setup(char *__str)
 }
 __setup("acpi_pm_good", acpi_pm_good_setup);
 
-static u64 acpi_pm_read_slow(struct clocksource *cs)
+static u64 acpi_pm_read_slow(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return (u64)acpi_pm_read_verified();
 }
@@ -149,9 +149,9 @@  static int verify_pmtmr_rate(void)
 	unsigned long count, delta;
 
 	mach_prepare_counter();
-	value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
+	value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm, NULL);
 	mach_countup(&count);
-	value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
+	value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm, NULL);
 	delta = (value2 - value1) & ACPI_PM_MASK;
 
 	/* Check that the PMTMR delta is within 5% of what we expect */
@@ -184,9 +184,10 @@  static int __init init_acpi_pm_clocksource(void)
 	/* "verify" this timing source: */
 	for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
 		udelay(100 * j);
-		value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
+		value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm, NULL);
 		for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
-			value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
+			value2 = clocksource_acpi_pm.read(
+					&clocksource_acpi_pm, NULL);
 			if (value2 == value1)
 				continue;
 			if (value2 > value1)
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 186b100..74def09 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -218,7 +218,7 @@  static void hv_set_host_time(struct work_struct *work)
 
 	wrk = container_of(work, struct adj_time_work, work);
 
-	reftime = hyperv_cs->read(hyperv_cs);
+	reftime = hyperv_cs->read(hyperv_cs, NULL);
 	newtime = wrk->host_time + (reftime - wrk->ref_time);
 	host_ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
 
@@ -278,7 +278,7 @@  static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
 		 */
 		spin_lock_irqsave(&host_ts.lock, flags);
 
-		cur_reftime = hyperv_cs->read(hyperv_cs);
+		cur_reftime = hyperv_cs->read(hyperv_cs, NULL);
 		host_ts.host_time = hosttime;
 		host_ts.ref_time = cur_reftime;
 		ktime_get_snapshot(&host_ts.snap);
@@ -530,7 +530,7 @@  static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
 	u64 newtime, reftime;
 
 	spin_lock_irqsave(&host_ts.lock, flags);
-	reftime = hyperv_cs->read(hyperv_cs);
+	reftime = hyperv_cs->read(hyperv_cs, NULL);
 	newtime = host_ts.host_time + (reftime - host_ts.ref_time);
 	*ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
 	spin_unlock_irqrestore(&host_ts.lock, flags);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index f2b10d9..b6f00a4 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -48,7 +48,10 @@  struct module;
  *			400-499: Perfect
  *				The ideal clocksource. A must-use where
  *				available.
- * @read:		returns a cycle value, passes clocksource as argument
+ * @read:		returns a cycle value, passes as arguments the clocksource
+ *			and a pointer to store the cycles "stamp" that was used
+ *			in calculating the returned cycle value, if any;
+ *			otherwise the pointer value is left untouched.
  * @enable:		optional function to enable the clocksource
  * @disable:		optional function to disable the clocksource
  * @mask:		bitmask for two's complement
@@ -77,7 +80,7 @@  struct module;
  * structure.
  */
 struct clocksource {
-	u64 (*read)(struct clocksource *cs);
+	u64 (*read)(struct clocksource *cs, u64 *cycles_stamp);
 	u64 mask;
 	u32 mult;
 	u32 shift;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 93621ae..e48a6eb 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -194,8 +194,8 @@  static void clocksource_watchdog(unsigned long data)
 		}
 
 		local_irq_disable();
-		csnow = cs->read(cs);
-		wdnow = watchdog->read(watchdog);
+		csnow = cs->read(cs, NULL);
+		wdnow = watchdog->read(watchdog, NULL);
 		local_irq_enable();
 
 		/* Clocksource initialized ? */
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 4977191..b235dce 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -48,7 +48,7 @@ 
 #define JIFFIES_SHIFT	8
 #endif
 
-static u64 jiffies_read(struct clocksource *cs)
+static u64 jiffies_read(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return (u64) jiffies;
 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b602c48..5d0c4d0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -131,11 +131,11 @@  static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
  * a read of the fast-timekeeper tkrs (which is protected by its own locking
  * and update logic).
  */
-static inline u64 tk_clock_read(struct tk_read_base *tkr)
+static inline u64 tk_clock_read(struct tk_read_base *tkr, u64 *cycles_stamp)
 {
 	struct clocksource *clock = READ_ONCE(tkr->clock);
 
-	return clock->read(clock);
+	return clock->read(clock, cycles_stamp);
 }
 
 #ifdef CONFIG_DEBUG_TIMEKEEPING
@@ -195,7 +195,7 @@  static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 	 */
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		now = tk_clock_read(tkr);
+		now = tk_clock_read(tkr, NULL);
 		last = tkr->cycle_last;
 		mask = tkr->mask;
 		max = tkr->clock->max_cycles;
@@ -229,7 +229,7 @@  static inline u64 timekeeping_get_delta(struct tk_read_base *tkr)
 	u64 cycle_now, delta;
 
 	/* read clocksource */
-	cycle_now = tk_clock_read(tkr);
+	cycle_now = tk_clock_read(tkr, NULL);
 
 	/* calculate the delta since the last update_wall_time */
 	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -259,7 +259,7 @@  static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	old_clock = tk->tkr_mono.clock;
 	tk->tkr_mono.clock = clock;
 	tk->tkr_mono.mask = clock->mask;
-	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
+	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono, NULL);
 
 	tk->tkr_raw.clock = clock;
 	tk->tkr_raw.mask = clock->mask;
@@ -422,7 +422,7 @@  static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
 
 		now += timekeeping_delta_to_ns(tkr,
 				clocksource_delta(
-					tk_clock_read(tkr),
+					tk_clock_read(tkr, NULL),
 					tkr->cycle_last,
 					tkr->mask));
 	} while (read_seqcount_retry(&tkf->seq, seq));
@@ -474,7 +474,7 @@  EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns);
 /* Suspend-time cycles value for halted fast timekeeper. */
 static u64 cycles_at_suspend;
 
-static u64 dummy_clock_read(struct clocksource *cs)
+static u64 dummy_clock_read(struct clocksource *cs, u64 *cycles_stamp)
 {
 	return cycles_at_suspend;
 }
@@ -499,7 +499,7 @@  static void halt_fast_timekeeper(struct timekeeper *tk)
 	struct tk_read_base *tkr = &tk->tkr_mono;
 
 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
-	cycles_at_suspend = tk_clock_read(tkr);
+	cycles_at_suspend = tk_clock_read(tkr, NULL);
 	tkr_dummy.clock = &dummy_clock;
 	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
 
@@ -674,7 +674,7 @@  static void timekeeping_forward_now(struct timekeeper *tk)
 	u64 cycle_now, delta;
 	u64 nsec;
 
-	cycle_now = tk_clock_read(&tk->tkr_mono);
+	cycle_now = tk_clock_read(&tk->tkr_mono, NULL);
 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 	tk->tkr_mono.cycle_last = cycle_now;
 	tk->tkr_raw.cycle_last  = cycle_now;
@@ -950,7 +950,7 @@  void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot)
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		now = tk_clock_read(&tk->tkr_mono);
+		now = tk_clock_read(&tk->tkr_mono, NULL);
 		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
 		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
 		base_real = ktime_add(tk->tkr_mono.base,
@@ -1128,7 +1128,7 @@  int get_device_system_crosststamp(int (*get_time_fn)
 		 * Check whether the system counter value provided by the
 		 * device driver is on the current timekeeping interval.
 		 */
-		now = tk_clock_read(&tk->tkr_mono);
+		now = tk_clock_read(&tk->tkr_mono, NULL);
 		interval_start = tk->tkr_mono.cycle_last;
 		if (!cycle_between(interval_start, cycles, now)) {
 			clock_was_set_seq = tk->clock_was_set_seq;
@@ -1649,7 +1649,7 @@  void timekeeping_resume(void)
 	 * The less preferred source will only be tried if there is no better
 	 * usable source. The rtc part is handled separately in rtc core code.
 	 */
-	cycle_now = tk_clock_read(&tk->tkr_mono);
+	cycle_now = tk_clock_read(&tk->tkr_mono, NULL);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
 		cycle_now > tk->tkr_mono.cycle_last) {
 		u64 nsec, cyc_delta;
@@ -2051,7 +2051,7 @@  void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
+	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono, NULL),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
 #endif