
[v2,1/3] time/sched_clock: Add new variant sched_clock_register_epoch()

Message ID 20200505135544.6003-2-leo.yan@linaro.org (mailing list archive)
State New, archived
Series arm64: perf_event: Fix time offset prior to epoch

Commit Message

Leo Yan May 5, 2020, 1:55 p.m. UTC
Besides being used by sched clock itself, the sched clock's raw counter can
also serve other purposes in the same system, e.g. the raw counter can be
injected into hardware tracing data (like Arm's SPE); the perf tool can then
capture the trace data, extract the raw counter from it and use it to
generate timestamps.

The perf tool needs a way to convert the sched clock's raw counter cycles
into nanoseconds that can be compared against values coming out of
sched_clock().

To do this accurately, this patch adds a new variant API
sched_clock_register_epoch() which takes an extra argument 'epoch_offset';
as its name indicates, this argument returns the offset time (in
nanoseconds) for which the clock source had been running prior to the sched
clock epoch.
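
Since 'epoch_offset' is the number of nanoseconds the counter had already
accumulated before the sched clock epoch, a raw counter value maps to sched
clock time as roughly cyc_to_ns(cyc) - epoch_offset (ignoring counter
wrap-around and any later mult/shift updates). For illustration only (not
part of this patch), a user-space consumer such as perf could perform this
conversion as sketched below, assuming the mult/shift parameters and the
epoch offset have been exported to the tool by some mechanism:

  /*
   * Illustrative user-space sketch (not from this patch): convert a raw
   * counter value captured in trace data into a nanosecond value that can
   * be compared with sched_clock() output.  The mult/shift parameters and
   * epoch_offset are assumed to have been exported to the tool somehow.
   */
  #include <stdint.h>
  #include <stdio.h>

  static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
  {
          return (cyc * mult) >> shift;
  }

  int main(void)
  {
          uint32_t mult = 1, shift = 0;           /* e.g. a 1 GHz counter */
          uint64_t epoch_offset = 2000000000ULL;  /* 2s elapsed before epoch */
          uint64_t raw_cyc = 5000000000ULL;       /* counter value from trace */

          /* Remove the time the counter ran before the sched clock epoch */
          uint64_t ns = cyc_to_ns(raw_cyc, mult, shift) - epoch_offset;

          printf("sched_clock-comparable time: %llu ns\n",
                 (unsigned long long)ns);
          return 0;
  }

With the example parameters above this prints 3000000000 ns, i.e. the
captured counter value corresponds to a point 3 seconds after the sched
clock epoch.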

Signed-off-by: Leo Yan <leo.yan@linaro.org>
---
 include/linux/sched_clock.h | 10 ++++++++++
 kernel/time/sched_clock.c   | 13 ++++++++++++-
 2 files changed, 22 insertions(+), 1 deletion(-)

Patch

diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 0bb04a96a6d4..98965c0c7cd4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -10,6 +10,10 @@  extern void generic_sched_clock_init(void);
 
 extern void sched_clock_register(u64 (*read)(void), int bits,
 				 unsigned long rate);
+
+extern void sched_clock_register_epoch(u64 (*read)(void), int bits,
+				       unsigned long rate,
+				       u64 *epoch_offset);
 #else
 static inline void generic_sched_clock_init(void) { }
 
@@ -17,6 +21,12 @@  static inline void sched_clock_register(u64 (*read)(void), int bits,
 					unsigned long rate)
 {
 }
+
+static inline void sched_clock_register_epoch(u64 (*read)(void), int bits,
+					      unsigned long rate,
+					      u64 *epoch_offset)
+{
+}
 #endif
 
 #endif
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index fa3f800d7d76..b402196afc3f 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -165,7 +165,8 @@  static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
 }
 
 void __init
-sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
+sched_clock_register_epoch(u64 (*read)(void), int bits, unsigned long rate,
+			   u64 *epoch_offset)
 {
 	u64 res, wrap, new_mask, new_epoch, cyc, ns;
 	u32 new_mult, new_shift;
@@ -204,6 +205,10 @@  sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
 	rd.epoch_cyc		= new_epoch;
 	rd.epoch_ns		= ns;
 
+	/* Output epoch offset (ns) to clock event driver */
+	if (epoch_offset)
+		*epoch_offset = cyc_to_ns(new_epoch & new_mask, new_mult, new_shift) - ns;
+
 	update_clock_read_data(&rd);
 
 	if (sched_clock_timer.function != NULL) {
@@ -240,6 +245,12 @@  sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
 	pr_debug("Registered %pS as sched_clock source\n", read);
 }
 
+void __init
+sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
+{
+	sched_clock_register_epoch(read, bits, rate, NULL);
+}
+
 void __init generic_sched_clock_init(void)
 {
 	/*