@@ -64,6 +64,8 @@ struct vdso_data {
u32 tz_minuteswest; /* timezone info for gettimeofday(2) */
u32 tz_dsttime;
+ u32 btm_sec; /* monotonic to boot time */
+ u32 btm_nsec;
 /* Raw clocksource multiplier */
u32 cs_raw_mult;
/* Raw time */
@@ -337,6 +337,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
if (!vdso_data->use_syscall) {
+ struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_sec;
vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
@@ -347,6 +349,8 @@ void update_vsyscall(struct timekeeper *tk)
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
vdso_data->cs_mask = tk->tkr_mono.mask;
+ vdso_data->btm_sec = btm.tv_sec;
+ vdso_data->btm_nsec = btm.tv_nsec;
}
vdso_write_end(vdso_data);
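Both new fields are written inside the existing vdso_write_begin()/vdso_write_end() pair, so a concurrent vDSO reader either observes a consistent sec/nsec couple or retries. The write side of that seqcount scheme works roughly as follows (an illustrative sketch of the protocol, not the exact kernel helpers):

/* Illustrative write-side seqcount: an odd count marks an update in
 * flight, so readers spin until the count is even and unchanged. */
static void vdso_write_begin(struct vdso_data *vdata)
{
	++vdata->seq_count;	/* count goes odd: update in progress */
	smp_wmb();		/* order the count before the payload */
}

static void vdso_write_end(struct vdso_data *vdata)
{
	smp_wmb();		/* order the payload before the count */
	++vdata->seq_count;	/* count goes even: update published */
}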
@@ -45,6 +45,8 @@ struct vdso_data {
__u64 xtime_coarse_nsec;
__u64 wtm_clock_sec; /* Wall to monotonic time */
vdso_wtm_clock_nsec_t wtm_clock_nsec;
+ __u32 btm_sec; /* monotonic to boot time */
+ __u32 btm_nsec;
__u32 tb_seq_count; /* Timebase sequence counter */
/* cs_* members must be adjacent and in this order (ldp accesses) */
__u32 cs_mono_mult; /* NTP-adjusted clocksource multiplier */
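Placing the new fields after wtm_clock_nsec keeps them out of the cs_* block, whose adjacency the comment above demands because the arm64 assembly fast path fetches those members pairwise with ldp. A hypothetical compile-time guard for that constraint (not part of this patch) could look like:

/* Hypothetical layout guard: cs_shift must directly follow
 * cs_mono_mult for the paired ldp access to stay valid. */
#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void vdso_data_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct vdso_data, cs_shift) !=
		     offsetof(struct vdso_data, cs_mono_mult) + sizeof(__u32));
}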
@@ -233,6 +233,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
+ struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
/* tkr_mono.cycle_last == tkr_raw.cycle_last */
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
vdso_data->raw_time_sec = tk->raw_sec;
@@ -243,6 +245,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->cs_raw_mult = tk->tkr_raw.mult;
/* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
+ vdso_data->btm_sec = btm.tv_sec;
+ vdso_data->btm_nsec = btm.tv_nsec;
}
smp_wmb();
@@ -247,6 +247,51 @@ static notrace int do_monotonic_raw(const struct vdso_data *vd,
return 0;
}
+static notrace int do_boottime(const struct vdso_data *vd, struct timespec *ts)
+{
+ u32 seq, mult, shift;
+ u64 nsec, cycle_last;
+ vdso_wtm_clock_nsec_t wtm_nsec;
+#ifdef ARCH_CLOCK_FIXED_MASK
+ static const u64 mask = ARCH_CLOCK_FIXED_MASK;
+#else
+ u64 mask;
+#endif
+ __kernel_time_t sec;
+
+ do {
+ seq = vdso_read_begin(vd);
+
+ if (vd->use_syscall)
+ return -1;
+
+ cycle_last = vd->cs_cycle_last;
+
+ mult = vd->cs_mono_mult;
+ shift = vd->cs_shift;
+#ifndef ARCH_CLOCK_FIXED_MASK
+ mask = vd->cs_mask;
+#endif
+
+ sec = vd->xtime_clock_sec;
+ nsec = vd->xtime_clock_snsec;
+
+ sec += vd->wtm_clock_sec + vd->btm_sec;
+ wtm_nsec = vd->wtm_clock_nsec + vd->btm_nsec;
+
+ } while (unlikely(vdso_read_retry(vd, seq)));
+
+ nsec += get_clock_shifted_nsec(cycle_last, mult, mask);
+ nsec >>= shift;
+ nsec += wtm_nsec;
+
+ /* open coding timespec_add_ns to save a ts->tv_nsec = 0 */
+ ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
+ ts->tv_nsec = nsec;
+
+ return 0;
+}
+
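do_boottime() follows the same fixed-point scheme as the other fast paths: xtime_clock_snsec holds nanoseconds scaled left by cs_shift, get_clock_shifted_nsec() is assumed to contribute ((now - cycle_last) & mask) * mult in that same scale, and only after the final right shift are the plain-nanosecond wall-to-monotonic and boot offsets folded in. Condensed into a sketch under that assumption:

/* Sketch of the conversion above: base_snsec is xtime_clock_snsec,
 * i.e. nanoseconds << shift at the time cycle_last was sampled. */
static u64 snsec_to_nsec(u64 now, u64 cycle_last, u64 mask,
			 u32 mult, u32 shift, u64 base_snsec)
{
	u64 snsec = base_snsec + (((now - cycle_last) & mask) * mult);

	return snsec >> shift;		/* back to whole nanoseconds */
}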
#else /* ARCH_PROVIDES_TIMER */
static notrace int do_realtime(const struct vdso_data *vd, struct timespec *ts)
@@ -265,6 +310,12 @@ static notrace int do_monotonic_raw(const struct vdso_data *vd,
return -1;
}
+static notrace int do_boottime(const struct vdso_data *vd,
+ struct timespec *ts)
+{
+ return -1;
+}
+
#endif /* ARCH_PROVIDES_TIMER */
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
@@ -290,6 +341,10 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
if (do_monotonic_raw(vd, ts))
goto fallback;
break;
+ case CLOCK_BOOTTIME:
+ if (do_boottime(vd, ts))
+ goto fallback;
+ break;
default:
goto fallback;
}
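When the clocksource cannot be read from userspace, do_boottime() returns nonzero and the dispatcher takes the fallback label, which issues the real clock_gettime() syscall. On arm64 that fallback follows the usual inline-svc pattern, roughly as below (a sketch mirroring the style of the existing fallbacks, not code quoted from this patch):

/* Sketch of the syscall fallback reached via "goto fallback". */
static notrace long clock_gettime_fallback(clockid_t _clkid,
					   struct timespec *_ts)
{
	register struct timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm("x0");
	register long nr asm("x8") = __NR_clock_gettime;

	asm volatile("svc #0\n"
		     : "=r" (ret)
		     : "r" (clkid), "r" (ts), "r" (nr)
		     : "memory");

	return ret;
}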
@@ -326,6 +381,7 @@ int __vdso_clock_getres(clockid_t clock, struct timespec *res)
long nsec;
if (clock == CLOCK_REALTIME ||
+ clock == CLOCK_BOOTTIME ||
clock == CLOCK_MONOTONIC ||
clock == CLOCK_MONOTONIC_RAW)
nsec = MONOTONIC_RES_NSEC;
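With the getres hunk applied, CLOCK_BOOTTIME reports the same resolution as CLOCK_MONOTONIC. A quick userspace check, assuming a libc that routes these calls through the vDSO:

/* Userspace demo: the gap between CLOCK_BOOTTIME and CLOCK_MONOTONIC
 * approximates the total time spent in suspend. */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec boot, mono, res;

	clock_gettime(CLOCK_BOOTTIME, &boot);
	clock_gettime(CLOCK_MONOTONIC, &mono);
	clock_getres(CLOCK_BOOTTIME, &res);

	printf("boottime   %lld.%09ld\n", (long long)boot.tv_sec, boot.tv_nsec);
	printf("monotonic  %lld.%09ld\n", (long long)mono.tv_sec, mono.tv_nsec);
	printf("resolution %ld ns\n", res.tv_nsec);
	return 0;
}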