@@ -144,5 +144,4 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
-
#endif /* __ARM64_KVM_HYP_H__ */
new file mode 100644
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARM64_KVM_HYP_NVHE_CLOCK_H
+#define __ARM64_KVM_HYP_NVHE_CLOCK_H
+#include <linux/types.h>
+
+#include <asm/kvm_hyp.h>
+
+#ifdef CONFIG_TRACING
+void trace_clock_update(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc);
+u64 trace_clock(void);
+#else
+static inline void
+trace_clock_update(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc) { }
+static inline u64 trace_clock(void) { return 0; }
+#endif
+#endif /* __ARM64_KVM_HYP_NVHE_CLOCK_H */
@@ -28,6 +28,7 @@ hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
+hyp-obj-$(CONFIG_TRACING) += clock.o
hyp-obj-y += $(lib-objs)
##
new file mode 100644
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2024 Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <nvhe/clock.h>
+
+#include <asm/arch_timer.h>
+#include <asm/div64.h>
+
+static struct clock_data {
+	struct {
+		u32 mult;		/* cycles->ns multiplier, host provided */
+		u32 shift;		/* cycles->ns shift, host provided */
+		u64 epoch_ns;		/* ns value at the last update */
+		u64 epoch_cyc;		/* counter value at the last update */
+		u64 cyc_overflow64;	/* max cyc before cyc * mult overflows 64 bits */
+	} data[2];			/* two banks, swapped on each update */
+	u64 cur;			/* index of the live bank */
+} trace_clock_data;
+
+/* Slow-path cycles->ns conversion through a 128-bit intermediate. */
+static u64 __clock_mult_uint128(u64 cyc, u32 mult, u32 shift)
+{
+	return (u64)(((__uint128_t)cyc * mult) >> shift);
+}
+
+/*
+ * Install a new cycles->ns conversion in the spare bank and make it live.
+ *
+ * Writers must be serialized by the caller. Does not guarantee no reader
+ * on the modified bank: a reader may still be walking the previously live
+ * bank when the following update rewrites it.
+ */
+void trace_clock_update(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc)
+{
+	struct clock_data *clock = &trace_clock_data;
+	u64 bank = clock->cur ^ 1;
+
+	/* mult is host provided: reject 0 to avoid a division by zero at EL2 */
+	if (!mult)
+		return;
+
+	clock->data[bank].mult = mult;
+	clock->data[bank].shift = shift;
+	clock->data[bank].epoch_ns = epoch_ns;
+	clock->data[bank].epoch_cyc = epoch_cyc;
+	clock->data[bank].cyc_overflow64 = ULONG_MAX / mult;
+
+	/* Pairs with smp_load_acquire() in trace_clock() */
+	smp_store_release(&clock->cur, bank);
+}
+
+/* Using host provided data. Do not use for anything else than debugging. */
+u64 trace_clock(void)
+{
+	struct clock_data *clock = &trace_clock_data;
+	/* Pairs with smp_store_release() in trace_clock_update() */
+	u64 bank = smp_load_acquire(&clock->cur);
+	u64 cyc, ns;
+
+	cyc = __arch_counter_get_cntpct() - clock->data[bank].epoch_cyc;
+
+	/* Fast path: cyc * mult fits in 64 bits */
+	if (likely(cyc < clock->data[bank].cyc_overflow64)) {
+		ns = cyc * clock->data[bank].mult;
+		ns >>= clock->data[bank].shift;
+	} else {
+		/* Otherwise go through a 128-bit intermediate */
+		ns = __clock_mult_uint128(cyc, clock->data[bank].mult,
+					  clock->data[bank].shift);
+	}
+
+	return (u64)ns + clock->data[bank].epoch_ns;
+}
By default, the arm64 host kernel uses the arch timer as the source for sched_clock. Conveniently, EL2 has access to that same counter, making it possible to generate clock values that are synchronized with the host. The clock nonetheless needs to be set up with the same slope values as the kernel's. Also introduce trace_clock(), which is expected to be configured later by the hypervisor tracing support. Signed-off-by: Vincent Donnefort <vdonnefort@google.com>