@@ -2728,6 +2728,15 @@ void arch_perf_update_userpage(struct perf_event *event,
!!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
userpg->pmc_width = x86_pmu.cntval_bits;
+ if (event->attr.use_clockid && event->attr.clockid == CLOCK_PERF_HW_CLOCK) {
+ userpg->cap_user_time_zero = 1;
+ userpg->time_mult = 1;
+ userpg->time_shift = 0;
+ userpg->time_offset = 0;
+ userpg->time_zero = 0;
+ return;
+ }
+
if (!using_native_sched_clock() || !sched_clock_stable())
return;
@@ -2980,6 +2989,11 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
return misc;
}
+u64 perf_hw_clock(void)
+{
+ return rdtsc_ordered();
+}
+
void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
cap->version = x86_pmu.version;
@@ -451,6 +451,9 @@ extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs) perf_misc_flags(regs)
+extern u64 perf_hw_clock(void);
+#define perf_hw_clock perf_hw_clock
+
#include <asm/stacktrace.h>
/*
@@ -67,6 +67,17 @@ struct timezone {
#define CLOCKS_MASK (CLOCK_REALTIME | CLOCK_MONOTONIC)
#define CLOCKS_MONO CLOCK_MONOTONIC
+/*
+ * If supported, clockid value for use in struct perf_event_attr to select an
+ * architecture dependent hardware clock. Note this means the unit of time is
+ * ticks not nanoseconds. WARNING: This clock may not be stable or well-behaved
+ * in any way, including varying across different CPUs.
+ *
+ * On x86, this is provided by the rdtsc instruction, and is not
+ * paravirtualized. Note the warning above can also apply to TSC.
+ */
+#define CLOCK_PERF_HW_CLOCK 0x10000000
+
/*
* The various flags for setting POSIX.1b interval timers:
*/
@@ -12034,6 +12034,13 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
event->clock = &ktime_get_clocktai_ns;
break;
+#ifdef perf_hw_clock
+ case CLOCK_PERF_HW_CLOCK:
+ event->clock = &perf_hw_clock;
+ nmi_safe = true;
+ break;
+#endif
+
default:
return -EINVAL;
}
Currently, using Intel PT to trace a VM guest is limited to kernel space because decoding requires side band events such as MMAP and CONTEXT_SWITCH. While these events can be collected for the host, there is not a way to do that yet for a guest. One approach would be to collect them inside the guest, but that would require being able to synchronize with host timestamps. The motivation for this patch is to provide a clock that can be used within a VM guest, and that correlates to a VM host clock. In the case of TSC, if the hypervisor leaves rdtsc alone, the TSC value will be subject only to the VMCS TSC Offset and Scaling. Adjusting for that would make it possible to inject events from a guest perf.data file into a host perf.data file. Thus making possible the collection of VM guest side band for Intel PT decoding. There are other potential benefits of TSC as a perf event clock: - ability to work directly with TSC - ability to inject non-Intel-PT-related events from a guest Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> --- arch/x86/events/core.c | 14 ++++++++++++++ arch/x86/include/asm/perf_event.h | 3 +++ include/uapi/linux/time.h | 11 +++++++++++ kernel/events/core.c | 7 +++++++ 4 files changed, 35 insertions(+)