@@ -953,6 +953,11 @@ struct perf_event_groups {
u64 index;
};
+struct perf_time_ctx {
+ u64 time;
+ u64 stamp;
+ u64 offset;
+};
/**
* struct perf_event_context - event context structure
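The new struct keeps the invariant offset == time - stamp, which is what makes the
lockless read path work. A minimal user-space sketch of that identity (plain u64
arithmetic, no memory-ordering primitives; names here are illustrative, not kernel API):

	#include <assert.h>
	#include <stdint.h>

	struct time_ctx {
		uint64_t time;   /* accumulated enabled time */
		uint64_t stamp;  /* clock value at last update */
		uint64_t offset; /* time - stamp, published for lockless readers */
	};

	static void advance(struct time_ctx *t, uint64_t now)
	{
		t->time += now - t->stamp;
		t->stamp = now;
		t->offset = t->time - t->stamp; /* u64 wrap-around is intentional */
	}

	int main(void)
	{
		struct time_ctx t = { .time = 100, .stamp = 1000,
				      .offset = 100 - 1000 };
		uint64_t now = 1500;

		/* A reader using only the published offset... */
		uint64_t read_side = now + t.offset;

		/* ...agrees with the writer advancing under its lock. */
		advance(&t, now);
		assert(read_side == t.time); /* both are 600 */
		return 0;
	}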
@@ -992,9 +997,7 @@ struct perf_event_context {
/*
* Context clock, runs when context enabled.
*/
- u64 time;
- u64 timestamp;
- u64 timeoffset;
+ struct perf_time_ctx time;
/*
* These fields let us detect when two contexts have both
@@ -1085,9 +1088,7 @@ struct bpf_perf_event_data_kern {
* This is a per-cpu dynamically allocated data structure.
*/
struct perf_cgroup_info {
- u64 time;
- u64 timestamp;
- u64 timeoffset;
+ struct perf_time_ctx time;
int active;
};
@@ -770,6 +770,24 @@ static void perf_ctx_enable(struct perf_event_context *ctx,
static void ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
static void ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
+static inline void update_perf_time_ctx(struct perf_time_ctx *time, u64 now, bool adv)
+{
+ if (adv)
+ time->time += now - time->stamp;
+ time->stamp = now;
+
+ /*
+ * The above: time' = time + (now - timestamp), can be re-arranged
+ * into: time' = now + (time - timestamp), which gives a single value
+ * offset to compute future time without taking locks.
+ *
+ * See perf_event_time_now(), which can be used from NMI context where
+ * it's (obviously) not possible to acquire ctx->lock in order to read
+ * both the above values in a consistent manner.
+ */
+ WRITE_ONCE(time->offset, time->time - time->stamp);
+}
+
#ifdef CONFIG_CGROUP_PERF
static inline bool
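To see how the writer/reader pairing above is meant to be used, here is a hedged
user-space model: C11 relaxed atomics stand in for WRITE_ONCE()/READ_ONCE(), and a
pthread mutex stands in for ctx->lock. All names (model_time_ctx and friends) are
illustrative, not kernel API.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct model_time_ctx {
		uint64_t time;           /* accumulated time, written under lock */
		uint64_t stamp;          /* last update, written under lock */
		_Atomic uint64_t offset; /* time - stamp, read locklessly */
		pthread_mutex_t lock;
	};

	/* Writer: always called with t->lock held, like __update_context_time(). */
	static void model_update(struct model_time_ctx *t, uint64_t now, bool adv)
	{
		if (adv)
			t->time += now - t->stamp;
		t->stamp = now;
		/* Single-copy publish; mirrors WRITE_ONCE(time->offset, ...). */
		atomic_store_explicit(&t->offset, t->time - t->stamp,
				      memory_order_relaxed);
	}

	/* Reader: safe without the lock because offset is one self-consistent
	 * value, like perf_event_time_now() running in NMI context. */
	static uint64_t model_time_now(struct model_time_ctx *t, uint64_t now)
	{
		return now + atomic_load_explicit(&t->offset,
						  memory_order_relaxed);
	}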
@@ -811,7 +829,7 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
- return t->time;
+ return t->time.time;
}
static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
@@ -820,22 +838,11 @@ static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
t = per_cpu_ptr(event->cgrp->info, event->cpu);
if (!__load_acquire(&t->active))
- return t->time;
- now += READ_ONCE(t->timeoffset);
+ return t->time.time;
+ now += READ_ONCE(t->time.offset);
return now;
}
-static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
-{
- if (adv)
- info->time += now - info->timestamp;
- info->timestamp = now;
- /*
- * see update_context_time()
- */
- WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
-}
-
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{
struct perf_cgroup *cgrp = cpuctx->cgrp;
@@ -849,7 +856,7 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
- __update_cgrp_time(info, now, true);
+ update_perf_time_ctx(&info->time, now, true);
if (final)
__store_release(&info->active, 0);
}
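The final update above pairs __store_release(&info->active, 0) with the
__load_acquire() in perf_cgroup_event_time_now(): a reader that observes active == 0
is guaranteed to also observe the final time written before the release. A hedged C11
sketch of that ordering (model names are illustrative, not kernel API):

	#include <stdatomic.h>
	#include <stdint.h>

	struct model_cgrp_info {
		uint64_t time;     /* final value, written before the release */
		_Atomic int active;
	};

	/* Writer (sched-out path): finalize time, then clear active with
	 * release semantics so the ->time store is visible first. */
	static void model_sched_out(struct model_cgrp_info *info,
				    uint64_t final_time)
	{
		info->time = final_time;
		atomic_store_explicit(&info->active, 0, memory_order_release);
	}

	/* Reader: an acquire load seeing active == 0 also sees the final
	 * ->time, so no offset-based extrapolation is needed. */
	static uint64_t model_event_time_now(struct model_cgrp_info *info,
					     uint64_t now, uint64_t offset)
	{
		if (!atomic_load_explicit(&info->active, memory_order_acquire))
			return info->time;
		return now + offset;
	}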
@@ -872,7 +879,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
* Do not update time when cgroup is not active
*/
if (info->active)
- __update_cgrp_time(info, perf_clock(), true);
+ update_perf_time_ctx(&info->time, perf_clock(), true);
}
static inline void
@@ -896,7 +903,7 @@ perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
- __update_cgrp_time(info, ctx->timestamp, false);
+ update_perf_time_ctx(&info->time, ctx->time.stamp, false);
__store_release(&info->active, 1);
}
}
@@ -1511,20 +1518,7 @@ static void __update_context_time(struct perf_event_context *ctx, bool adv)
lockdep_assert_held(&ctx->lock);
- if (adv)
- ctx->time += now - ctx->timestamp;
- ctx->timestamp = now;
-
- /*
- * The above: time' = time + (now - timestamp), can be re-arranged
- * into: time` = now + (time - timestamp), which gives a single value
- * offset to compute future time without locks on.
- *
- * See perf_event_time_now(), which can be used from NMI context where
- * it's (obviously) not possible to acquire ctx->lock in order to read
- * both the above values in a consistent manner.
- */
- WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
+ update_perf_time_ctx(&ctx->time, now, adv);
}
static void update_context_time(struct perf_event_context *ctx)
@@ -1542,7 +1536,7 @@ static u64 perf_event_time(struct perf_event *event)
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
- return ctx->time;
+ return ctx->time.time;
}
static u64 perf_event_time_now(struct perf_event *event, u64 now)
@@ -1556,9 +1550,9 @@ static u64 perf_event_time_now(struct perf_event *event, u64 now)
return perf_cgroup_event_time_now(event, now);
if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
- return ctx->time;
+ return ctx->time.time;
- now += READ_ONCE(ctx->timeoffset);
+ now += READ_ONCE(ctx->time.offset);
return now;
}
@@ -11533,14 +11527,14 @@ static void task_clock_event_update(struct perf_event *event, u64 now)
static void task_clock_event_start(struct perf_event *event, int flags)
{
- local64_set(&event->hw.prev_count, event->ctx->time);
+ local64_set(&event->hw.prev_count, event->ctx->time.time);
perf_swevent_start_hrtimer(event);
}
static void task_clock_event_stop(struct perf_event *event, int flags)
{
perf_swevent_cancel_hrtimer(event);
- task_clock_event_update(event, event->ctx->time);
+ task_clock_event_update(event, event->ctx->time.time);
}
static int task_clock_event_add(struct perf_event *event, int flags)
@@ -11560,8 +11554,8 @@ static void task_clock_event_del(struct perf_event *event, int flags)
static void task_clock_event_read(struct perf_event *event)
{
u64 now = perf_clock();
- u64 delta = now - event->ctx->timestamp;
- u64 time = event->ctx->time + delta;
+ u64 delta = now - event->ctx->time.stamp;
+ u64 time = event->ctx->time.time + delta;
task_clock_event_update(event, time);
}