@@ -713,6 +713,9 @@ static void perf_ctx_enable(struct perf_
 		perf_pmu_enable(pmu_ctx->pmu);
 }
 
+DEFINE_GUARD(perf_ctx_disable, struct perf_event_context *,
+	     perf_ctx_disable(_T), perf_ctx_enable(_T))
+
 static void ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type);
 static void ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type);
 
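DEFINE_GUARD() comes from include/linux/cleanup.h: it builds a scope-based guard out of a lock/unlock pair (here the disable/enable pair) on top of the compiler's __attribute__((cleanup)). With the definition above, guard(perf_ctx_disable)(ctx) declares a local object whose constructor runs perf_ctx_disable(ctx) and whose destructor runs perf_ctx_enable(ctx) on every exit from the enclosing scope. A minimal userspace sketch of the mechanism, assuming only the GCC/Clang cleanup attribute; the type, helpers and macro below are invented for illustration and are not the kernel's macros:

#include <stdio.h>

struct ctx { const char *name; };

static void ctx_disable(struct ctx *c) { printf("disable %s\n", c->name); }
static void ctx_enable(struct ctx *c)  { printf("enable %s\n", c->name); }

/* Constructor half: run the "lock" expression, remember the pointer. */
static inline struct ctx *ctx_guard_init(struct ctx *c)
{
	ctx_disable(c);
	return c;
}

/* Destructor half: the compiler calls this when the variable goes out of scope. */
static void ctx_guard_exit(struct ctx **cp)
{
	if (*cp)
		ctx_enable(*cp);
}

#define ctx_guard(c) \
	struct ctx *__guard __attribute__((cleanup(ctx_guard_exit))) = ctx_guard_init(c)

int main(void)
{
	struct ctx c = { "demo" };

	ctx_guard(&c);			/* prints "disable demo" */
	printf("critical section\n");
	return 0;			/* cleanup prints "enable demo" */
}

The if (*cp) test mirrors DEFINE_GUARD() in cleanup.h, whose destructor only runs the unlock half for a non-NULL guard value.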
@@ -3906,31 +3909,27 @@ static void perf_event_context_sched_in(
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct perf_event_context *ctx;
 
-	rcu_read_lock();
+	guard(rcu)();
+
 	ctx = rcu_dereference(task->perf_event_ctxp);
 	if (!ctx)
-		goto rcu_unlock;
-
-	if (cpuctx->task_ctx == ctx) {
-		perf_ctx_lock(cpuctx, ctx);
-		perf_ctx_disable(ctx);
-
-		perf_ctx_sched_task_cb(ctx, true);
-
-		perf_ctx_enable(ctx);
-		perf_ctx_unlock(cpuctx, ctx);
-		goto rcu_unlock;
-	}
+		return;
 
-	perf_ctx_lock(cpuctx, ctx);
+	guard(perf_ctx_lock)(cpuctx, ctx);
 	/*
	 * We must check ctx->nr_events while holding ctx->lock, such
	 * that we serialize against perf_install_in_context().
	 */
 	if (!ctx->nr_events)
-		goto unlock;
+		return;
+
+	guard(perf_ctx_disable)(ctx);
+
+	if (cpuctx->task_ctx == ctx) {
+		perf_ctx_sched_task_cb(ctx, true);
+		return;
+	}
 
-	perf_ctx_disable(ctx);
 	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
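This hunk is where the cleanup machinery pays off: the unlock:/rcu_unlock: labels can go away because every early return unwinds the live guards automatically, in reverse declaration order, so a return taken after all three guards runs perf_ctx_enable(), then perf_ctx_unlock(), then rcu_read_unlock(), exactly as the old hand-written tail did (guard(rcu) is the stock RCU guard from include/linux/rcupdate.h). A standalone sketch of that unwind ordering; the GUARD() macro and the sched_in() stand-in are toys for illustration, not kernel API:

#include <stdio.h>

static void leave(const char **what)
{
	printf("exit:  %s\n", *what);
}

/* Announce scope entry; arrange for leave() to run at scope exit. */
#define GUARD(var, label) \
	const char *var __attribute__((cleanup(leave))) = label; \
	printf("enter: %s\n", var)

static void sched_in(int nr_events)
{
	GUARD(rcu, "rcu");
	GUARD(lock, "perf_ctx_lock");

	if (!nr_events)
		return;		/* unwinds lock, then rcu; no labels needed */

	GUARD(disable, "perf_ctx_disable");
	printf("schedule events in\n");
}				/* unwinds disable, lock, rcu, in that order */

int main(void)
{
	sched_in(0);
	printf("--\n");
	sched_in(1);
	return 0;
}

Running it shows that sched_in(0), which bails out at the nr_events check, still releases the lock and RCU guards; that is the case the old code reached via goto unlock.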
@@ -3950,13 +3949,6 @@ static void perf_event_context_sched_in(
 
 	if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
 		perf_ctx_enable(&cpuctx->ctx);
-
-	perf_ctx_enable(ctx);
-
-unlock:
-	perf_ctx_unlock(cpuctx, ctx);
-rcu_unlock:
-	rcu_read_unlock();
 }
 
 /*
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/events/core.c | 38 +++++++++++++++-----------------------
 1 file changed, 15 insertions(+), 23 deletions(-)
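One subtlety worth noting: guard(perf_ctx_lock)(cpuctx, ctx) is a two-argument guard, and its definition is not part of the hunks above. DEFINE_GUARD() as used in the first hunk only covers a single pointer, so the perf_ctx_lock guard is presumably defined elsewhere in this series as a small guard class whose constructor takes both pointers and stores them for the destructor. A hypothetical userspace model of that shape, with stub types and the two_lock()/two_unlock() helpers standing in for perf_ctx_lock()/perf_ctx_unlock():

#include <stdio.h>

struct perf_cpu_context { int unused; };
struct perf_event_context { int unused; };

static void two_lock(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
{
	(void)cpuctx; (void)ctx;
	printf("lock cpuctx + ctx\n");
}

static void two_unlock(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
{
	(void)cpuctx; (void)ctx;
	printf("unlock cpuctx + ctx\n");
}

/* The guard object captures both pointers so the destructor can unlock both. */
struct ctx_lock_guard {
	struct perf_cpu_context *cpuctx;
	struct perf_event_context *ctx;
};

static struct ctx_lock_guard
ctx_lock_guard_init(struct perf_cpu_context *cpuctx, struct perf_event_context *ctx)
{
	two_lock(cpuctx, ctx);
	return (struct ctx_lock_guard){ .cpuctx = cpuctx, .ctx = ctx };
}

static void ctx_lock_guard_exit(struct ctx_lock_guard *g)
{
	two_unlock(g->cpuctx, g->ctx);
}

#define guard_ctx_lock(cpuctx, ctx) \
	struct ctx_lock_guard __g __attribute__((cleanup(ctx_lock_guard_exit))) = \
		ctx_lock_guard_init(cpuctx, ctx)

int main(void)
{
	struct perf_cpu_context cpuctx = { 0 };
	struct perf_event_context ctx = { 0 };

	guard_ctx_lock(&cpuctx, &ctx);	/* prints "lock cpuctx + ctx" */
	printf("both locks held\n");
	return 0;			/* cleanup prints "unlock cpuctx + ctx" */
}

Capturing both pointers in one guard object keeps the lock/unlock pairing in a single place, which is what lets each early return in the rewritten function drop both locks together.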