@@ -1202,6 +1202,8 @@ static void put_ctx(struct perf_event_co
}
}
+DEFINE_FREE(put_ctx, struct perf_event_context *, if (_T) put_ctx(_T))
+
/*
* Because of perf_event::ctx migration in sys_perf_event_open::move_group and
* perf_pmu_migrate_context() we need some magic.
@@ -4718,41 +4720,29 @@ find_get_context(struct task_struct *tas
if (clone_ctx)
put_ctx(clone_ctx);
} else {
- ctx = alloc_perf_context(task);
- err = -ENOMEM;
- if (!ctx)
- goto errout;
-
- err = 0;
- mutex_lock(&task->perf_event_mutex);
- /*
- * If it has already passed perf_event_exit_task().
- * we must see PF_EXITING, it takes this mutex too.
- */
- if (task->flags & PF_EXITING)
- err = -ESRCH;
- else if (task->perf_event_ctxp)
- err = -EAGAIN;
- else {
- get_ctx(ctx);
- ++ctx->pin_count;
- rcu_assign_pointer(task->perf_event_ctxp, ctx);
- }
- mutex_unlock(&task->perf_event_mutex);
+ struct perf_event_context *new __free(put_ctx) =
+ alloc_perf_context(task);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
- if (unlikely(err)) {
- put_ctx(ctx);
+ scoped_guard (mutex, &task->perf_event_mutex) {
+ /*
+		 * If it has already passed perf_event_exit_task(),
+ * we must see PF_EXITING, it takes this mutex too.
+ */
+ if (task->flags & PF_EXITING)
+ return ERR_PTR(-ESRCH);
- if (err == -EAGAIN)
+ if (task->perf_event_ctxp)
goto retry;
- goto errout;
+
+ ctx = get_ctx(no_free_ptr(new));
+ ++ctx->pin_count;
+ rcu_assign_pointer(task->perf_event_ctxp, ctx);
}
}
return ctx;
-
-errout:
- return ERR_PTR(err);
}
DEFINE_CLASS(find_get_ctx, struct perf_event_context *,
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/events/core.c | 46 ++++++++++++++++++----------------------------
 1 file changed, 18 insertions(+), 28 deletions(-)