Message ID | 20230612093539.371360635@infradead.org
---|---
State | New, archived
Series | Scope-based Resource Management
On Mon, Jun 12, 2023 at 11:07:39AM +0200, Peter Zijlstra wrote:
> @@ -224,17 +243,15 @@ static int event_function(void *info)
>  	int ret = 0;
>
>  	lockdep_assert_irqs_disabled();
> +	guard(perf_ctx_lock)(cpuctx, task_ctx);
>
> -	perf_ctx_lock(cpuctx, task_ctx);
>  	/*
>  	 * Since we do the IPI call without holding ctx->lock things can have
>  	 * changed, double check we hit the task we set out to hit.
>  	 */
>  	if (ctx->task) {
> -		if (ctx->task != current) {
> -			ret = -ESRCH;
> -			goto unlock;
> -		}
> +		if (ctx->task != current)
> +			return -ESRCH;
>
>  		/*
>  		 * We only use event_function_call() on established contexts,
> @@ -254,8 +271,6 @@ static int event_function(void *info)
>  	}
>
>  	efs->func(event, cpuctx, ctx, efs->data);
> -unlock:
> -	perf_ctx_unlock(cpuctx, task_ctx);
>
>  	return ret;

We can change this to a return 0; and get rid of the "ret" variable.

regards,
dan carpenter
On Mon, Jun 12, 2023 at 05:46:47PM +0300, Dan Carpenter wrote:
> On Mon, Jun 12, 2023 at 11:07:39AM +0200, Peter Zijlstra wrote:
> > @@ -224,17 +243,15 @@ static int event_function(void *info)
> >  	int ret = 0;
> >
> >  	lockdep_assert_irqs_disabled();
> > +	guard(perf_ctx_lock)(cpuctx, task_ctx);
> >
> > -	perf_ctx_lock(cpuctx, task_ctx);
> >  	/*
> >  	 * Since we do the IPI call without holding ctx->lock things can have
> >  	 * changed, double check we hit the task we set out to hit.
> >  	 */
> >  	if (ctx->task) {
> > -		if (ctx->task != current) {
> > -			ret = -ESRCH;
> > -			goto unlock;
> > -		}
> > +		if (ctx->task != current)
> > +			return -ESRCH;
> >
> >  		/*
> >  		 * We only use event_function_call() on established contexts,
> > @@ -254,8 +271,6 @@ static int event_function(void *info)
> >  	}
> >
> >  	efs->func(event, cpuctx, ctx, efs->data);
> > -unlock:
> > -	perf_ctx_unlock(cpuctx, task_ctx);
> >
> >  	return ret;
>
> We can change this to a return 0; and get rid of the "ret" variable.

This and the previous one, done!
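[Editor's note: for readers following the thread, here is a minimal sketch of how the tail of event_function() ends up looking with Dan's suggestion applied. The context lookup and the sanity checks not shown in the hunks are elided with comments; this is an illustration, not the committed code.]

static int event_function(void *info)
{
	struct event_function_struct *efs = info;
	/* ... cpuctx/ctx/task_ctx lookup unchanged; "int ret" is gone ... */

	lockdep_assert_irqs_disabled();
	guard(perf_ctx_lock)(cpuctx, task_ctx);

	/*
	 * Early returns are safe now: the guard's destructor drops
	 * perf_ctx_lock() on every exit path.
	 */
	if (ctx->task) {
		if (ctx->task != current)
			return -ESRCH;
		/* ... established-context WARN_ON_ONCE() checks elided ... */
	}

	efs->func(event, cpuctx, ctx, efs->data);
	return 0;
}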
Hi Peter,

On Mon, Jun 12, 2023 at 2:39 AM Peter Zijlstra <peterz@infradead.org> wrote:
>
> Use guards to reduce gotos and simplify control flow.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
>  kernel/events/core.c |   39 ++++++++++++++++++++++++++-------------
>  1 file changed, 26 insertions(+), 13 deletions(-)
>
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -214,6 +214,25 @@ struct event_function_struct {
>  	void *data;
>  };
>
> +typedef struct {
> +	struct perf_cpu_context *cpuctx;
> +	struct perf_event_context *ctx;
> +} class_perf_ctx_lock_t;
> +
> +static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
> +{
> +	if (_T->cpuctx)
> +		perf_ctx_unlock(_T->cpuctx, _T->ctx);

Shouldn't it be called unconditionally?

Thanks,
Namhyung

> +}
> +
> +static inline class_perf_ctx_lock_t
> +class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
> +				struct perf_event_context *ctx)
> +{
> +	perf_ctx_lock(cpuctx, ctx);
> +	return (class_perf_ctx_lock_t){ cpuctx, ctx };
> +}
> +
>  static int event_function(void *info)
>  {
>  	struct event_function_struct *efs = info;
> @@ -224,17 +243,15 @@ static int event_function(void *info)
>  	int ret = 0;
>
>  	lockdep_assert_irqs_disabled();
> +	guard(perf_ctx_lock)(cpuctx, task_ctx);
>
> -	perf_ctx_lock(cpuctx, task_ctx);
>  	/*
>  	 * Since we do the IPI call without holding ctx->lock things can have
>  	 * changed, double check we hit the task we set out to hit.
>  	 */
>  	if (ctx->task) {
> -		if (ctx->task != current) {
> -			ret = -ESRCH;
> -			goto unlock;
> -		}
> +		if (ctx->task != current)
> +			return -ESRCH;
>
>  		/*
>  		 * We only use event_function_call() on established contexts,
> @@ -254,8 +271,6 @@ static int event_function(void *info)
>  	}
>
>  	efs->func(event, cpuctx, ctx, efs->data);
> -unlock:
> -	perf_ctx_unlock(cpuctx, task_ctx);
>
>  	return ret;
>  }
> @@ -329,11 +344,11 @@ static void event_function_local(struct
>  		task_ctx = ctx;
>  	}
>
> -	perf_ctx_lock(cpuctx, task_ctx);
> +	guard(perf_ctx_lock)(cpuctx, task_ctx);
>
>  	task = ctx->task;
>  	if (task == TASK_TOMBSTONE)
> -		goto unlock;
> +		return;
>
>  	if (task) {
>  		/*
> @@ -343,18 +358,16 @@ static void event_function_local(struct
>  		 */
>  		if (ctx->is_active) {
>  			if (WARN_ON_ONCE(task != current))
> -				goto unlock;
> +				return;
>
>  			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
> -				goto unlock;
> +				return;
>  		}
>  	} else {
>  		WARN_ON_ONCE(&cpuctx->ctx != ctx);
>  	}
>
>  	func(event, cpuctx, ctx, data);
> -unlock:
> -	perf_ctx_unlock(cpuctx, task_ctx);
>  }
>
>  #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
On Mon, Jun 12, 2023 at 10:56:06PM -0700, Namhyung Kim wrote:
> Hi Peter,
>
> On Mon, Jun 12, 2023 at 2:39 AM Peter Zijlstra <peterz@infradead.org> wrote:
> >
> > Use guards to reduce gotos and simplify control flow.
> >
> > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> > ---
> >  kernel/events/core.c |   39 ++++++++++++++++++++++++++-------------
> >  1 file changed, 26 insertions(+), 13 deletions(-)
> >
> > --- a/kernel/events/core.c
> > +++ b/kernel/events/core.c
> > @@ -214,6 +214,25 @@ struct event_function_struct {
> >  	void *data;
> >  };
> >
> > +typedef struct {
> > +	struct perf_cpu_context *cpuctx;
> > +	struct perf_event_context *ctx;
> > +} class_perf_ctx_lock_t;
> > +
> > +static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
> > +{
> > +	if (_T->cpuctx)
> > +		perf_ctx_unlock(_T->cpuctx, _T->ctx);
>
> Shouldn't it be called unconditionally?

In all surviving cases it will be, so yeah, I can remove that condition.
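[Editor's note: for context on why the check is removable, the guard machinery from <linux/cleanup.h> runs the destructor exactly once, when the guard variable goes out of scope, so the NULL test only matters if some path can hand the payload off and clear it, which none of the surviving users here do. Below is a minimal userspace mimic of the pattern, with pthreads standing in for perf_ctx_lock(); every name in it is invented for illustration, and __attribute__((cleanup)) is a GCC/Clang extension, not ISO C.]

/*
 * Userspace sketch of the constructor/destructor guard pattern.
 * Build with: gcc -pthread guard.c
 */
#include <pthread.h>
#include <stdio.h>

typedef struct { pthread_mutex_t *lock; } class_mutex_t;

static inline void class_mutex_destructor(class_mutex_t *_T)
{
	/* Runs automatically when the guard variable leaves scope. */
	pthread_mutex_unlock(_T->lock);
}

static inline class_mutex_t class_mutex_constructor(pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	return (class_mutex_t){ lock };
}

/* Roughly what guard(mutex)(&m) boils down to: a local with a cleanup hook. */
#define guard_mutex(m) \
	class_mutex_t __attribute__((cleanup(class_mutex_destructor))) \
		__guard = class_mutex_constructor(m)

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static int counter;

int bump(int fail)
{
	guard_mutex(&m);
	if (fail)
		return -1;	/* unlocks here, no goto needed */
	counter++;
	return 0;		/* ... and unlocks here too */
}

int main(void)
{
	bump(0);
	bump(1);
	printf("counter = %d\n", counter);	/* prints 1 */
	return 0;
}

[The early return in bump(1) still releases the mutex, which is exactly the property the perf conversions above rely on.]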
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -214,6 +214,25 @@ struct event_function_struct {
 	void *data;
 };
 
+typedef struct {
+	struct perf_cpu_context *cpuctx;
+	struct perf_event_context *ctx;
+} class_perf_ctx_lock_t;
+
+static inline void class_perf_ctx_lock_destructor(class_perf_ctx_lock_t *_T)
+{
+	if (_T->cpuctx)
+		perf_ctx_unlock(_T->cpuctx, _T->ctx);
+}
+
+static inline class_perf_ctx_lock_t
+class_perf_ctx_lock_constructor(struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx)
+{
+	perf_ctx_lock(cpuctx, ctx);
+	return (class_perf_ctx_lock_t){ cpuctx, ctx };
+}
+
 static int event_function(void *info)
 {
 	struct event_function_struct *efs = info;
@@ -224,17 +243,15 @@ static int event_function(void *info)
 	int ret = 0;
 
 	lockdep_assert_irqs_disabled();
+	guard(perf_ctx_lock)(cpuctx, task_ctx);
 
-	perf_ctx_lock(cpuctx, task_ctx);
 	/*
 	 * Since we do the IPI call without holding ctx->lock things can have
 	 * changed, double check we hit the task we set out to hit.
 	 */
 	if (ctx->task) {
-		if (ctx->task != current) {
-			ret = -ESRCH;
-			goto unlock;
-		}
+		if (ctx->task != current)
+			return -ESRCH;
 
 		/*
 		 * We only use event_function_call() on established contexts,
@@ -254,8 +271,6 @@ static int event_function(void *info)
 	}
 
 	efs->func(event, cpuctx, ctx, efs->data);
-unlock:
-	perf_ctx_unlock(cpuctx, task_ctx);
 
 	return ret;
 }
@@ -329,11 +344,11 @@ static void event_function_local(struct
 		task_ctx = ctx;
 	}
 
-	perf_ctx_lock(cpuctx, task_ctx);
+	guard(perf_ctx_lock)(cpuctx, task_ctx);
 
 	task = ctx->task;
 	if (task == TASK_TOMBSTONE)
-		goto unlock;
+		return;
 
 	if (task) {
 		/*
@@ -343,18 +358,16 @@ static void event_function_local(struct
 		 */
 		if (ctx->is_active) {
 			if (WARN_ON_ONCE(task != current))
-				goto unlock;
+				return;
 
 			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
-				goto unlock;
+				return;
 		}
 	} else {
 		WARN_ON_ONCE(&cpuctx->ctx != ctx);
 	}
 
 	func(event, cpuctx, ctx, data);
-unlock:
-	perf_ctx_unlock(cpuctx, task_ctx);
 }
 
 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
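[Editor's note: as a reading aid, paraphrasing the CLASS()/guard() helpers in <linux/cleanup.h> (the exact expansion can differ between kernel versions), the new guard line amounts to a cleanup-annotated local. The class_perf_ctx_lock_{t,constructor,destructor} naming in the patch is what lets guard(perf_ctx_lock) find these helpers by token pasting.]

/*
 * Approximate expansion of guard(perf_ctx_lock)(cpuctx, task_ctx);
 * "__guard_123" stands in for the compiler-generated unique name.
 */
class_perf_ctx_lock_t __guard_123
	__attribute__((__cleanup__(class_perf_ctx_lock_destructor))) =
	class_perf_ctx_lock_constructor(cpuctx, task_ctx);

[The compiler emits a destructor call on every path out of the enclosing scope, which is why each "goto unlock" above can become a plain return.]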
Use guards to reduce gotos and simplify control flow.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/events/core.c |   39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)