| Message ID | 171952310959.1810550.17003659816794335660.stgit@firesoul |
|---|---|
| State | New |
| Series | [V4,1/2] cgroup/rstat: Helper functions for locking expose trylock |
On 6/27/24 17:18, Jesper Dangaard Brouer wrote:
> Signed-off-by: Jesper Dangaard Brouer <hawk@kernel.org>
> ---
>  kernel/cgroup/rstat.c | 40 ++++++++++++++++++++++++++++++----------
>  1 file changed, 30 insertions(+), 10 deletions(-)
>
> [...]

Will it be cleaner to add a "bool *flushed" output parameter to
__cgroup_rstat_lock() so that the caller can respond differently whether
the flushed flag is set or not? In that way, you don't need to expose a
separate trylock() API.

Also your commit log is empty.

Cheers,
Longman
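For illustration, a minimal sketch of what that alternative could look like: keep the pre-patch helper body, but have it report back whether it had to wait, so the caller can treat contention as "another flusher probably did the work". The parameter name and the exact plumbing are assumptions made here for the example, not something taken from the patch or from Waiman's mail, and the snippet is a kernel-style sketch rather than compilable standalone code:

```c
/*
 * Illustrative only: one way the "output parameter" idea could look.
 * The lock helper reports whether it had to wait behind another holder,
 * so callers can react to contention without a separate trylock() entry
 * point.  Body mirrors the pre-patch __cgroup_rstat_lock().
 */
static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop,
				       bool *flushed)
	__acquires(&cgroup_rstat_lock)
{
	bool contended = !spin_trylock_irq(&cgroup_rstat_lock);

	if (contended) {
		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
		spin_lock_irq(&cgroup_rstat_lock);
	}
	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);

	if (flushed)
		*flushed = contended;	/* contention hints someone else flushed */
}
```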
On 6/27/24 18:22, Waiman Long wrote:
>
> On 6/27/24 17:18, Jesper Dangaard Brouer wrote:
>> Signed-off-by: Jesper Dangaard Brouer <hawk@kernel.org>
>> ---
>>  kernel/cgroup/rstat.c | 40 ++++++++++++++++++++++++++++++----------
>>  1 file changed, 30 insertions(+), 10 deletions(-)
>>
>> [...]
>>
> Will it be cleaner to add a "bool *flushed" output parameter to
> __cgroup_rstat_lock() so that the caller can respond differently
> whether the flushed flag is set or not? In that way, you don't need to
> expose a separate trylock() API. Also your commit log is empty.

Looking at the use case in patch 2, I would suggest the following APIs.

- bool cgroup_rstat_lock(struct cgroup *cgrp)
- bool cgroup_rstat_lock_or_flushed(struct cgroup *cgrp)

Both will return a bool indicating a flush has already been done if true.
The 2nd function will not take the lock if a flush is ongoing. Both will
wait if a flush is ongoing.

Cheers,
Longman
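One possible reading of how a caller would use those two prototypes. Only the two signatures are given in the mail above, so the early-return-without-the-lock behaviour of the second helper and the reuse of the existing unlock helper are assumptions made for this sketch:

```c
/*
 * Illustrative caller of the API shape suggested above; not kernel code,
 * and the semantics are one interpretation of the proposal.
 */
void example_flush(struct cgroup *cgrp)
{
	/*
	 * Assumed semantics: returns true if an ongoing/just-finished flush
	 * already covered us, in which case the lock was never taken here
	 * and we can bail out without flushing.
	 */
	if (cgroup_rstat_lock_or_flushed(cgrp))
		return;

	cgroup_rstat_flush_locked(cgrp);
	__cgroup_rstat_unlock(cgrp, -1);	/* existing unlock helper */
}
```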
On 28/06/2024 00.22, Waiman Long wrote:
>
> On 6/27/24 17:18, Jesper Dangaard Brouer wrote:
[...]
>>
> Will it be cleaner to add a "bool *flushed" output parameter to
> __cgroup_rstat_lock() so that the caller can respond differently whether
> the flushed flag is set or not? In that way, you don't need to expose a
> separate trylock() API. Also your commit log is empty.
>

Dropping this API in V5 anyway.

But I do think it is more natural to follow the 'trylock' API, as this is
something that people have seen before.

--Jesper
```diff
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index fb8b49437573..2a42be3a9bb3 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -279,17 +279,30 @@ __bpf_hook_end();
  * value -1 is used when obtaining the main lock else this is the CPU
  * number processed last.
  */
-static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop)
+static inline bool __cgroup_rstat_trylock(struct cgroup *cgrp, int cpu_in_loop)
+{
+	bool locked;
+
+	locked = spin_trylock_irq(&cgroup_rstat_lock);
+	if (!locked)
+		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, !locked);
+
+	return locked;
+}
+
+static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop,
+				       bool check_contention)
 	__acquires(&cgroup_rstat_lock)
 {
-	bool contended;
+	bool locked = false;
 
-	contended = !spin_trylock_irq(&cgroup_rstat_lock);
-	if (contended) {
-		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
+	if (check_contention)
+		locked = __cgroup_rstat_trylock(cgrp, cpu_in_loop);
+
+	if (!locked)
 		spin_lock_irq(&cgroup_rstat_lock);
-	}
-	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
+
+	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, !locked);
 }
 
 static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
@@ -328,7 +341,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
 			__cgroup_rstat_unlock(cgrp, cpu);
 			if (!cond_resched())
 				cpu_relax();
-			__cgroup_rstat_lock(cgrp, cpu);
+			__cgroup_rstat_lock(cgrp, cpu, true);
 		}
 	}
 }
@@ -348,9 +361,16 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
  */
 __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
 {
+	bool locked;
+
 	might_sleep();
 
-	__cgroup_rstat_lock(cgrp, -1);
+	locked = __cgroup_rstat_trylock(cgrp, -1);
+	if (!locked) {
+		/* Opportunity to ongoing flush detection */
+		__cgroup_rstat_lock(cgrp, -1, false);
+	}
+
 	cgroup_rstat_flush_locked(cgrp);
 	__cgroup_rstat_unlock(cgrp, -1);
 }
@@ -368,7 +388,7 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp)
 	__acquires(&cgroup_rstat_lock)
 {
 	might_sleep();
-	__cgroup_rstat_lock(cgrp, -1);
+	__cgroup_rstat_lock(cgrp, -1, true);
 	cgroup_rstat_flush_locked(cgrp);
 }
```
Signed-off-by: Jesper Dangaard Brouer <hawk@kernel.org>
---
 kernel/cgroup/rstat.c | 40 ++++++++++++++++++++++++++++++----------
 1 file changed, 30 insertions(+), 10 deletions(-)
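The acquisition pattern the patch introduces (try the lock first so contention is observable, then fall back to a blocking acquire) is a common idiom. As a standalone illustration only, here is a minimal userspace analogue in which a pthread mutex stands in for cgroup_rstat_lock and a plain counter stands in for the contention tracepoint; none of these names exist in the kernel code above:

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long contended_acquisitions;	/* stand-in for the tracepoint */

/* Try the uncontended fast path first; fall back to blocking if needed. */
static bool stats_lock_acquire(void)
{
	bool contended = (pthread_mutex_trylock(&stats_lock) != 0);

	if (contended) {
		pthread_mutex_lock(&stats_lock);
		contended_acquisitions++;	/* counted under the lock in this toy */
	}
	return contended;	/* caller may treat this as "someone else was flushing" */
}

int main(void)
{
	/* Single-threaded demo: the trylock always succeeds here. */
	bool was_contended = stats_lock_acquire();

	printf("contended=%d total_contended=%lu\n",
	       was_contended, contended_acquisitions);
	pthread_mutex_unlock(&stats_lock);
	return 0;
}
```

Build with `cc demo.c -pthread`; the return value is what lets a caller distinguish the uncontended fast path from the case where it had to wait behind another holder.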