Message ID | 20250213033556.9534-4-alexei.starovoitov@gmail.com (mailing list archive) |
---|---|
State | Changes Requested |
Delegated to: | BPF |
Series | bpf, mm: Introduce try_alloc_pages() |
On 2/13/25 04:35, Alexei Starovoitov wrote:
> From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
>
> In !PREEMPT_RT local_lock_irqsave() disables interrupts to protect
> critical section, but it doesn't prevent NMI, so the fully reentrant
> code cannot use local_lock_irqsave() for exclusive access.
>
> Introduce localtry_lock_t and localtry_lock_irqsave() that
> disables interrupts and sets acquired=1, so localtry_lock_irqsave()
> from NMI attempting to acquire the same lock will return false.
>
> In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
> Map localtry_lock_irqsave() to preemptible spin_trylock().
> When in hard IRQ or NMI return false right away, since
> spin_trylock() is not safe due to PI issues.
>
> Note there is no need to use local_inc for acquired variable,
> since it's a percpu variable with strict nesting scopes.
>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> ---
> include/linux/local_lock.h | 59 +++++++++++++
> include/linux/local_lock_internal.h | 123 ++++++++++++++++++++++++++++
> 2 files changed, 182 insertions(+)
>
> diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
> index 091dc0b6bdfb..05c254a5d7d3 100644
> --- a/include/linux/local_lock.h
> +++ b/include/linux/local_lock.h
> @@ -51,6 +51,65 @@
> #define local_unlock_irqrestore(lock, flags) \
> __local_unlock_irqrestore(lock, flags)
>
> +/**
> + * localtry_lock_init - Runtime initialize a lock instance
> + */
> +#define localtry_lock_init(lock) __localtry_lock_init(lock)
> +
> +/**
> + * localtry_lock - Acquire a per CPU local lock
> + * @lock: The lock variable
> + */
> +#define localtry_lock(lock) __localtry_lock(lock)
> +
> +/**
> + * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
> + * @lock: The lock variable
> + */
> +#define localtry_lock_irq(lock) __localtry_lock_irq(lock)
> +
> +/**
> + * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
> + * interrupts
> + * @lock: The lock variable
> + * @flags: Storage for interrupt flags
> + */
> +#define localtry_lock_irqsave(lock, flags) \
> + __localtry_lock_irqsave(lock, flags)
> +
> +/**
> + * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
> + * interrupts if acquired
> + * @lock: The lock variable
> + * @flags: Storage for interrupt flags
> + *
> + * The function can be used in any context such as NMI or HARDIRQ. Due to
> + * locking constrains it will _always_ fail to acquire the lock on PREEMPT_RT.

The "always fail" applies only to the NMI and HARDIRQ contexts, right? It's
not entirely obvious so it sounds worse than it is.

> +
> +#define __localtry_trylock_irqsave(lock, flags) \
> + ({ \
> + int __locked; \
> + \
> + typecheck(unsigned long, flags); \
> + flags = 0; \
> + if (in_nmi() | in_hardirq()) { \
> + __locked = 0; \

Because of this, IIUC?

> + } else { \
> + migrate_disable(); \
> + __locked = spin_trylock(this_cpu_ptr((lock))); \
> + if (!__locked) \
> + migrate_enable(); \
> + } \
> + __locked; \
> + })
> +
> #endif /* CONFIG_PREEMPT_RT */
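[To make the calling convention under discussion concrete, here is a minimal usage sketch; it is not part of the patch, and the per-CPU structure and function names below are invented for illustration. The point is that any caller of localtry_trylock_irqsave() must tolerate failure: on !PREEMPT_RT it fails when the lock is already held on this CPU (e.g. an NMI interrupting the critical section), and on PREEMPT_RT it additionally always fails in NMI/hardirq context.]

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical example only: per-CPU stats protected by localtry_lock_t. */
struct nmi_stats {
	localtry_lock_t lock;
	u64 handled;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats) = {
	.lock = INIT_LOCALTRY_LOCK(lock),
};

/* May be called from any context, including NMI (e.g. a BPF program). */
static bool nmi_stats_bump(void)
{
	struct nmi_stats *s;
	unsigned long flags;

	if (!localtry_trylock_irqsave(&nmi_stats.lock, flags))
		return false;	/* caller falls back or defers the update */

	s = this_cpu_ptr(&nmi_stats);
	s->handled++;
	localtry_unlock_irqrestore(&nmi_stats.lock, flags);
	return true;
}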
On Thu, Feb 13, 2025 at 7:04 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 2/13/25 04:35, Alexei Starovoitov wrote:
> > From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> >
> > In !PREEMPT_RT local_lock_irqsave() disables interrupts to protect
> > critical section, but it doesn't prevent NMI, so the fully reentrant
> > code cannot use local_lock_irqsave() for exclusive access.
> >
> > Introduce localtry_lock_t and localtry_lock_irqsave() that
> > disables interrupts and sets acquired=1, so localtry_lock_irqsave()
> > from NMI attempting to acquire the same lock will return false.
> >
> > In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
> > Map localtry_lock_irqsave() to preemptible spin_trylock().
> > When in hard IRQ or NMI return false right away, since
> > spin_trylock() is not safe due to PI issues.
> >
> > Note there is no need to use local_inc for acquired variable,
> > since it's a percpu variable with strict nesting scopes.
> >
> > Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> > Signed-off-by: Alexei Starovoitov <ast@kernel.org>
> > ---
> > include/linux/local_lock.h | 59 +++++++++++++
> > include/linux/local_lock_internal.h | 123 ++++++++++++++++++++++++++++
> > 2 files changed, 182 insertions(+)
> >
> > diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
> > index 091dc0b6bdfb..05c254a5d7d3 100644
> > --- a/include/linux/local_lock.h
> > +++ b/include/linux/local_lock.h
> > @@ -51,6 +51,65 @@
> > #define local_unlock_irqrestore(lock, flags) \
> > __local_unlock_irqrestore(lock, flags)
> >
> > +/**
> > + * localtry_lock_init - Runtime initialize a lock instance
> > + */
> > +#define localtry_lock_init(lock) __localtry_lock_init(lock)
> > +
> > +/**
> > + * localtry_lock - Acquire a per CPU local lock
> > + * @lock: The lock variable
> > + */
> > +#define localtry_lock(lock) __localtry_lock(lock)
> > +
> > +/**
> > + * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
> > + * @lock: The lock variable
> > + */
> > +#define localtry_lock_irq(lock) __localtry_lock_irq(lock)
> > +
> > +/**
> > + * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
> > + * interrupts
> > + * @lock: The lock variable
> > + * @flags: Storage for interrupt flags
> > + */
> > +#define localtry_lock_irqsave(lock, flags) \
> > + __localtry_lock_irqsave(lock, flags)
> > +
> > +/**
> > + * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
> > + * interrupts if acquired
> > + * @lock: The lock variable
> > + * @flags: Storage for interrupt flags
> > + *
> > + * The function can be used in any context such as NMI or HARDIRQ. Due to
> > + * locking constrains it will _always_ fail to acquire the lock on PREEMPT_RT.
>
> The "always fail" applies only to the NMI and HARDIRQ contexts, right? It's
> not entirely obvious so it sounds worse than it is.
>
> > +
> > +#define __localtry_trylock_irqsave(lock, flags) \
> > + ({ \
> > + int __locked; \
> > + \
> > + typecheck(unsigned long, flags); \
> > + flags = 0; \
> > + if (in_nmi() | in_hardirq()) { \
> > + __locked = 0; \
>
> Because of this, IIUC?

Right.
It's part of commit log:
+ In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
+ Map localtry_lock_irqsave() to preemptible spin_trylock().
+ When in hard IRQ or NMI return false right away, since
+ spin_trylock() is not safe due to PI issues.

Steven explained it in detail in some earlier thread.

realtime is hard. bpf and realtime together are even harder.
Things got much better over the years, but plenty of work ahead.
I can go in detail, but offtopic for this thread.
On Thu, 13 Feb 2025 07:23:01 -0800
Alexei Starovoitov <alexei.starovoitov@gmail.com> wrote:
> realtime is hard. bpf and realtime together are even harder.
Going for an LWN Quote-of-the-week ? ;-)
-- Steve
On 2/13/25 04:35, Alexei Starovoitov wrote:
> From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
>
> In !PREEMPT_RT local_lock_irqsave() disables interrupts to protect
> critical section, but it doesn't prevent NMI, so the fully reentrant
> code cannot use local_lock_irqsave() for exclusive access.
>
> Introduce localtry_lock_t and localtry_lock_irqsave() that
> disables interrupts and sets acquired=1, so localtry_lock_irqsave()
> from NMI attempting to acquire the same lock will return false.
>
> In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
> Map localtry_lock_irqsave() to preemptible spin_trylock().
> When in hard IRQ or NMI return false right away, since
> spin_trylock() is not safe due to PI issues.
>
> Note there is no need to use local_inc for acquired variable,
> since it's a percpu variable with strict nesting scopes.
>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

...

>
> +/* localtry_lock_t variants */
> +
> +#define __localtry_lock_init(lock) \
> +do { \
> + __local_lock_init(&(lock)->llock); \
> + WRITE_ONCE(&(lock)->acquired, 0); \

This needs to be WRITE_ONCE((lock)->acquired, 0);

I'm adopting this implementation for my next slab sheaves RFC. But I'll want
localtry_trylock() without _irqsave too, so I've added it locally. Posting
below with the init fix and making the PREEMPT_RT comment clear. Feel free
to fold everything, it would make it easier for me. Or just the fixes, if
you don't want code you don't use yourself.

----8<----
From c4f47afa3d06367d8d54662d6c3a76d3ab6e349d Mon Sep 17 00:00:00 2001
From: Vlastimil Babka <vbabka@suse.cz>
Date: Thu, 13 Feb 2025 19:38:31 +0100
Subject: [PATCH] locking/local_lock: add localtry_trylock()

Add a localtry_trylock() variant without _irqsave that will be used in
slab sheaves implementation. Thanks to only disabling preemption and not
irqs, it has a lower overhead. It's not necessary to disable irqs to
avoid a deadlock if the irq context uses trylock and can handle
failures.

Also make the comment of localtry_trylock_irqsave() more clear, and fix a
compilation failure in localtry_lock_init().

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 include/linux/local_lock.h | 13 +++++++++++-
 include/linux/local_lock_internal.h | 31 +++++++++++++++++++++++++----
 2 files changed, 39 insertions(+), 5 deletions(-)

diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 05c254a5d7d3..1a0bc35839e3 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -77,6 +77,16 @@
 #define localtry_lock_irqsave(lock, flags) \
 __localtry_lock_irqsave(lock, flags)

+/**
+ * localtry_trylock - Try to acquire a per CPU local lock.
+ * @lock: The lock variable
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constrains it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
+ */
+#define localtry_trylock(lock) __localtry_trylock(lock)
+
 /**
 * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
 * interrupts if acquired
@@ -84,7 +94,8 @@
 * @flags: Storage for interrupt flags
 *
 * The function can be used in any context such as NMI or HARDIRQ. Due to
- * locking constrains it will _always_ fail to acquire the lock on PREEMPT_RT.
+ * locking constrains it will _always_ fail to acquire the lock in NMI or
+ * HARDIRQ context on PREEMPT_RT.
 */
 #define localtry_trylock_irqsave(lock, flags) \
 __localtry_trylock_irqsave(lock, flags)
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index c1369b300777..67bd13d142fa 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -137,7 +137,7 @@ do { \
 #define __localtry_lock_init(lock) \
 do { \
 __local_lock_init(&(lock)->llock); \
- WRITE_ONCE(&(lock)->acquired, 0); \
+ WRITE_ONCE((lock)->acquired, 0); \
 } while (0)

 #define __localtry_lock(lock) \
@@ -167,6 +167,24 @@ do { \
 WRITE_ONCE(lt->acquired, 1); \
 } while (0)

+#define __localtry_trylock(lock) \
+ ({ \
+ localtry_lock_t *lt; \
+ bool _ret; \
+ \
+ preempt_disable(); \
+ lt = this_cpu_ptr(lock); \
+ if (!READ_ONCE(lt->acquired)) { \
+ WRITE_ONCE(lt->acquired, 1); \
+ local_trylock_acquire(&lt->llock); \
+ _ret = true; \
+ } else { \
+ _ret = false; \
+ preempt_enable(); \
+ } \
+ _ret; \
+ })
+
 #define __localtry_trylock_irqsave(lock, flags) \
 ({ \
 localtry_lock_t *lt; \
@@ -275,12 +293,10 @@ do { \
 #define __localtry_unlock_irq(lock) __local_unlock(lock)
 #define __localtry_unlock_irqrestore(lock, flags) __local_unlock_irqrestore(lock, flags)

-#define __localtry_trylock_irqsave(lock, flags) \
+#define __localtry_trylock(lock) \
 ({ \
 int __locked; \
 \
- typecheck(unsigned long, flags); \
- flags = 0; \
 if (in_nmi() | in_hardirq()) { \
 __locked = 0; \
 } else { \
@@ -292,4 +308,11 @@ do { \
 __locked; \
 })

+#define __localtry_trylock_irqsave(lock, flags) \
+ ({ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ __localtry_trylock(lock); \
+ })
+
 #endif /* CONFIG_PREEMPT_RT */
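[As a rough illustration of the commit message's point that irqs need not be disabled when the interrupting context also uses trylock and can handle failure, a sketch along these lines might look as follows; the per-CPU cache and function names are purely hypothetical and not taken from the sheaves series.]

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU single-object cache; names are illustrative only. */
struct obj_cache {
	localtry_lock_t lock;
	void *slot;
};

static DEFINE_PER_CPU(struct obj_cache, obj_cache) = {
	.lock = INIT_LOCALTRY_LOCK(lock),
};

/* Process context: only disables preemption, no irqsave overhead. */
static bool cache_put(void *obj)
{
	struct obj_cache *c;
	bool stored = false;

	if (!localtry_trylock(&obj_cache.lock))
		return false;		/* take the slow path instead */

	c = this_cpu_ptr(&obj_cache);
	if (!c->slot) {
		c->slot = obj;
		stored = true;
	}
	localtry_unlock(&obj_cache.lock);
	return stored;
}

/*
 * Hardirq context may interrupt cache_put() on the same CPU; because it
 * also uses trylock and tolerates failure, there is no deadlock and no
 * need for the _irqsave variant.
 */
static void *cache_get_irq(void)
{
	struct obj_cache *c;
	void *obj;

	if (!localtry_trylock(&obj_cache.lock))
		return NULL;

	c = this_cpu_ptr(&obj_cache);
	obj = c->slot;
	c->slot = NULL;
	localtry_unlock(&obj_cache.lock);
	return obj;
}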
On 2/13/25 16:23, Alexei Starovoitov wrote:
> On Thu, Feb 13, 2025 at 7:04 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>>
>>
>> > +
>> > +#define __localtry_trylock_irqsave(lock, flags) \
>> > + ({ \
>> > + int __locked; \
>> > + \
>> > + typecheck(unsigned long, flags); \
>> > + flags = 0; \
>> > + if (in_nmi() | in_hardirq()) { \
>> > + __locked = 0; \
>>
>> Because of this, IIUC?
>
> Right.
> It's part of commit log:
> + In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
> + Map localtry_lock_irqsave() to preemptible spin_trylock().
> + When in hard IRQ or NMI return false right away, since
> + spin_trylock() is not safe due to PI issues.
>
> Steven explained it in detail in some earlier thread.
>
> realtime is hard. bpf and realtime together are even harder.
> Things got much better over the years, but plenty of work ahead.
> I can go in detail, but offtopic for this thread.

Thanks, it's fine. Just that the comment of the function could be more clear
then, so people don't have to check implementation/commit log/lore discussions :)
On Fri, Feb 14, 2025 at 4:11 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 2/13/25 04:35, Alexei Starovoitov wrote:
> > From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> >
> > In !PREEMPT_RT local_lock_irqsave() disables interrupts to protect
> > critical section, but it doesn't prevent NMI, so the fully reentrant
> > code cannot use local_lock_irqsave() for exclusive access.
> >
> > Introduce localtry_lock_t and localtry_lock_irqsave() that
> > disables interrupts and sets acquired=1, so localtry_lock_irqsave()
> > from NMI attempting to acquire the same lock will return false.
> >
> > In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
> > Map localtry_lock_irqsave() to preemptible spin_trylock().
> > When in hard IRQ or NMI return false right away, since
> > spin_trylock() is not safe due to PI issues.
> >
> > Note there is no need to use local_inc for acquired variable,
> > since it's a percpu variable with strict nesting scopes.
> >
> > Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> > Signed-off-by: Alexei Starovoitov <ast@kernel.org>
>
> ...
> >
> > +/* localtry_lock_t variants */
> > +
> > +#define __localtry_lock_init(lock) \
> > +do { \
> > + __local_lock_init(&(lock)->llock); \
> > + WRITE_ONCE(&(lock)->acquired, 0); \
>
> This needs to be WRITE_ONCE((lock)->acquired, 0);

Thanks. Good catch.

> I'm adopting this implementation for my next slab sheaves RFC. But I'll want
> localtry_trylock() without _irqsave too, so I've added it locally. Posting
> below with the init fix and making the PREEMPT_RT comment clear. Feel free
> to fold everything, it would make it easier for me. Or just the fixes, if
> you don't want code you don't use yourself.

+1

> ----8<----
> From c4f47afa3d06367d8d54662d6c3a76d3ab6e349d Mon Sep 17 00:00:00 2001
> From: Vlastimil Babka <vbabka@suse.cz>
> Date: Thu, 13 Feb 2025 19:38:31 +0100
> Subject: [PATCH] locking/local_lock: add localtry_trylock()
>
> Add a localtry_trylock() variant without _irqsave that will be used in
> slab sheaves implementation. Thanks to only disabling preemption and not
> irqs, it has a lower overhead. It's not necessary to disable irqs to
> avoid a deadlock if the irq context uses trylock and can handle
> failures.
>
> Also make the comment of localtry_trylock_irqsave() more clear, and fix a
> compilation failure in localtry_lock_init().
>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
> include/linux/local_lock.h | 13 +++++++++++-
> include/linux/local_lock_internal.h | 31 +++++++++++++++++++++++++----
> 2 files changed, 39 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
> index 05c254a5d7d3..1a0bc35839e3 100644
> --- a/include/linux/local_lock.h
> +++ b/include/linux/local_lock.h
> @@ -77,6 +77,16 @@
> #define localtry_lock_irqsave(lock, flags) \
> __localtry_lock_irqsave(lock, flags)
>
> +/**
> + * localtry_trylock - Try to acquire a per CPU local lock.
> + * @lock: The lock variable
> + *
> + * The function can be used in any context such as NMI or HARDIRQ. Due to
> + * locking constrains it will _always_ fail to acquire the lock in NMI or
> + * HARDIRQ context on PREEMPT_RT.
> + */
> +#define localtry_trylock(lock) __localtry_trylock(lock)
> +
> /**
> * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
> * interrupts if acquired
> @@ -84,7 +94,8 @@
> * @flags: Storage for interrupt flags
> *
> * The function can be used in any context such as NMI or HARDIRQ. Due to
> - * locking constrains it will _always_ fail to acquire the lock on PREEMPT_RT.
> + * locking constrains it will _always_ fail to acquire the lock in NMI or
> + * HARDIRQ context on PREEMPT_RT.

+1 as well.

> */
> #define localtry_trylock_irqsave(lock, flags) \
> __localtry_trylock_irqsave(lock, flags)
> diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
> index c1369b300777..67bd13d142fa 100644
> --- a/include/linux/local_lock_internal.h
> +++ b/include/linux/local_lock_internal.h
> @@ -137,7 +137,7 @@ do { \
> #define __localtry_lock_init(lock) \
> do { \
> __local_lock_init(&(lock)->llock); \
> - WRITE_ONCE(&(lock)->acquired, 0); \
> + WRITE_ONCE((lock)->acquired, 0); \
> } while (0)
>
> #define __localtry_lock(lock) \
> @@ -167,6 +167,24 @@ do { \
> WRITE_ONCE(lt->acquired, 1); \
> } while (0)
>
> +#define __localtry_trylock(lock) \
> + ({ \
> + localtry_lock_t *lt; \
> + bool _ret; \
> + \
> + preempt_disable(); \
> + lt = this_cpu_ptr(lock); \
> + if (!READ_ONCE(lt->acquired)) { \
> + WRITE_ONCE(lt->acquired, 1); \
> + local_trylock_acquire(&lt->llock); \
> + _ret = true; \
> + } else { \
> + _ret = false; \
> + preempt_enable(); \
> + } \
> + _ret; \
> + })
> +
> #define __localtry_trylock_irqsave(lock, flags) \
> ({ \
> localtry_lock_t *lt; \
> @@ -275,12 +293,10 @@ do { \
> #define __localtry_unlock_irq(lock) __local_unlock(lock)
> #define __localtry_unlock_irqrestore(lock, flags) __local_unlock_irqrestore(lock, flags)
>
> -#define __localtry_trylock_irqsave(lock, flags) \
> +#define __localtry_trylock(lock) \
> ({ \
> int __locked; \
> \
> - typecheck(unsigned long, flags); \
> - flags = 0; \
> if (in_nmi() | in_hardirq()) { \
> __locked = 0; \
> } else { \
> @@ -292,4 +308,11 @@ do { \
> __locked; \
> })
>
> +#define __localtry_trylock_irqsave(lock, flags) \
> + ({ \
> + typecheck(unsigned long, flags); \
> + flags = 0; \
> + __localtry_trylock(lock); \
> + })
> +

All makes sense to me.

Since respin is needed, I can fold the above fix/feature and
push it into a branch with stable sha-s that we both can
use as a base ?
Or you can push just this one patch into a stable branch and I can pull it
and apply the rest on top.
On 2/14/25 19:32, Alexei Starovoitov wrote:
>> ({ \
>> localtry_lock_t *lt; \
>> @@ -275,12 +293,10 @@ do { \
>> #define __localtry_unlock_irq(lock) __local_unlock(lock)
>> #define __localtry_unlock_irqrestore(lock, flags) __local_unlock_irqrestore(lock, flags)
>>
>> -#define __localtry_trylock_irqsave(lock, flags) \
>> +#define __localtry_trylock(lock) \
>> ({ \
>> int __locked; \
>> \
>> - typecheck(unsigned long, flags); \
>> - flags = 0; \
>> if (in_nmi() | in_hardirq()) { \
>> __locked = 0; \
>> } else { \
>> @@ -292,4 +308,11 @@ do { \
>> __locked; \
>> })
>>
>> +#define __localtry_trylock_irqsave(lock, flags) \
>> + ({ \
>> + typecheck(unsigned long, flags); \
>> + flags = 0; \
>> + __localtry_trylock(lock); \
>> + })
>> +
>
> All makes sense to me.
>
> Since respin is needed, I can fold the above fix/feature and
> push it into a branch with stable sha-s that we both can
> use as a base ?

I doubt sheaves will be included in 6.15 so it's fine enough for me if you
fold this and perhaps order the result as patch 1?

> Or you can push just this one patch into a stable branch and I can pull it
> and apply the rest on top.

Ideally we'd have PeterZ blessing before we get to stable commit id's...

Thanks.
On 2025-02-14 19:48:57 [+0100], Vlastimil Babka wrote:
> > Since respin is needed, I can fold the above fix/feature and
> > push it into a branch with stable sha-s that we both can
> > use as a base ?
>
> I doubt sheaves will be included in 6.15 so it's fine enough for me if you
> fold this and perhaps order the result as patch 1?
>
> > Or you can push just this one patch into a stable branch and I can pull it
> > and apply the rest on top.
>
> Ideally we'd have PeterZ blessing before we get to stable commit id's...

Yes. As noted in the other thread, I'm all fine with the proposed changes.

> Thanks.

Sebastian
On 2/13/25 04:35, Alexei Starovoitov wrote:
> From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
>
> In !PREEMPT_RT local_lock_irqsave() disables interrupts to protect
> critical section, but it doesn't prevent NMI, so the fully reentrant
> code cannot use local_lock_irqsave() for exclusive access.
>
> Introduce localtry_lock_t and localtry_lock_irqsave() that
> disables interrupts and sets acquired=1, so localtry_lock_irqsave()
> from NMI attempting to acquire the same lock will return false.
>
> In PREEMPT_RT local_lock_irqsave() maps to preemptible spin_lock().
> Map localtry_lock_irqsave() to preemptible spin_trylock().
> When in hard IRQ or NMI return false right away, since
> spin_trylock() is not safe due to PI issues.
>
> Note there is no need to use local_inc for acquired variable,
> since it's a percpu variable with strict nesting scopes.
>
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

I'm not the maintainer of this area, but with the fixes/addition I proposed,
and having use for this lock variant myself, I think it's fair to add, fwiw:

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Thanks.
diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h
index 091dc0b6bdfb..05c254a5d7d3 100644
--- a/include/linux/local_lock.h
+++ b/include/linux/local_lock.h
@@ -51,6 +51,65 @@
 #define local_unlock_irqrestore(lock, flags) \
 __local_unlock_irqrestore(lock, flags)

+/**
+ * localtry_lock_init - Runtime initialize a lock instance
+ */
+#define localtry_lock_init(lock) __localtry_lock_init(lock)
+
+/**
+ * localtry_lock - Acquire a per CPU local lock
+ * @lock: The lock variable
+ */
+#define localtry_lock(lock) __localtry_lock(lock)
+
+/**
+ * localtry_lock_irq - Acquire a per CPU local lock and disable interrupts
+ * @lock: The lock variable
+ */
+#define localtry_lock_irq(lock) __localtry_lock_irq(lock)
+
+/**
+ * localtry_lock_irqsave - Acquire a per CPU local lock, save and disable
+ * interrupts
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ */
+#define localtry_lock_irqsave(lock, flags) \
+ __localtry_lock_irqsave(lock, flags)
+
+/**
+ * localtry_trylock_irqsave - Try to acquire a per CPU local lock, save and disable
+ * interrupts if acquired
+ * @lock: The lock variable
+ * @flags: Storage for interrupt flags
+ *
+ * The function can be used in any context such as NMI or HARDIRQ. Due to
+ * locking constrains it will _always_ fail to acquire the lock on PREEMPT_RT.
+ */
+#define localtry_trylock_irqsave(lock, flags) \
+ __localtry_trylock_irqsave(lock, flags)
+
+/**
+ * local_unlock - Release a per CPU local lock
+ * @lock: The lock variable
+ */
+#define localtry_unlock(lock) __localtry_unlock(lock)
+
+/**
+ * local_unlock_irq - Release a per CPU local lock and enable interrupts
+ * @lock: The lock variable
+ */
+#define localtry_unlock_irq(lock) __localtry_unlock_irq(lock)
+
+/**
+ * localtry_unlock_irqrestore - Release a per CPU local lock and restore
+ * interrupt flags
+ * @lock: The lock variable
+ * @flags: Interrupt flags to restore
+ */
+#define localtry_unlock_irqrestore(lock, flags) \
+ __localtry_unlock_irqrestore(lock, flags)
+
 DEFINE_GUARD(local_lock, local_lock_t __percpu*, local_lock(_T), local_unlock(_T))
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 8dd71fbbb6d2..c1369b300777 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -15,6 +15,11 @@ typedef struct {
 #endif
 } local_lock_t;

+typedef struct {
+ local_lock_t llock;
+ unsigned int acquired;
+} localtry_lock_t;
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define LOCAL_LOCK_DEBUG_INIT(lockname) \
 .dep_map = { \
@@ -31,6 +36,13 @@ static inline void local_lock_acquire(local_lock_t *l)
 l->owner = current;
 }

+static inline void local_trylock_acquire(local_lock_t *l)
+{
+ lock_map_acquire_try(&l->dep_map);
+ DEBUG_LOCKS_WARN_ON(l->owner);
+ l->owner = current;
+}
+
 static inline void local_lock_release(local_lock_t *l)
 {
 DEBUG_LOCKS_WARN_ON(l->owner != current);
@@ -45,11 +57,13 @@ static inline void local_lock_debug_init(local_lock_t *l)
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
 static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_trylock_acquire(local_lock_t *l) { }
 static inline void local_lock_release(local_lock_t *l) { }
 static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */

 #define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
+#define INIT_LOCALTRY_LOCK(lockname) { .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}

 #define __local_lock_init(lock) \
 do { \
@@ -118,6 +132,86 @@ do { \
 #define __local_unlock_nested_bh(lock) \
 local_lock_release(this_cpu_ptr(lock))

+/* localtry_lock_t variants */
+
+#define __localtry_lock_init(lock) \
+do { \
+ __local_lock_init(&(lock)->llock); \
+ WRITE_ONCE(&(lock)->acquired, 0); \
+} while (0)
+
+#define __localtry_lock(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ preempt_disable(); \
+ lt = this_cpu_ptr(lock); \
+ local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_lock_irq(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ local_irq_disable(); \
+ lt = this_cpu_ptr(lock); \
+ local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_lock_irqsave(lock, flags) \
+ do { \
+ localtry_lock_t *lt; \
+ local_irq_save(flags); \
+ lt = this_cpu_ptr(lock); \
+ local_lock_acquire(&lt->llock); \
+ WRITE_ONCE(lt->acquired, 1); \
+ } while (0)
+
+#define __localtry_trylock_irqsave(lock, flags) \
+ ({ \
+ localtry_lock_t *lt; \
+ bool _ret; \
+ \
+ local_irq_save(flags); \
+ lt = this_cpu_ptr(lock); \
+ if (!READ_ONCE(lt->acquired)) { \
+ WRITE_ONCE(lt->acquired, 1); \
+ local_trylock_acquire(&lt->llock); \
+ _ret = true; \
+ } else { \
+ _ret = false; \
+ local_irq_restore(flags); \
+ } \
+ _ret; \
+ })
+
+#define __localtry_unlock(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
+ local_lock_release(&lt->llock); \
+ preempt_enable(); \
+ } while (0)
+
+#define __localtry_unlock_irq(lock) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
+ local_lock_release(&lt->llock); \
+ local_irq_enable(); \
+ } while (0)
+
+#define __localtry_unlock_irqrestore(lock, flags) \
+ do { \
+ localtry_lock_t *lt; \
+ lt = this_cpu_ptr(lock); \
+ WRITE_ONCE(lt->acquired, 0); \
+ local_lock_release(&lt->llock); \
+ local_irq_restore(flags); \
+ } while (0)
+
 #else /* !CONFIG_PREEMPT_RT */

 /*
@@ -125,8 +219,10 @@
 * critical section while staying preemptible.
 */
 typedef spinlock_t local_lock_t;
+typedef spinlock_t localtry_lock_t;

 #define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+#define INIT_LOCALTRY_LOCK(lockname) INIT_LOCAL_LOCK(lockname)

 #define __local_lock_init(l) \
 do { \
@@ -169,4 +265,31 @@ do { \
 spin_unlock(this_cpu_ptr((lock))); \
 } while (0)

+/* localtry_lock_t variants */
+
+#define __localtry_lock_init(lock) __local_lock_init(lock)
+#define __localtry_lock(lock) __local_lock(lock)
+#define __localtry_lock_irq(lock) __local_lock(lock)
+#define __localtry_lock_irqsave(lock, flags) __local_lock_irqsave(lock, flags)
+#define __localtry_unlock(lock) __local_unlock(lock)
+#define __localtry_unlock_irq(lock) __local_unlock(lock)
+#define __localtry_unlock_irqrestore(lock, flags) __local_unlock_irqrestore(lock, flags)
+
+#define __localtry_trylock_irqsave(lock, flags) \
+ ({ \
+ int __locked; \
+ \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+ if (in_nmi() | in_hardirq()) { \
+ __locked = 0; \
+ } else { \
+ migrate_disable(); \
+ __locked = spin_trylock(this_cpu_ptr((lock))); \
+ if (!__locked) \
+ migrate_enable(); \
+ } \
+ __locked; \
+ })
+
 #endif /* CONFIG_PREEMPT_RT */
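[Reading the two implementations side by side, the intended split is that ordinary task/softirq paths take the lock unconditionally while reentrant (NMI-capable) paths use the trylock and back off. A small hypothetical sketch of that pairing follows; the per-CPU ring and function names are invented for illustration and are not part of this series.]

#include <linux/kernel.h>
#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU event ring protected by localtry_lock_t. */
struct event_ring {
	localtry_lock_t lock;
	unsigned int head;
	unsigned int slots[64];
};

static DEFINE_PER_CPU(struct event_ring, event_ring) = {
	.lock = INIT_LOCALTRY_LOCK(lock),
};

/* Ordinary context: acquisition cannot fail, like local_lock_irqsave(). */
static void ring_add(unsigned int val)
{
	struct event_ring *r;
	unsigned long flags;

	localtry_lock_irqsave(&event_ring.lock, flags);
	r = this_cpu_ptr(&event_ring);
	r->slots[r->head++ % ARRAY_SIZE(r->slots)] = val;
	localtry_unlock_irqrestore(&event_ring.lock, flags);
}

/* Fully reentrant context (NMI): the same update, but it may be refused. */
static bool ring_add_nmi(unsigned int val)
{
	struct event_ring *r;
	unsigned long flags;

	/* Fails if this CPU already holds the lock (acquired == 1). */
	if (!localtry_trylock_irqsave(&event_ring.lock, flags))
		return false;
	r = this_cpu_ptr(&event_ring);
	r->slots[r->head++ % ARRAY_SIZE(r->slots)] = val;
	localtry_unlock_irqrestore(&event_ring.lock, flags);
	return true;
}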