
[v3,6/6] pid: drop irq disablement around pidmap_lock

Message ID 20250201163106.28912-7-mjguzik@gmail.com (mailing list archive)
State New
Series reduce tasklist_lock hold time on exit and do some

Commit Message

Mateusz Guzik Feb. 1, 2025, 4:31 p.m. UTC
It no longer serves any purpose now that the tasklist_lock ->
pidmap_lock ordering got eliminated.

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
---
 kernel/pid.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

Comments

Oleg Nesterov Feb. 1, 2025, 5:42 p.m. UTC | #1
I'll try to review on Monday, but I see nothing obviously wrong after
a very quick glance. I am not sure 4/6 is really useful, but I won't
argue.

However, shouldn't this patch also remove the comment which explains
the possible lock inversion? Above put_pid(),

	/*
	 * Note: disable interrupts while the pidmap_lock is held as an
	 * interrupt might come in and do read_lock(&tasklist_lock).
	 *
	 * If we don't disable interrupts there is a nasty deadlock between
	 * detach_pid()->free_pid() and another cpu that does
	 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
	 * read_lock(&tasklist_lock);
	 *
	 * After we clean up the tasklist_lock and know there are no
	 * irq handlers that take it we can leave the interrupts enabled.
	 * For now it is easier to be safe than to prove it can't happen.
	 */
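
(For reference, the deadlock that comment describes plays out roughly as
sketched below; the interleaving is an illustrative reconstruction, not
text from the thread.)

	CPU 0					CPU 1
	-----					-----
	write_lock_irq(&tasklist_lock);		spin_lock(&pidmap_lock);
	detach_pid()
	  free_pid()				<interrupt>
	    spin_lock(&pidmap_lock); // spins	  read_lock(&tasklist_lock); // spins

	CPU 0 waits for pidmap_lock, which CPU 1 cannot release until its
	interrupt handler gets tasklist_lock, which CPU 0 holds for write:
	neither side makes progress unless pidmap_lock holders keep
	interrupts disabled.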

Oleg.

On 02/01, Mateusz Guzik wrote:
>
> It no longer serves any purpose now that the tasklist_lock ->
> pidmap_lock ordering got eliminated.
> 
> Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
> ---
>  kernel/pid.c | 23 +++++++++++------------
>  1 file changed, 11 insertions(+), 12 deletions(-)
> 
> diff --git a/kernel/pid.c b/kernel/pid.c
> index 73625f28c166..900193de4232 100644
> --- a/kernel/pid.c
> +++ b/kernel/pid.c
> @@ -115,11 +115,10 @@ static void delayed_put_pid(struct rcu_head *rhp)
>  void free_pid(struct pid *pid)
>  {
>  	int i;
> -	unsigned long flags;
>  
>  	lockdep_assert_not_held(&tasklist_lock);
>  
> -	spin_lock_irqsave(&pidmap_lock, flags);
> +	spin_lock(&pidmap_lock);
>  	for (i = 0; i <= pid->level; i++) {
>  		struct upid *upid = pid->numbers + i;
>  		struct pid_namespace *ns = upid->ns;
> @@ -142,7 +141,7 @@ void free_pid(struct pid *pid)
>  		idr_remove(&ns->idr, upid->nr);
>  	}
>  	pidfs_remove_pid(pid);
> -	spin_unlock_irqrestore(&pidmap_lock, flags);
> +	spin_unlock(&pidmap_lock);
>  
>  	call_rcu(&pid->rcu, delayed_put_pid);
>  }
> @@ -210,7 +209,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
>  		}
>  
>  		idr_preload(GFP_KERNEL);
> -		spin_lock_irq(&pidmap_lock);
> +		spin_lock(&pidmap_lock);
>  
>  		if (tid) {
>  			nr = idr_alloc(&tmp->idr, NULL, tid,
> @@ -237,7 +236,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
>  			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
>  					      pid_max, GFP_ATOMIC);
>  		}
> -		spin_unlock_irq(&pidmap_lock);
> +		spin_unlock(&pidmap_lock);
>  		idr_preload_end();
>  
>  		if (nr < 0) {
> @@ -271,7 +270,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
>  
>  	upid = pid->numbers + ns->level;
>  	idr_preload(GFP_KERNEL);
> -	spin_lock_irq(&pidmap_lock);
> +	spin_lock(&pidmap_lock);
>  	if (!(ns->pid_allocated & PIDNS_ADDING))
>  		goto out_unlock;
>  	pidfs_add_pid(pid);
> @@ -280,18 +279,18 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
>  		idr_replace(&upid->ns->idr, pid, upid->nr);
>  		upid->ns->pid_allocated++;
>  	}
> -	spin_unlock_irq(&pidmap_lock);
> +	spin_unlock(&pidmap_lock);
>  	idr_preload_end();
>  
>  	return pid;
>  
>  out_unlock:
> -	spin_unlock_irq(&pidmap_lock);
> +	spin_unlock(&pidmap_lock);
>  	idr_preload_end();
>  	put_pid_ns(ns);
>  
>  out_free:
> -	spin_lock_irq(&pidmap_lock);
> +	spin_lock(&pidmap_lock);
>  	while (++i <= ns->level) {
>  		upid = pid->numbers + i;
>  		idr_remove(&upid->ns->idr, upid->nr);
> @@ -301,7 +300,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
>  	if (ns->pid_allocated == PIDNS_ADDING)
>  		idr_set_cursor(&ns->idr, 0);
>  
> -	spin_unlock_irq(&pidmap_lock);
> +	spin_unlock(&pidmap_lock);
>  
>  	kmem_cache_free(ns->pid_cachep, pid);
>  	return ERR_PTR(retval);
> @@ -309,9 +308,9 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
>  
>  void disable_pid_allocation(struct pid_namespace *ns)
>  {
> -	spin_lock_irq(&pidmap_lock);
> +	spin_lock(&pidmap_lock);
>  	ns->pid_allocated &= ~PIDNS_ADDING;
> -	spin_unlock_irq(&pidmap_lock);
> +	spin_unlock(&pidmap_lock);
>  }
>  
>  struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
> -- 
> 2.43.0
>
Oleg Nesterov Feb. 1, 2025, 5:45 p.m. UTC | #2
On 02/01, Oleg Nesterov wrote:
>
> However, shouldn't this patch also remove the comment which explains
> the possible lock inversion? Above put_pid(),
>
> 	/*
> 	 * Note: disable interrupts while the pidmap_lock is held as an
> 	 * interrupt might come in and do read_lock(&tasklist_lock).
> 	 *
> 	 * If we don't disable interrupts there is a nasty deadlock between
> 	 * detach_pid()->free_pid() and another cpu that does
> 	 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
> 	 * read_lock(&tasklist_lock);
> 	 *
> 	 * After we clean up the tasklist_lock and know there are no
> 	 * irq handlers that take it we can leave the interrupts enabled.
> 	 * For now it is easier to be safe than to prove it can't happen.
> 	 */

Ah, sorry, please forget, you did it in the previous patch, which probably
needs some more discussion...

Oleg.
David Laight Feb. 1, 2025, 6:19 p.m. UTC | #3
On Sat,  1 Feb 2025 17:31:06 +0100
Mateusz Guzik <mjguzik@gmail.com> wrote:

> It no longer serves any purpose now that the tasklist_lock ->
> pidmap_lock ordering got eliminated.

Not disabling interrupts may make things worse.
It is a trade-off between 'interrupt latency' and 'lock hold time'.

If interrupts are disabled then (clearly) they can get delayed because
the lock is held.
Provided the lock is only held for a short time it probably doesn't matter.
Indeed, unless it is the worst one, it probably doesn't matter at all.
After all spin locks shouldn't really be held for significant periods.

OTOH if the lock doesn't disable interrupts then an interrupt will
increase the length of time a lock is held for.
This can be significant - and I mean upwards of 1ms.
Network interrupts can take a while - and then the work that is deferred
to 'softint' context happens as well (I don't think a spinlock stops
the softint code).

I've a feeling that unless a spin lock is held for 'far longer than one
should ever be held for' then you really want to disable interrupts.

In this case if you get a network interrupt + softint while the pidmap_lock
is held then all other cpus won't be able to acquire the lock until the
network code finishes.

The same issue makes futex pretty much useless in anything trying to do
audio processing.

	David
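
In kernel terms the two variants being weighed look like this (a minimal
sketch, with pidmap_lock standing in for any short-hold lock):

	unsigned long flags;

	/* irqs off while held: an interrupt cannot land inside the
	 * critical section, so waiters never spin for its duration */
	spin_lock_irqsave(&pidmap_lock, flags);
	/* ... short critical section ... */
	spin_unlock_irqrestore(&pidmap_lock, flags);

	/* irqs left on: cheaper lock/unlock, but an interrupt taken
	 * here extends the hold time seen by every waiter */
	spin_lock(&pidmap_lock);
	/* ... short critical section ... */
	spin_unlock(&pidmap_lock);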
Mateusz Guzik Feb. 1, 2025, 6:42 p.m. UTC | #4
On Sat, Feb 1, 2025 at 7:19 PM David Laight
<david.laight.linux@gmail.com> wrote:
>
> On Sat,  1 Feb 2025 17:31:06 +0100
> Mateusz Guzik <mjguzik@gmail.com> wrote:
>
> > It no longer serves any purpose now that the tasklist_lock ->
> > pidmap_lock ordering got eliminated.
>
> Not disabling interrupts may make things worse.
> It is a trade-off between 'interrupt latency' and 'lock hold time'.
>
> If interrupts are disabled then (clearly) they can get delayed because
> the lock is held.
> Provided the lock is only held for a short time it probably doesn't matter.
> Indeed, unless it is the worst one, it probably doesn't matter at all.
> After all spin locks shouldn't really be held for significant periods.
>
> OTOH if the lock doesn't disable interrupts then an interrupt will
> increase the length of time a lock is held for.
> This can be significant - and I mean upwards of 1ms.
> Network interrupts can take a while - and then the work that is deferred
> to 'softint' context happens as well (I don't think a spinlock stops
> the softint code).
>
> I've a feeling that unless a spin lock is held for 'far longer than one
> should ever be held for' then you really want to disable interrupts.
>

Note that taking the interrupt trip increases single-threaded overhead.

Per your own description, if the lock is contested and interrupts are
disabled, handling them also get delayed by CPUs which are busy just
waiting (and which would otherwise take care of them).

So while this is indeed a tradeoff, as I understand the sane default
is to *not* disable interrupts unless necessary.
David Laight Feb. 1, 2025, 9:51 p.m. UTC | #5
On Sat, 1 Feb 2025 19:42:32 +0100
Mateusz Guzik <mjguzik@gmail.com> wrote:

> On Sat, Feb 1, 2025 at 7:19 PM David Laight
> <david.laight.linux@gmail.com> wrote:
> >
> > On Sat,  1 Feb 2025 17:31:06 +0100
> > Mateusz Guzik <mjguzik@gmail.com> wrote:
> >  
> > > It no longer serves any purpose now that the tasklist_lock ->
> > > pidmap_lock ordering got eliminated.  
> >
> > Not disabling interrupts may make things worse.
> > It is a trade-off between 'interrupt latency' and 'lock hold time'.
> >
> > If interrupts are disabled then (clearly) they can get delayed because
> > the lock is held.
> > Provided the lock is only held for a short time it probably doesn't matter.
> > Indeed, unless it is the worst one, it probably doesn't matter at all.
> > After all spin locks shouldn't really be held for significant periods.
> >
> > OTOH if the lock doesn't disable interrupts then an interrupt will
> > increase the length of time a lock is held for.
> > This can be significant - and I mean upwards of 1ms.
> > Network interrupts can take a while - and then the work that is deferred
> > to 'softint' context happens as well (I don't think a spinlock stops
> > the softint code).
> >
> > I've a feeling that unless a spin lock is held for 'far longer than one
> > should ever be held for' then you really want to disable interrupts.
> >  
> 
> Note that taking the interrupt trip increases single-threaded overhead.

I'm not sure what you mean.
Disabling interrupts isn't as cheap as it ought to be, but probably isn't
that bad.

> Per your own description, if the lock is contested and interrupts are
> disabled, handling them also gets delayed by CPUs which are busy just
> waiting (and which would otherwise take care of them).

The slow path for spin locks ought to have interrupts enabled.
But, in any case, the interrupt is only delayed for the short time the spin
lock is held for.

> So while this is indeed a tradeoff, as I understand the sane default
> is to *not* disable interrupts unless necessary.

I beg to differ.

If an interrupt is taken by a cpu that holds a spin lock then any other
cpu that attempts to acquire the lock spins for the additional time that
the interrupt takes.
If interrupts are disabled then an interrupt is delayed for the time
that the spin lock is held.

The execution time of an interrupt can easily be a lot longer than
most spin locks are held for.
Either because of the work that an ethernet interrupt does (even before
deferring to softint), or because even a single PCIe read (eg to an
Altera Cyclone V fpga) can easily take 1000s of clocks.

Now the execution cost of the interrupt has to happen some time.
But you really don't want multiple CPUs spinning waiting for it to finish.

	David
Matthew Wilcox Feb. 1, 2025, 10 p.m. UTC | #6
On Sat, Feb 01, 2025 at 09:51:05PM +0000, David Laight wrote:
> I'm not sure what you mean.
> Disabling interrupts isn't as cheap as it ought to be, but probably isn't
> that bad.

Time it.  You'll see.

> > So while this is indeed a tradeoff, as I understand the sane default
> > is to *not* disable interrupts unless necessary.
> 
> I beg to differ.

You're wrong.  It is utterly standard to take spinlocks without
disabling IRQs.  We do it all over the kernel.  If you think that needs
to change, then make your case, don't throw a driveby review.

And I don't mean by arguing.  Make a change, measure the difference.
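
(One way to time it, as a hypothetical module sketch - the names and
loop count below are made up, not code from this thread:)

	#include <linux/module.h>
	#include <linux/ktime.h>
	#include <linux/irqflags.h>

	static int __init irqcost_init(void)
	{
		unsigned long flags;
		u64 t0, t1;
		int i;

		t0 = ktime_get_ns();
		for (i = 0; i < 1000000; i++) {
			local_irq_save(flags);		/* pushf; cli on x86 */
			local_irq_restore(flags);	/* popf */
		}
		t1 = ktime_get_ns();
		pr_info("irq save/restore: %llu ns per pair\n",
			(t1 - t0) / 1000000);
		return 0;
	}
	module_init(irqcost_init);
	MODULE_LICENSE("GPL");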
David Laight Feb. 2, 2025, 1:55 p.m. UTC | #7
On Sat, 1 Feb 2025 22:00:06 +0000
Matthew Wilcox <willy@infradead.org> wrote:

> On Sat, Feb 01, 2025 at 09:51:05PM +0000, David Laight wrote:
> > I'm not sure what you mean.
> > Disabling interrupts isn't as cheap as it ought to be, but probably isn't
> > that bad.  
> 
> Time it.  You'll see.

The best scheme I've seen is to just increment a per-cpu value.
Let the interrupt happen, notice it isn't allowed and return with
interrupts disabled.
Then re-issue the interrupt when the count is decremented to zero.
Easy with level sensitive interrupts.
But I don't think Linux ever uses that scheme.
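
A sketch of that scheme, with hypothetical names throughout (mainline
has no such generic facility, though powerpc's soft-masked interrupts
are in a similar spirit):

	DEFINE_PER_CPU(unsigned int, soft_irq_count);
	DEFINE_PER_CPU(bool, irq_deferred);

	static inline void soft_irq_disable(void)	/* no cli, just a count */
	{
		this_cpu_inc(soft_irq_count);
	}

	/* hypothetical hook at interrupt entry */
	static bool irq_entry_deferred(void)
	{
		if (!this_cpu_read(soft_irq_count))
			return false;
		this_cpu_write(irq_deferred, true);
		return true;	/* return from irq with interrupts left disabled */
	}

	static inline void soft_irq_enable(void)
	{
		if (this_cpu_dec_return(soft_irq_count) == 0 &&
		    this_cpu_read(irq_deferred)) {
			this_cpu_write(irq_deferred, false);
			local_irq_enable();	/* level irq re-asserts, gets replayed */
		}
	}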


> > > So while this is indeed a tradeoff, as I understand the sane default
> > > is to *not* disable interrupts unless necessary.  
> > 
> > I beg to differ.
> 
> You're wrong.  It is utterly standard to take spinlocks without
> disabling IRQs.  We do it all over the kernel.  If you think that needs
> to change, then make your case, don't throw a driveby review.
> 
> And I don't mean by arguing.  Make a change, measure the difference.

The analysis was done on some userspace code that basically does:
	for (;;) {
		pthread_mutex_lock(lock);
		item = get_head(list);
		if (!item)
			break;
		pthread_mutex_unlock(lock);
		process(item);
	}
For the test there were about 10000 items on the list and 30 threads
processing it (that was the target of the tests).
The entire list needs to be processed in 10ms (RTP audio).
There was a bit more code with the mutex held, but only 100 or so
instructions.
Mostly it works fine, some threads get delayed by interrupts (etc) but
the other threads carry on working and all the items get processed.

However sometimes an interrupt happens while the mutex is held.
In that case the other 29 threads get stuck waiting for the mutex.
No progress is made until the interrupt completes and it overruns
the 10ms period.

While this is a userspace test, the same thing will happen with
spin locks in the kernel.

In userspace you can't disable interrupts, but for kernel spinlocks
you can.

The problem is likely to show up as unexpected latency affecting
code with a hot mutex that is only held for short periods while
running a lot of network traffic.
That is also latency that affects all cpu at the same time.
The interrupt itself will always cause latency to one cpu.

Note that I also had to enable RFS, threaded NAPI and move the NAPI
threads to RT priorities to avoid lost packets.
The fix was to replace the linked list with an array and use atomic
increment to get the index of the item to process.

	David
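
(The fix described above, sketched in illustrative userspace C - NITEMS,
struct item and process() are stand-ins for the real code:)

	#include <stdatomic.h>

	static struct item items[NITEMS];
	static atomic_int next_idx;	/* replaces the mutex-protected list head */

	static void *worker(void *arg)
	{
		for (;;) {
			int i = atomic_fetch_add(&next_idx, 1);
			if (i >= NITEMS)
				break;
			/* no lock: an interrupted thread stalls nobody else */
			process(&items[i]);
		}
		return NULL;
	}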
Mateusz Guzik Feb. 2, 2025, 7:34 p.m. UTC | #8
On Sun, Feb 2, 2025 at 2:55 PM David Laight
<david.laight.linux@gmail.com> wrote:
>
> On Sat, 1 Feb 2025 22:00:06 +0000
> Matthew Wilcox <willy@infradead.org> wrote:
>
> > On Sat, Feb 01, 2025 at 09:51:05PM +0000, David Laight wrote:
> > > I'm not sure what you mean.
> > > Disabling interrupts isn't as cheap as it ought to be, but probably isn't
> > > that bad.
> >
> > Time it.  You'll see.
>
> The best scheme I've seen is to just increment a per-cpu value.
> Let the interrupt happen, notice it isn't allowed and return with
> interrupts disabled.
> Then re-issue the interrupt when the count is decremented to zero.
> Easy with level sensitive interrupts.
> But I don't think Linux ever uses that scheme.
>

I presume you are talking about the splhigh/splx set of primitives
from Unix kernels.

While "entering" is indeed cheap, undoing the work still needs to be
atomic vs interrupts.

I see NetBSD uses local cmpxchg8b on the interrupt level and interrupt
mask, while the rest takes the irq trip.

The NetBSD solution is still going to be visibly slower than not
messing with any of it as spin_unlock on amd64 is merely a store of 0
and cmpxchg even without the lock prefix costs several clocks.

Maybe there is other hackery which could be done, but see below.

>
> > > > So while this is indeed a tradeoff, as I understand the sane default
> > > > is to *not* disable interrupts unless necessary.
> > >
> > > I beg to differ.
> >
> > You're wrong.  It is utterly standard to take spinlocks without
> > disabling IRQs.  We do it all over the kernel.  If you think that needs
> > to change, then make your case, don't throw a driveby review.
> >
> > And I don't mean by arguing.  Make a change, measure the difference.
>
> The analysis was done on some userspace code that basically does:
>         for (;;) {
>                 pthread_mutex_lock(lock);
>                 item = get_head(list);
>                 if (!item)
>                         break;
>                 pthread_mutex_unlock(lock);
>                 process(item);
>         }
> For the test there were about 10000 items on the list and 30 threads
> processing it (that was the target of the tests).
> The entire list needs to be processed in 10ms (RTP audio).
> There was a bit more code with the mutex held, but only 100 or so
> instructions.
> Mostly it works fine, some threads get delayed by interrupts (etc) but
> the other threads carry on working and all the items get processed.
>
> However sometimes an interrupt happens while the mutex is held.
> In that case the other 29 threads get stuck waiting for the mutex.
> No progress is made until the interrupt completes and it overruns
> the 10ms period.
>
> While this is a userspace test, the same thing will happen with
> spin locks in the kernel.
>
> In userspace you can't disable interrupts, but for kernel spinlocks
> you can.
>
> The problem is likely to show up as unexpected latency affecting
> code with a hot mutex that is only held for short periods while
> running a lot of network traffic.
> That is also latency that affects all cpu at the same time.
> The interrupt itself will always cause latency to one cpu.
>

Nobody is denying there is potential that lock hold time will get
significantly extended if you get unlucky enough vs interrupts. It is
questioned whether defaulting to irqs off around lock-protected areas
is the right call.

As I noted in my previous e-mail the spin_lock_irq stuff disables
interrupts upfront and does not touch them afterwards even when
waiting for the lock to become free. Patching that up with queued
locks may be non-trivial, if at all possible. Thus contention on
irq-disabled locks *will* add latency to their handling unless this
gets addressed. Note maintaining forward progress guarantee in the
locking mechanism is non-negotiable, so punting to TAS or similar
unfair locks does not cut it.

This is on top of having to solve the overhead problem for taking the
trips (see earlier in the e-mail).

I would argue if the network stuff specifically is known to add
visible latency, then perhaps that's something to investigate.

Anyhow, as Willy said, you are welcome to code this up and demonstrate
it is better overall.
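
(For context, the classic mitigation for simple spinlocks is to spin
with interrupts enabled and only disable them across the acquisition
attempt - a sketch; as noted above this is TAS-like and unfair, so it
cannot simply replace the queued locks:)

	static void spin_lock_irq_polite(spinlock_t *lock, unsigned long *flags)
	{
		for (;;) {
			local_irq_save(*flags);
			if (spin_trylock(lock))
				return;		/* acquired, irqs now off */
			local_irq_restore(*flags);
			while (spin_is_locked(lock))
				cpu_relax();	/* irqs serviced while waiting */
		}
	}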
David Laight Feb. 2, 2025, 8:44 p.m. UTC | #9
On Sun, 2 Feb 2025 20:34:48 +0100
Mateusz Guzik <mjguzik@gmail.com> wrote:

> On Sun, Feb 2, 2025 at 2:55 PM David Laight
> <david.laight.linux@gmail.com> wrote:
> >
> > On Sat, 1 Feb 2025 22:00:06 +0000
> > Matthew Wilcox <willy@infradead.org> wrote:
> >  
> > > On Sat, Feb 01, 2025 at 09:51:05PM +0000, David Laight wrote:  
> > > > I'm not sure what you mean.
> > > > Disabling interrupts isn't as cheap as it ought to be, but probably isn't
> > > > that bad.  
> > >
> > > Time it.  You'll see.  
> >
> > The best scheme I've seen is to just increment a per-cpu value.
> > Let the interrupt happen, notice it isn't allowed and return with
> > interrupts disabled.
> > Then re-issue the interrupt when the count is decremented to zero.
> > Easy with level sensitive interrupts.
> > But I don't think Linux ever uses that scheme.
> >  
> 
> I presume you are talking about the splhigh/splx set of primitives
> from Unix kernels.
> 
> While "entering" is indeed cheap, undoing the work still needs to be
> atomic vs interrupts.
> 
> I see NetBSD uses local cmpxchg8b on the interrupt level and interrupt
> mask, while the rest takes the irq trip.
> 
> The NetBSD solution is still going to be visibly slower than not
> messing with any of it as spin_unlock on amd64 is merely a store of 0
> and cmpxchg even without the lock prefix costs several clocks.
> 
> Maybe there is other hackery which could be done, but see below.

I was thinking it might be possible to merge an 'interrupts disabled' count
with the existing 'pre-emption disabled' count.
IIRC (on x86 at least) this is just a per-cpu variable accessed from %fs/%gs.
So you add one to disable pre-emption and (say) 1<<16 to disable interrupts.
If an interrupt happens while the count is 'big' the count value is changed
so the last decrement of 1<<16 will set carry (or overflow), and a return
from interrupt is done that leaves interrupts disabled (traditionally easy).
The interrupt enable call just subtracts the 1<<16 and checks for carry (or
overflow); if not set all is fine, if set it needs to call something to
re-issue the interrupt - that is probably the hard bit.
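
Roughly, as a hypothetical sketch (these bits do not exist in the
mainline preempt count layout, and the re-issue step is hand-waved):

	#define IRQ_OFF_OFFSET	(1u << 16)		/* 'irqs off' units */

	static DEFINE_PER_CPU(bool, irq_pending);	/* set by entry code */

	static inline void logical_irq_disable(void)
	{
		preempt_count_add(IRQ_OFF_OFFSET);	/* one per-cpu add */
	}

	static inline void logical_irq_enable(void)
	{
		preempt_count_sub(IRQ_OFF_OFFSET);
		if (!(preempt_count() & ~(IRQ_OFF_OFFSET - 1)) &&
		    this_cpu_read(irq_pending)) {
			this_cpu_write(irq_pending, false);
			local_irq_enable();	/* still-asserted level irq fires */
		}
	}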

> 
> >  
> > > > > So while this is indeed a tradeoff, as I understand the sane default
> > > > > is to *not* disable interrupts unless necessary.  
> > > >
> > > > I beg to differ.
> > >
> > > You're wrong.  It is utterly standard to take spinlocks without
> > > disabling IRQs.  We do it all over the kernel.  If you think that needs
> > > to change, then make your case, don't throw a driveby review.
> > >
> > > And I don't mean by arguing.  Make a change, measure the difference.  
> >
> > The analysis was done on some userspace code that basically does:
> >         for (;;) {
> >                 pthread_mutex_lock(lock);
> >                 item = get_head(list);
> >                 if (!item)
> >                         break;
> >                 pthread_mutex_unlock(lock);
> >                 process(item);
> >         }
> > For the test there were about 10000 items on the list and 30 threads
> > processing it (that was the target of the tests).
> > The entire list needs to be processed in 10ms (RTP audio).
> > There was a bit more code with the mutex held, but only 100 or so
> > instructions.
> > Mostly it works fine, some threads get delayed by interrupts (etc) but
> > the other threads carry on working and all the items get processed.
> >
> > However sometimes an interrupt happens while the mutex is held.
> > In that case the other 29 threads get stuck waiting for the mutex.
> > No progress is made until the interrupt completes and it overruns
> > the 10ms period.
> >
> > While this is a userspace test, the same thing will happen with
> > spin locks in the kernel.
> >
> > In userspace you can't disable interrupts, but for kernel spinlocks
> > you can.
> >
> > The problem is likely to show up as unexpected latency affecting
> > code with a hot mutex that is only held for short periods while
> > running a lot of network traffic.
> > That is also latency that affects all cpu at the same time.
> > The interrupt itself will always cause latency to one cpu.
> >  
> 
> Nobody is denying there is potential that lock hold time will get
> significantly extended if you get unlucky enough vs interrupts. It is
> questioned whether defaulting to irqs off around lock-protected areas
> is the right call.

I really commented because you were changing one lock which could
easily be 'hot' enough for there to be side effects, without even
a comment about any pitfalls.

	David

> 
> As I noted in my previous e-mail the spin_lock_irq stuff disables
> interrupts upfront and does not touch them afterwards even when
> waiting for the lock to become free. Patching that up with queued
> locks may be non-trivial, if at all possible. Thus contention on
> irq-disabled locks *will* add latency to their handling unless this
> gets addressed. Note maintaining forward progress guarantee in the
> locking mechanism is non-negotiable, so punting to TAS or similar
> unfair locks does not cut it.
> 
> This is on top of having to solve the overhead problem for taking the
> trips (see earlier in the e-mail).
> 
> I would argue if the network stuff specifically is known to add
> visible latency, then perhaps that's something to investigate.
> 
> Anyhow, as Willy said, you are welcome to code this up and demonstrate
> it is better overall.
>
Matthew Wilcox Feb. 2, 2025, 10:06 p.m. UTC | #10
On Sun, Feb 02, 2025 at 08:44:49PM +0000, David Laight wrote:
> I really commented because you were changing one lock which could
> easily be 'hot' enough for there to be side effects, without even
> a comment about any pitfalls.

No such comment is needed.  This is standard practice.  You're the one
arguing for changing stuff.  Have a different conversation separate from
this patch.

Patch

diff --git a/kernel/pid.c b/kernel/pid.c
index 73625f28c166..900193de4232 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -115,11 +115,10 @@  static void delayed_put_pid(struct rcu_head *rhp)
 void free_pid(struct pid *pid)
 {
 	int i;
-	unsigned long flags;
 
 	lockdep_assert_not_held(&tasklist_lock);
 
-	spin_lock_irqsave(&pidmap_lock, flags);
+	spin_lock(&pidmap_lock);
 	for (i = 0; i <= pid->level; i++) {
 		struct upid *upid = pid->numbers + i;
 		struct pid_namespace *ns = upid->ns;
@@ -142,7 +141,7 @@  void free_pid(struct pid *pid)
 		idr_remove(&ns->idr, upid->nr);
 	}
 	pidfs_remove_pid(pid);
-	spin_unlock_irqrestore(&pidmap_lock, flags);
+	spin_unlock(&pidmap_lock);
 
 	call_rcu(&pid->rcu, delayed_put_pid);
 }
@@ -210,7 +209,7 @@  struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
 		}
 
 		idr_preload(GFP_KERNEL);
-		spin_lock_irq(&pidmap_lock);
+		spin_lock(&pidmap_lock);
 
 		if (tid) {
 			nr = idr_alloc(&tmp->idr, NULL, tid,
@@ -237,7 +236,7 @@  struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
 			nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
 					      pid_max, GFP_ATOMIC);
 		}
-		spin_unlock_irq(&pidmap_lock);
+		spin_unlock(&pidmap_lock);
 		idr_preload_end();
 
 		if (nr < 0) {
@@ -271,7 +270,7 @@  struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
 
 	upid = pid->numbers + ns->level;
 	idr_preload(GFP_KERNEL);
-	spin_lock_irq(&pidmap_lock);
+	spin_lock(&pidmap_lock);
 	if (!(ns->pid_allocated & PIDNS_ADDING))
 		goto out_unlock;
 	pidfs_add_pid(pid);
@@ -280,18 +279,18 @@  struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
 		idr_replace(&upid->ns->idr, pid, upid->nr);
 		upid->ns->pid_allocated++;
 	}
-	spin_unlock_irq(&pidmap_lock);
+	spin_unlock(&pidmap_lock);
 	idr_preload_end();
 
 	return pid;
 
 out_unlock:
-	spin_unlock_irq(&pidmap_lock);
+	spin_unlock(&pidmap_lock);
 	idr_preload_end();
 	put_pid_ns(ns);
 
 out_free:
-	spin_lock_irq(&pidmap_lock);
+	spin_lock(&pidmap_lock);
 	while (++i <= ns->level) {
 		upid = pid->numbers + i;
 		idr_remove(&upid->ns->idr, upid->nr);
@@ -301,7 +300,7 @@  struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
 	if (ns->pid_allocated == PIDNS_ADDING)
 		idr_set_cursor(&ns->idr, 0);
 
-	spin_unlock_irq(&pidmap_lock);
+	spin_unlock(&pidmap_lock);
 
 	kmem_cache_free(ns->pid_cachep, pid);
 	return ERR_PTR(retval);
@@ -309,9 +308,9 @@  struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
 
 void disable_pid_allocation(struct pid_namespace *ns)
 {
-	spin_lock_irq(&pidmap_lock);
+	spin_lock(&pidmap_lock);
 	ns->pid_allocated &= ~PIDNS_ADDING;
-	spin_unlock_irq(&pidmap_lock);
+	spin_unlock(&pidmap_lock);
 }
 
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)