[v2,5/5] mm/slub: simplify __cmpxchg_double_slab() and slab_[un]lock()

Message ID 20220823170400.26546-6-vbabka@suse.cz (mailing list archive)
State New
Series mm/slub: fix validation races and cleanup locking

Commit Message

Vlastimil Babka Aug. 23, 2022, 5:04 p.m. UTC
The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
(through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
preemption and that's sufficient on RT where interrupts are threaded.

That means we no longer need the slab_[un]lock() wrappers, so delete
them and rename the current __slab_[un]lock() to slab_[un]lock().

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
---
 mm/slub.c | 39 ++++++++++++---------------------------
 1 file changed, 12 insertions(+), 27 deletions(-)
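
For context on the claim that bit_spin_lock() disables preemption: the lock and
unlock sides boil down to roughly the following (a simplified paraphrase of
include/linux/bit_spinlock.h, not the verbatim kernel source; the debug checks
and sparse annotations are omitted):

static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Preemption is disabled before the lock bit is taken and stays
	 * disabled for the whole critical section.
	 */
	preempt_disable();
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	while (unlikely(test_and_set_bit_lock(bitnum, addr))) {
		preempt_enable();
		do {
			cpu_relax();	/* busy-wait until the bit looks free */
		} while (test_bit(bitnum, addr));
		preempt_disable();
	}
#endif
}

static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
	__clear_bit_unlock(bitnum, addr);	/* release the lock bit */
#endif
	preempt_enable();
}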

Comments

Hyeonggon Yoo Aug. 24, 2022, 10:24 a.m. UTC | #1
On Tue, Aug 23, 2022 at 07:04:00PM +0200, Vlastimil Babka wrote:
> The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
> (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
> preemption and that's sufficient on RT where interrupts are threaded.
> 
> That means we no longer need the slab_[un]lock() wrappers, so delete
> them and rename the current __slab_[un]lock() to slab_[un]lock().
>

I'm not familiar with the PREEMPT_RT preemption model, so I'm not sure I'm following.

1) Does "interrupts are threaded on RT" mean that processing of _most_ interrupts
   (all handlers that did not specify IRQF_NO_THREAD) is deferred to irq threads
   and done later in process context, and that the kernel *never* uses
   spinlock_t or local_lock_t (which, on RT, do not disable interrupts and may sleep)
   in hardware/software interrupt context?

2) Do we need to disable irqs in cmpxchg_double_slab() on RT?

BTW, is there good documentation or a paper on the PREEMPT_RT preemption model?
I tried to find some but only found Documentation/locking/locktypes.rst :(

Thanks!

> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Acked-by: David Rientjes <rientjes@google.com>
> ---
>  mm/slub.c | 39 ++++++++++++---------------------------
>  1 file changed, 12 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 0444a2ba4f12..bb8c1292d7e8 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -446,7 +446,7 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
>  /*
>   * Per slab locking using the pagelock
>   */
> -static __always_inline void __slab_lock(struct slab *slab)
> +static __always_inline void slab_lock(struct slab *slab)
>  {
>  	struct page *page = slab_page(slab);
>  
> @@ -454,7 +454,7 @@ static __always_inline void __slab_lock(struct slab *slab)
>  	bit_spin_lock(PG_locked, &page->flags);
>  }
>  
> -static __always_inline void __slab_unlock(struct slab *slab)
> +static __always_inline void slab_unlock(struct slab *slab)
>  {
>  	struct page *page = slab_page(slab);
>  
> @@ -462,24 +462,12 @@ static __always_inline void __slab_unlock(struct slab *slab)
>  	__bit_spin_unlock(PG_locked, &page->flags);
>  }
>  
> -static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
> -{
> -	if (IS_ENABLED(CONFIG_PREEMPT_RT))
> -		local_irq_save(*flags);
> -	__slab_lock(slab);
> -}
> -
> -static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
> -{
> -	__slab_unlock(slab);
> -	if (IS_ENABLED(CONFIG_PREEMPT_RT))
> -		local_irq_restore(*flags);
> -}
> -
>  /*
>   * Interrupts must be disabled (for the fallback code to work right), typically
> - * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
> - * so we disable interrupts as part of slab_[un]lock().
> + * by an _irqsave() lock variant. Except on PREEMPT_RT where these variants do
> + * not actually disable interrupts. On the other hand the migrate_disable()
> + * done by bit_spin_lock() is sufficient on PREEMPT_RT thanks to its threaded
> + * interrupts.
>   */
>  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
>  		void *freelist_old, unsigned long counters_old,
> @@ -498,18 +486,15 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
>  	} else
>  #endif
>  	{
> -		/* init to 0 to prevent spurious warnings */
> -		unsigned long flags = 0;
> -
> -		slab_lock(slab, &flags);
> +		slab_lock(slab);
>  		if (slab->freelist == freelist_old &&
>  					slab->counters == counters_old) {
>  			slab->freelist = freelist_new;
>  			slab->counters = counters_new;
> -			slab_unlock(slab, &flags);
> +			slab_unlock(slab);
>  			return true;
>  		}
> -		slab_unlock(slab, &flags);
> +		slab_unlock(slab);
>  	}
>  
>  	cpu_relax();
> @@ -540,16 +525,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
>  		unsigned long flags;
>  
>  		local_irq_save(flags);
> -		__slab_lock(slab);
> +		slab_lock(slab);
>  		if (slab->freelist == freelist_old &&
>  					slab->counters == counters_old) {
>  			slab->freelist = freelist_new;
>  			slab->counters = counters_new;
> -			__slab_unlock(slab);
> +			slab_unlock(slab);
>  			local_irq_restore(flags);
>  			return true;
>  		}
> -		__slab_unlock(slab);
> +		slab_unlock(slab);
>  		local_irq_restore(flags);
>  	}
>  
> -- 
> 2.37.2
>
Vlastimil Babka Aug. 24, 2022, 11:51 a.m. UTC | #2
On 8/24/22 12:24, Hyeonggon Yoo wrote:
> On Tue, Aug 23, 2022 at 07:04:00PM +0200, Vlastimil Babka wrote:
>> The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
>> (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
>> preemption and that's sufficient on RT where interrupts are threaded.
>>
>> That means we no longer need the slab_[un]lock() wrappers, so delete
>> them and rename the current __slab_[un]lock() to slab_[un]lock().
>>
> 
> I'm not familiar with the PREEMPT_RT preemption model, so I'm not sure I'm following.
> 
> 1) Does "interrupts are threaded on RT" mean that processing of _most_ interrupts
>     (all handlers that did not specify IRQF_NO_THREAD) is deferred to irq threads
>     and done later in process context, and that the kernel *never* uses
>     spinlock_t or local_lock_t (which, on RT, do not disable interrupts and may sleep)
>     in hardware/software interrupt context?

AFAIK, yes, that's the case. So if some non-threaded handler used slab, 
we would be in trouble. But that would already be the case before this 
patch due to the local_lock usage in other paths - the bit_spin_lock() 
without disabled irq shouldn't add anything new here AFAIK.

> 2) Do we need to disable irqs in cmpxchg_double_slab() on RT?

By that logic, we don't. But IMHO it's not worth complicating the code 
by special casing it for some negligible performance gain (the protected 
sections are very short), like we now special case 
__cmpxchg_double_slab() for correctness (after this patch, just the 
correctness of lockdep_assert_irqs_disabled()).
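
The special case referred to above is a short check at the top of
__cmpxchg_double_slab(); roughly the following sketch (shape only, it may not
match the exact tree state):

	/*
	 * On !PREEMPT_RT the callers hold an _irqsave() lock, which lockdep
	 * can verify here. On PREEMPT_RT the callers do not disable irqs,
	 * so the assertion has to be skipped.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		lockdep_assert_irqs_disabled();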

> BTW, is there good documentation or a paper on the PREEMPT_RT preemption model?
> I tried to find some but only found Documentation/locking/locktypes.rst :(

Good question, I don't know myself, maybe the RT guys do.

> Thanks!
>
Hyeonggon Yoo Aug. 24, 2022, 12:45 p.m. UTC | #3
On Wed, Aug 24, 2022 at 01:51:24PM +0200, Vlastimil Babka wrote:
> On 8/24/22 12:24, Hyeonggon Yoo wrote:
> > On Tue, Aug 23, 2022 at 07:04:00PM +0200, Vlastimil Babka wrote:
> > > The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
> > > (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
> > > preemption and that's sufficient on RT where interrupts are threaded.
> > > 
> > > That means we no longer need the slab_[un]lock() wrappers, so delete
> > > them and rename the current __slab_[un]lock() to slab_[un]lock().
> > > 
> > 
> > I'm not familiar with the PREEMPT_RT preemption model, so I'm not sure I'm following.
> > 
> > 1) Does "interrupts are threaded on RT" mean that processing of _most_ interrupts
> >     (all handlers that did not specify IRQF_NO_THREAD) is deferred to irq threads
> >     and done later in process context, and that the kernel *never* uses
> >     spinlock_t or local_lock_t (which, on RT, do not disable interrupts and may sleep)
> >     in hardware/software interrupt context?
> 
> AFAIK, yes, that's the case. So if some non-threaded handler used slab, we
> would be in trouble.

Yeah, that was exactly what I wondered!

> But that would already be the case before this patch
> due to the local_lock usage in other paths - the bit_spin_lock() without
> disabled irq shouldn't add anything new here AFAIK.

Agreed.
 
> > 2) Do we need to disable irqs in cmpxchg_double_slab() on RT?
> 
> By that logic, we don't. But IMHO it's not worth complicating the code by
> special casing it for some negligible performance gain (the protected
> sections are very short), like we now special case __cmpxchg_double_slab()
> for correctness (after this patch, just the correctness of
> lockdep_assert_irqs_disabled()).

Okay. Wanted to make sure that disabling interrupts is not required
by RT.

> 
> > BTW, is there good documentation or a paper on the PREEMPT_RT preemption model?
> > I tried to find some but only found Documentation/locking/locktypes.rst :(
> 
> Good question, I don't know myself, maybe the RT guys do.

Okay.

Thanks!
Hyeonggon Yoo Aug. 24, 2022, 1:04 p.m. UTC | #4
On Tue, Aug 23, 2022 at 07:04:00PM +0200, Vlastimil Babka wrote:
> The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
> (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
> preemption and that's sufficient on RT where interrupts are threaded.
> 
> That means we no longer need the slab_[un]lock() wrappers, so delete
> them and rename the current __slab_[un]lock() to slab_[un]lock().
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Acked-by: David Rientjes <rientjes@google.com>
> ---
>  mm/slub.c | 39 ++++++++++++---------------------------
>  1 file changed, 12 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 0444a2ba4f12..bb8c1292d7e8 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -446,7 +446,7 @@ slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
>  /*
>   * Per slab locking using the pagelock
>   */
> -static __always_inline void __slab_lock(struct slab *slab)
> +static __always_inline void slab_lock(struct slab *slab)
>  {
>  	struct page *page = slab_page(slab);
>  
> @@ -454,7 +454,7 @@ static __always_inline void __slab_lock(struct slab *slab)
>  	bit_spin_lock(PG_locked, &page->flags);
>  }
>  
> -static __always_inline void __slab_unlock(struct slab *slab)
> +static __always_inline void slab_unlock(struct slab *slab)
>  {
>  	struct page *page = slab_page(slab);
>  
> @@ -462,24 +462,12 @@ static __always_inline void __slab_unlock(struct slab *slab)
>  	__bit_spin_unlock(PG_locked, &page->flags);
>  }
>  
> -static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
> -{
> -	if (IS_ENABLED(CONFIG_PREEMPT_RT))
> -		local_irq_save(*flags);
> -	__slab_lock(slab);
> -}
> -
> -static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
> -{
> -	__slab_unlock(slab);
> -	if (IS_ENABLED(CONFIG_PREEMPT_RT))
> -		local_irq_restore(*flags);
> -}
> -
>  /*
>   * Interrupts must be disabled (for the fallback code to work right), typically
> - * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
> - * so we disable interrupts as part of slab_[un]lock().
> + * by an _irqsave() lock variant. Except on PREEMPT_RT where these variants do
> + * not actually disable interrupts. On the other hand the migrate_disable()

You mean preempt_disable()?
migrate_disable() will not be enough.

> + * done by bit_spin_lock() is sufficient on PREEMPT_RT thanks to its threaded
> + * interrupts.
>   */
>  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
>  		void *freelist_old, unsigned long counters_old,
> @@ -498,18 +486,15 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
>  	} else
>  #endif
>  	{
> -		/* init to 0 to prevent spurious warnings */
> -		unsigned long flags = 0;
> -
> -		slab_lock(slab, &flags);
> +		slab_lock(slab);
>  		if (slab->freelist == freelist_old &&
>  					slab->counters == counters_old) {
>  			slab->freelist = freelist_new;
>  			slab->counters = counters_new;
> -			slab_unlock(slab, &flags);
> +			slab_unlock(slab);
>  			return true;
>  		}
> -		slab_unlock(slab, &flags);
> +		slab_unlock(slab);
>  	}
>  
>  	cpu_relax();
> @@ -540,16 +525,16 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
>  		unsigned long flags;
>  
>  		local_irq_save(flags);
> -		__slab_lock(slab);
> +		slab_lock(slab);
>  		if (slab->freelist == freelist_old &&
>  					slab->counters == counters_old) {
>  			slab->freelist = freelist_new;
>  			slab->counters = counters_new;
> -			__slab_unlock(slab);
> +			slab_unlock(slab);
>  			local_irq_restore(flags);
>  			return true;
>  		}
> -		__slab_unlock(slab);
> +		slab_unlock(slab);
>  		local_irq_restore(flags);
>  	}
>  
> -- 
> 2.37.2

Otherwise looks good to me.

Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Sebastian Andrzej Siewior Aug. 24, 2022, 4:25 p.m. UTC | #5
On 2022-08-23 19:04:00 [+0200], Vlastimil Babka wrote:
> The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
> (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
> preemption and that's sufficient on RT where interrupts are threaded.

maybe something like 
"… sufficient on PREEMPT_RT where no allocation/ free operation is
 performed in hardirq context and so interrupt the current operation."

> That means we no longer need the slab_[un]lock() wrappers, so delete
> them and rename the current __slab_[un]lock() to slab_[un]lock().
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> Acked-by: David Rientjes <rientjes@google.com>

Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -454,7 +454,7 @@ static __always_inline void __slab_lock(struct slab *slab)
…
>  /*
>   * Interrupts must be disabled (for the fallback code to work right), typically
> - * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
> - * so we disable interrupts as part of slab_[un]lock().
> + * by an _irqsave() lock variant. Except on PREEMPT_RT where these variants do
> + * not actually disable interrupts. On the other hand the migrate_disable()
> + * done by bit_spin_lock() is sufficient on PREEMPT_RT thanks to its threaded
> + * interrupts.

 "                                     On PREEMPT_RT the
 preempt_disable(), which is part of bit_spin_lock(), is sufficient
 because the policy is not to allow any allocation/ free operation in
 hardirq context. Therefore nothing can interrupt the operation."

This also includes SMP function calls (IPI).

>   */
>  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
>  		void *freelist_old, unsigned long counters_old,

Sebastian
Sebastian Andrzej Siewior Aug. 24, 2022, 4:31 p.m. UTC | #6
On 2022-08-24 19:24:20 [+0900], Hyeonggon Yoo wrote:
> I'm not familiar with the PREEMPT_RT preemption model, so I'm not sure I'm following.
> 
> 1) Does "interrupts are threaded on RT" mean that processing of _most_ interrupts
>    (all handlers that did not specify IRQF_NO_THREAD) is deferred to irq threads
>    and done later in process context, and that the kernel *never* uses
>    spinlock_t or local_lock_t (which, on RT, do not disable interrupts and may sleep)
>    in hardware/software interrupt context?

All non-threaded interrupts (or everything in hardirq context) must not
allocate (even with GFP_ATOMIC) or free memory on PREEMPT_RT. This is
policy. If by "software interrupt" you mean softirqs, then they can
allocate memory, since softirqs are also threaded.
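
As an illustration of that policy, a hypothetical driver sketch (my_dev,
my_hardirq and my_thread_fn are made-up names, not anything in the tree):
the hardirq handler only hands off to the irq thread, and any allocation
happens in the threaded handler, i.e. in process context:

#include <linux/interrupt.h>
#include <linux/slab.h>

struct my_dev {
	int irq;
	void *buf;
};

/* hardirq context: no kmalloc()/kfree() here, especially on PREEMPT_RT */
static irqreturn_t my_hardirq(int irq, void *data)
{
	return IRQ_WAKE_THREAD;		/* defer the real work to the irq thread */
}

/* threaded handler: runs in process context, allocating is fine even on RT */
static irqreturn_t my_thread_fn(int irq, void *data)
{
	struct my_dev *dev = data;

	dev->buf = kmalloc(128, GFP_KERNEL);
	return IRQ_HANDLED;
}

static int my_dev_request_irq(struct my_dev *dev)
{
	return request_threaded_irq(dev->irq, my_hardirq, my_thread_fn,
				    0, "my_dev", dev);
}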

> BTW, is there good documentation or a paper on the PREEMPT_RT preemption model?
> I tried to find some but only found Documentation/locking/locktypes.rst :(

What is it that you are looking for? But the locking description is
good ;)

> Thanks!
> 
> > Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> > Acked-by: David Rientjes <rientjes@google.com>

Sebastian
Vlastimil Babka Aug. 25, 2022, 12:41 p.m. UTC | #7
On 8/24/22 15:04, Hyeonggon Yoo wrote:
> On Tue, Aug 23, 2022 at 07:04:00PM +0200, Vlastimil Babka wrote:
>> The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
>> (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
>> preemption and that's sufficient on RT where interrupts are threaded.
>> 
>> That means we no longer need the slab_[un]lock() wrappers, so delete
>> them and rename the current __slab_[un]lock() to slab_[un]lock().
>> 
>> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
>> Acked-by: David Rientjes <rientjes@google.com>
>> -
>>  /*
>>   * Interrupts must be disabled (for the fallback code to work right), typically
>> - * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
>> - * so we disable interrupts as part of slab_[un]lock().
>> + * by an _irqsave() lock variant. Except on PREEMPT_RT where these variants do
>> + * not actually disable interrupts. On the other hand the migrate_disable()
> 
> You mean preempt_disable()?

I did, thanks for catching it.

> migrate_disable() will not be enough.
> 
>> -- 
>> 2.37.2
> 
> Otherwise looks good to me.
> 
> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Thanks!
Vlastimil Babka Aug. 25, 2022, 12:59 p.m. UTC | #8
On 8/24/22 18:25, Sebastian Andrzej Siewior wrote:
> On 2022-08-23 19:04:00 [+0200], Vlastimil Babka wrote:
>> The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
>> (through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
>> preemption and that's sufficient on RT where interrupts are threaded.
> 
> maybe something like 
> "… sufficient on PREEMPT_RT where no allocation/ free operation is
>  performed in hardirq context and so interrupt the current operation."
> 
>> That means we no longer need the slab_[un]lock() wrappers, so delete
>> them and rename the current __slab_[un]lock() to slab_[un]lock().
>> 
>> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
>> Acked-by: David Rientjes <rientjes@google.com>
> 
> Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

Thanks, incorporated your wording suggestions.

>> --- a/mm/slub.c
>> +++ b/mm/slub.c
>> @@ -454,7 +454,7 @@ static __always_inline void __slab_lock(struct slab *slab)
> …
>>  /*
>>   * Interrupts must be disabled (for the fallback code to work right), typically
>> - * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
>> - * so we disable interrupts as part of slab_[un]lock().
>> + * by an _irqsave() lock variant. Except on PREEMPT_RT where these variants do
>> + * not actually disable interrupts. On the other hand the migrate_disable()
>> + * done by bit_spin_lock() is sufficient on PREEMPT_RT thanks to its threaded
>> + * interrupts.
> 
>  "                                     On PREEMPT_RT the
>  preempt_disable(), which is part of bit_spin_lock(), is sufficient
>  because the policy is not to allow any allocation/ free operation in
>  hardirq context. Therefore nothing can interrupt the operation."
> 
> This also includes SMP function calls (IPI).
> 
>>   */
>>  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
>>  		void *freelist_old, unsigned long counters_old,
> 
> Sebastian

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 0444a2ba4f12..bb8c1292d7e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -446,7 +446,7 @@  slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void __slab_lock(struct slab *slab)
+static __always_inline void slab_lock(struct slab *slab)
 {
 	struct page *page = slab_page(slab);
 
@@ -454,7 +454,7 @@  static __always_inline void __slab_lock(struct slab *slab)
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void __slab_unlock(struct slab *slab)
+static __always_inline void slab_unlock(struct slab *slab)
 {
 	struct page *page = slab_page(slab);
 
@@ -462,24 +462,12 @@  static __always_inline void __slab_unlock(struct slab *slab)
 	__bit_spin_unlock(PG_locked, &page->flags);
 }
 
-static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_save(*flags);
-	__slab_lock(slab);
-}
-
-static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
-{
-	__slab_unlock(slab);
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		local_irq_restore(*flags);
-}
-
 /*
  * Interrupts must be disabled (for the fallback code to work right), typically
- * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
- * so we disable interrupts as part of slab_[un]lock().
+ * by an _irqsave() lock variant. Except on PREEMPT_RT where these variants do
+ * not actually disable interrupts. On the other hand the migrate_disable()
+ * done by bit_spin_lock() is sufficient on PREEMPT_RT thanks to its threaded
+ * interrupts.
  */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
 		void *freelist_old, unsigned long counters_old,
@@ -498,18 +486,15 @@  static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab
 	} else
 #endif
 	{
-		/* init to 0 to prevent spurious warnings */
-		unsigned long flags = 0;
-
-		slab_lock(slab, &flags);
+		slab_lock(slab);
 		if (slab->freelist == freelist_old &&
 					slab->counters == counters_old) {
 			slab->freelist = freelist_new;
 			slab->counters = counters_new;
-			slab_unlock(slab, &flags);
+			slab_unlock(slab);
 			return true;
 		}
-		slab_unlock(slab, &flags);
+		slab_unlock(slab);
 	}
 
 	cpu_relax();
@@ -540,16 +525,16 @@  static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
 		unsigned long flags;
 
 		local_irq_save(flags);
-		__slab_lock(slab);
+		slab_lock(slab);
 		if (slab->freelist == freelist_old &&
 					slab->counters == counters_old) {
 			slab->freelist = freelist_new;
 			slab->counters = counters_new;
-			__slab_unlock(slab);
+			slab_unlock(slab);
 			local_irq_restore(flags);
 			return true;
 		}
-		__slab_unlock(slab);
+		slab_unlock(slab);
 		local_irq_restore(flags);
 	}