
mm/slub: remove dead code for debug caches on deactivate_slab()

Message ID 20221014114322.97512-1-42.hyeyoo@gmail.com
State New
Series mm/slub: remove dead code for debug caches on deactivate_slab()

Commit Message

Hyeonggon Yoo Oct. 14, 2022, 11:43 a.m. UTC
After commit c7323a5ad0786 ("mm/slub: restrict sysfs validation to debug
caches and make it safe"), SLUB does not take a slab from partial list for
debug caches. As deactivation isn't needed anymore, remove dead code.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
---
 mm/slub.c | 16 ++--------------
 1 file changed, 2 insertions(+), 14 deletions(-)
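
For reference, after this patch the mode selection in deactivate_slab() boils
down to roughly the following (a condensed sketch based on the diff below; the
M_FREE and M_PARTIAL conditions are paraphrased from code outside the changed
hunks, so this is illustration only, not verbatim kernel code):

	if (!new.inuse && n->nr_partial >= s->min_partial) {
		mode = M_FREE;		/* empty slab, will be discarded */
	} else if (new.freelist) {
		mode = M_PARTIAL;	/* has free objects, goes to the node partial list */
		spin_lock_irqsave(&n->list_lock, flags);
	} else {
		mode = M_FULL_NOLIST;	/* fully allocated, not tracked on any list */
	}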

Comments

Vlastimil Babka Oct. 21, 2022, 10:43 a.m. UTC | #1
On 10/14/22 13:43, Hyeonggon Yoo wrote:
> After commit c7323a5ad0786 ("mm/slub: restrict sysfs validation to debug
> caches and make it safe"), SLUB does not take a slab from partial list for

I'm confused by "SLUB does not take a slab from partial list" here. Did you
mean something like "SLUB never installs (even temporarily) a percpu slab
for debug caches"? So that means we never deactivate percpu slabs for debug
caches. And since debug caches are also the only ones that use the full
list, we no longer need to care about the full list in deactivate_slab(), right?
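
(For my own understanding, a rough sketch of why that should be the case:
since c7323a5ad0786 the slow path for debug caches allocates single objects
directly from node slabs under n->list_lock instead of freezing a percpu slab.
This is paraphrased, not the exact ___slab_alloc() code;
alloc_single_from_partial() and alloc_single_from_new_slab() are the helpers
that commit introduced.)

	if (kmem_cache_debug(s)) {
		/* take one object from a node partial slab, no freezing */
		object = alloc_single_from_partial(s, n, slab, orig_size);
		if (!object) {
			/* or allocate a fresh slab and take one object from it */
			slab = new_slab(s, gfpflags, node);
			object = alloc_single_from_new_slab(s, slab, orig_size);
		}
		return object;
	}
	/* non-debug caches keep freezing the slab as the percpu slab */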

> debug caches. As deactivation isn't needed anymore, remove dead code.
> 
> Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Otherwise it looks correct to me, just wanted to clarify I'm not missing
something.

> ---
>  mm/slub.c | 16 ++--------------
>  1 file changed, 2 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 96dd392d7f99..e2215240954d 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2411,7 +2411,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
>  static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
>  			    void *freelist)
>  {
> -	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
> +	enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST };
>  	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
>  	int free_delta = 0;
>  	enum slab_modes mode = M_NONE;
> @@ -2487,14 +2487,6 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
>  		 * acquire_slab() will see a slab that is frozen
>  		 */
>  		spin_lock_irqsave(&n->list_lock, flags);
> -	} else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
> -		mode = M_FULL;
> -		/*
> -		 * This also ensures that the scanning of full
> -		 * slabs from diagnostic functions will not see
> -		 * any frozen slabs.
> -		 */
> -		spin_lock_irqsave(&n->list_lock, flags);
>  	} else {
>  		mode = M_FULL_NOLIST;
>  	}
> @@ -2504,7 +2496,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
>  				old.freelist, old.counters,
>  				new.freelist, new.counters,
>  				"unfreezing slab")) {
> -		if (mode == M_PARTIAL || mode == M_FULL)
> +		if (mode == M_PARTIAL)
>  			spin_unlock_irqrestore(&n->list_lock, flags);
>  		goto redo;
>  	}
> @@ -2518,10 +2510,6 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
>  		stat(s, DEACTIVATE_EMPTY);
>  		discard_slab(s, slab);
>  		stat(s, FREE_SLAB);
> -	} else if (mode == M_FULL) {
> -		add_full(s, n, slab);
> -		spin_unlock_irqrestore(&n->list_lock, flags);
> -		stat(s, DEACTIVATE_FULL);
>  	} else if (mode == M_FULL_NOLIST) {
>  		stat(s, DEACTIVATE_FULL);
>  	}
Hyeonggon Yoo Oct. 22, 2022, 4:14 a.m. UTC | #2
On Fri, Oct 21, 2022 at 12:43:42PM +0200, Vlastimil Babka wrote:
> On 10/14/22 13:43, Hyeonggon Yoo wrote:
> > After commit c7323a5ad0786 ("mm/slub: restrict sysfs validation to debug
> > caches and make it safe"), SLUB does not take a slab from partial list for
> 
> I'm confused by "SLUB does not take a slab from partial list" here. Did you
> mean something like "SLUB never installs (even temporarily) a percpu slab
> for debug caches"?

Yes.

> So that means we never deactivate percpu slabs for debug
> caches.

Yes.

> And since debug caches are also the only ones that use the full
> list, we no longer need to care about the full list in deactivate_slab(), right?

Yes, you got it right, exactly!

Let me rephrase:

"After commit c7323a5ad0786 ("mm/slub: restrict sysfs validation to debug
caches and make it safe"), SLUB never installs percpu slab for debug caches
and thus never deactivates percpu slab for them.

Since only some of debug caches care about the full list, SLUB no longer
deactivates to full list. Remove dead code in deactivate_slab()."


Feel free to change this ;-)
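
The "only some debug caches" part is because full-list tracking is a no-op
unless SLAB_STORE_USER is set; roughly (paraphrased from mm/slub.c, not
verbatim):

	static void add_full(struct kmem_cache *s,
			     struct kmem_cache_node *n, struct slab *slab)
	{
		/* only SLAB_STORE_USER caches ever put slabs on n->full */
		if (!(s->flags & SLAB_STORE_USER))
			return;

		lockdep_assert_held(&n->list_lock);
		list_add(&slab->slab_list, &n->full);
	}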

> 
> > debug caches. As deactivation isn't needed anymore, remove dead code.
> > 
> > Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> 
> Otherwise it looks correct to me, just wanted to clarify I'm not missing
> something.

You are not missing anything.
Thank you for the clarification.

Hyeonggon

> 
> > ---
> >  mm/slub.c | 16 ++--------------
> >  1 file changed, 2 insertions(+), 14 deletions(-)
> > 
> > diff --git a/mm/slub.c b/mm/slub.c
> > index 96dd392d7f99..e2215240954d 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -2411,7 +2411,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
> >  static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
> >  			    void *freelist)
> >  {
> > -	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
> > +	enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST };
> >  	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
> >  	int free_delta = 0;
> >  	enum slab_modes mode = M_NONE;
> > @@ -2487,14 +2487,6 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
> >  		 * acquire_slab() will see a slab that is frozen
> >  		 */
> >  		spin_lock_irqsave(&n->list_lock, flags);
> > -	} else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
> > -		mode = M_FULL;
> > -		/*
> > -		 * This also ensures that the scanning of full
> > -		 * slabs from diagnostic functions will not see
> > -		 * any frozen slabs.
> > -		 */
> > -		spin_lock_irqsave(&n->list_lock, flags);
> >  	} else {
> >  		mode = M_FULL_NOLIST;
> >  	}
> > @@ -2504,7 +2496,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
> >  				old.freelist, old.counters,
> >  				new.freelist, new.counters,
> >  				"unfreezing slab")) {
> > -		if (mode == M_PARTIAL || mode == M_FULL)
> > +		if (mode == M_PARTIAL)
> >  			spin_unlock_irqrestore(&n->list_lock, flags);
> >  		goto redo;
> >  	}
> > @@ -2518,10 +2510,6 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
> >  		stat(s, DEACTIVATE_EMPTY);
> >  		discard_slab(s, slab);
> >  		stat(s, FREE_SLAB);
> > -	} else if (mode == M_FULL) {
> > -		add_full(s, n, slab);
> > -		spin_unlock_irqrestore(&n->list_lock, flags);
> > -		stat(s, DEACTIVATE_FULL);
> >  	} else if (mode == M_FULL_NOLIST) {
> >  		stat(s, DEACTIVATE_FULL);
> >  	}
>
Vlastimil Babka Oct. 24, 2022, 11:10 a.m. UTC | #3
On 10/22/22 06:14, Hyeonggon Yoo wrote:
> On Fri, Oct 21, 2022 at 12:43:42PM +0200, Vlastimil Babka wrote:
>> On 10/14/22 13:43, Hyeonggon Yoo wrote:
>> > After commit c7323a5ad0786 ("mm/slub: restrict sysfs validation to debug
>> > caches and make it safe"), SLUB does not take a slab from partial list for
>> 
>> I'm confused by "SLUB does not take a slab from partial list" here. Did you
>> mean something like "SLUB never installs (even temporarily) a percpu slab
>> for debug caches"?
> 
> Yes.
> 
>> So that means we never deactivate percpu slabs for debug
>> caches.
> 
> Yes.
> 
>> And since debug caches are also the only ones that use the full
>> list, we no longer need to care about the full list in deactivate_slab(), right?
> 
> Yes, you got it right, exactly!
> 
> Let me rephrase:
> 
> "After commit c7323a5ad0786 ("mm/slub: restrict sysfs validation to debug
> caches and make it safe"), SLUB never installs percpu slab for debug caches
> and thus never deactivates percpu slab for them.
> 
> Since only some of debug caches care about the full list, SLUB no longer
> deactivates to full list. Remove dead code in deactivate_slab()."
> 
> 
> Feel free to change this ;-)

Great, thanks!

Pushed to slab/for-6.2/cleanups

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 96dd392d7f99..e2215240954d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2411,7 +2411,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 			    void *freelist)
 {
-	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
+	enum slab_modes { M_NONE, M_PARTIAL, M_FREE, M_FULL_NOLIST };
 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
 	int free_delta = 0;
 	enum slab_modes mode = M_NONE;
@@ -2487,14 +2487,6 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 		 * acquire_slab() will see a slab that is frozen
 		 */
 		spin_lock_irqsave(&n->list_lock, flags);
-	} else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
-		mode = M_FULL;
-		/*
-		 * This also ensures that the scanning of full
-		 * slabs from diagnostic functions will not see
-		 * any frozen slabs.
-		 */
-		spin_lock_irqsave(&n->list_lock, flags);
 	} else {
 		mode = M_FULL_NOLIST;
 	}
@@ -2504,7 +2496,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 				old.freelist, old.counters,
 				new.freelist, new.counters,
 				"unfreezing slab")) {
-		if (mode == M_PARTIAL || mode == M_FULL)
+		if (mode == M_PARTIAL)
 			spin_unlock_irqrestore(&n->list_lock, flags);
 		goto redo;
 	}
@@ -2518,10 +2510,6 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 		stat(s, DEACTIVATE_EMPTY);
 		discard_slab(s, slab);
 		stat(s, FREE_SLAB);
-	} else if (mode == M_FULL) {
-		add_full(s, n, slab);
-		spin_unlock_irqrestore(&n->list_lock, flags);
-		stat(s, DEACTIVATE_FULL);
 	} else if (mode == M_FULL_NOLIST) {
 		stat(s, DEACTIVATE_FULL);
 	}