diff mbox series

[v2,24/33] mm/slob: Convert SLOB to use struct slab

Message ID 20211201181510.18784-25-vbabka@suse.cz (mailing list archive)
State New
Headers show
Series Separate struct slab from struct page | expand

Commit Message

Vlastimil Babka Dec. 1, 2021, 6:15 p.m. UTC
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

Use struct slab throughout the slob allocator.

[ vbabka@suse.cz: don't introduce wrappers for PageSlobFree in mm/slab.h just
  for the single callers being wrappers in mm/slob.c ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slob.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

Comments

Hyeonggon Yoo Dec. 10, 2021, 10:44 a.m. UTC | #1
On Wed, Dec 01, 2021 at 07:15:01PM +0100, Vlastimil Babka wrote:
> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> 
> Use struct slab throughout the slob allocator.
> 
> [ vbabka@suse.cz: don't introduce wrappers for PageSlobFree in mm/slab.h just
>   for the single callers being wrappers in mm/slob.c ]
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slob.c | 34 +++++++++++++++++-----------------
>  1 file changed, 17 insertions(+), 17 deletions(-)
> 
> diff --git a/mm/slob.c b/mm/slob.c
> index d2d15e7f191c..d3512bcc3141 100644
> --- a/mm/slob.c
> +++ b/mm/slob.c

...

>  		/* Enough room on this page? */
> @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
>  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
>  		if (!b)
>  			return NULL;
> -		sp = virt_to_page(b);
> -		__SetPageSlab(sp);
> +		sp = virt_to_slab(b);
> +		__SetPageSlab(slab_page(sp));

Hello Vlastimil.

I've tested this patch on my machine and it causes a NULL pointer dereference.
That's because virt_to_slab returns NULL if folio_test_slab is false,
and __SetPageSlab is then called with sp = NULL.

The diff below fixes the bug.

diff --git a/mm/slob.c b/mm/slob.c
index d3512bcc3141..cf669f03440f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
                b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
                if (!b)
                        return NULL;
+               __SetPageSlab(virt_to_page(b));
                sp = virt_to_slab(b);
-               __SetPageSlab(slab_page(sp));

                spin_lock_irqsave(&slob_lock, flags);
                sp->units = SLOB_UNITS(PAGE_SIZE);
 
 Thanks,
 Hyeonggon.

>  
>  		spin_lock_irqsave(&slob_lock, flags);
>  		sp->units = SLOB_UNITS(PAGE_SIZE);
> @@ -381,7 +381,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
>   */
>  static void slob_free(void *block, int size)
>  {
> -	struct page *sp;
> +	struct slab *sp;
>  	slob_t *prev, *next, *b = (slob_t *)block;
>  	slobidx_t units;
>  	unsigned long flags;
> @@ -391,7 +391,7 @@ static void slob_free(void *block, int size)
>  		return;
>  	BUG_ON(!size);
>  
> -	sp = virt_to_page(block);
> +	sp = virt_to_slab(block);
>  	units = SLOB_UNITS(size);
>  
>  	spin_lock_irqsave(&slob_lock, flags);
> @@ -401,8 +401,8 @@ static void slob_free(void *block, int size)
>  		if (slob_page_free(sp))
>  			clear_slob_page_free(sp);
>  		spin_unlock_irqrestore(&slob_lock, flags);
> -		__ClearPageSlab(sp);
> -		page_mapcount_reset(sp);
> +		__ClearPageSlab(slab_page(sp));
> +		page_mapcount_reset(slab_page(sp));
>  		slob_free_pages(b, 0);
>  		return;
>  	}
> -- 
> 2.33.1
> 
>
Vlastimil Babka Dec. 10, 2021, 11:44 a.m. UTC | #2
On 12/10/21 11:44, Hyeonggon Yoo wrote:
> On Wed, Dec 01, 2021 at 07:15:01PM +0100, Vlastimil Babka wrote:
>> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>> 
>> Use struct slab throughout the slob allocator.
>> 
>> [ vbabka@suse.cz: don't introduce wrappers for PageSlobFree in mm/slab.h just
>>   for the single callers being wrappers in mm/slob.c ]
>> 
>> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
>> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
>> ---
>>  mm/slob.c | 34 +++++++++++++++++-----------------
>>  1 file changed, 17 insertions(+), 17 deletions(-)
>> 
>> diff --git a/mm/slob.c b/mm/slob.c
>> index d2d15e7f191c..d3512bcc3141 100644
>> --- a/mm/slob.c
>> +++ b/mm/slob.c
> 
> ...
> 
>>  		/* Enough room on this page? */
>> @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
>>  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
>>  		if (!b)
>>  			return NULL;
>> -		sp = virt_to_page(b);
>> -		__SetPageSlab(sp);
>> +		sp = virt_to_slab(b);
>> +		__SetPageSlab(slab_page(sp));
> 
> Hello Vlastimil.
> 
> I've tested this patch on my machine and it causes NULL pointer dereference.
> that's because virt_to_slab returns NULL if folio_test_slab is false.
> and __SetPageSlab is called with sp = NULL.

Oops, thanks for catching this.

> diff below fixed bug.

Changed it to use folio and will push an updated branch
slab-struct_slab-v3r2

Thanks!

> diff --git a/mm/slob.c b/mm/slob.c
> index d3512bcc3141..cf669f03440f 100644
> --- a/mm/slob.c
> +++ b/mm/slob.c
> @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int a
> lign, int node,
>                 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
>                 if (!b)
>                         return NULL;
> +               __SetPageSlab(virt_to_page(b));
>                 sp = virt_to_slab(b);
> -               __SetPageSlab(slab_page(sp));
> 
>                 spin_lock_irqsave(&slob_lock, flags);
>                 sp->units = SLOB_UNITS(PAGE_SIZE);
>  
>  Thanks,
>  Hyeonggon.
> 
>>  
>>  		spin_lock_irqsave(&slob_lock, flags);
>>  		sp->units = SLOB_UNITS(PAGE_SIZE);
>> @@ -381,7 +381,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
>>   */
>>  static void slob_free(void *block, int size)
>>  {
>> -	struct page *sp;
>> +	struct slab *sp;
>>  	slob_t *prev, *next, *b = (slob_t *)block;
>>  	slobidx_t units;
>>  	unsigned long flags;
>> @@ -391,7 +391,7 @@ static void slob_free(void *block, int size)
>>  		return;
>>  	BUG_ON(!size);
>>  
>> -	sp = virt_to_page(block);
>> +	sp = virt_to_slab(block);
>>  	units = SLOB_UNITS(size);
>>  
>>  	spin_lock_irqsave(&slob_lock, flags);
>> @@ -401,8 +401,8 @@ static void slob_free(void *block, int size)
>>  		if (slob_page_free(sp))
>>  			clear_slob_page_free(sp);
>>  		spin_unlock_irqrestore(&slob_lock, flags);
>> -		__ClearPageSlab(sp);
>> -		page_mapcount_reset(sp);
>> +		__ClearPageSlab(slab_page(sp));
>> +		page_mapcount_reset(slab_page(sp));
>>  		slob_free_pages(b, 0);
>>  		return;
>>  	}
>> -- 
>> 2.33.1
>> 
>>
Hyeonggon Yoo Dec. 10, 2021, 3:29 p.m. UTC | #3
On Fri, Dec 10, 2021 at 12:44:32PM +0100, Vlastimil Babka wrote:
> On 12/10/21 11:44, Hyeonggon Yoo wrote:
> > On Wed, Dec 01, 2021 at 07:15:01PM +0100, Vlastimil Babka wrote:
> >> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> >> 
> >> Use struct slab throughout the slob allocator.
> >> 
> >> [ vbabka@suse.cz: don't introduce wrappers for PageSlobFree in mm/slab.h just
> >>   for the single callers being wrappers in mm/slob.c ]
> >> 
> >> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> >> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> >> ---
> >>  mm/slob.c | 34 +++++++++++++++++-----------------
> >>  1 file changed, 17 insertions(+), 17 deletions(-)
> >> 
> >> diff --git a/mm/slob.c b/mm/slob.c
> >> index d2d15e7f191c..d3512bcc3141 100644
> >> --- a/mm/slob.c
> >> +++ b/mm/slob.c
> > 
> > ...
> > 
> >>  		/* Enough room on this page? */
> >> @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
> >>  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
> >>  		if (!b)
> >>  			return NULL;
> >> -		sp = virt_to_page(b);
> >> -		__SetPageSlab(sp);
> >> +		sp = virt_to_slab(b);
> >> +		__SetPageSlab(slab_page(sp));
> > 
> > Hello Vlastimil.
> > 
> > I've tested this patch on my machine and it causes NULL pointer dereference.
> > that's because virt_to_slab returns NULL if folio_test_slab is false.
> > and __SetPageSlab is called with sp = NULL.
> 
> Oops, thanks for catching this.
> 
> > diff below fixed bug.
> 
> Changed it to use folio and will push an updated branch
> slab-struct_slab-v3r2
> 
> Thanks!

Tested again on slab-struct_slab-v3r2 and everything works fine!
And the code overall looks good to me.

Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

And below is not about this patch,
but what about something like this?

Because it's not necessary that page->_mapcount and
slab->units have same offset, so we can remove
(somewhat confusing) page_mapcount_reset function call.

I want to remove calling page_mapcount_reset also in
SLAB, but I have no idea. (maybe we can do that if we remove
SLAB colouring but that's going too far)

diff --git a/mm/slab.h b/mm/slab.h
index 90d7fceba470..dd0480149d38 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -50,8 +50,8 @@ struct slab {
        struct list_head slab_list;
        void * __unused_1;
        void *freelist;         /* first free block */
-       void * __unused_2;
-       int units;
+       long units;
+       unsigned int __unused_2;

 #else
 #error "Unexpected slab allocator configured"
diff --git a/mm/slob.c b/mm/slob.c
index 39b651b3e6e7..7b2d2c7d69cc 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -404,7 +404,6 @@ static void slob_free(void *block, int size)
                        clear_slob_page_free(sp);
                spin_unlock_irqrestore(&slob_lock, flags);
                __ClearPageSlab(slab_page(sp));
-               page_mapcount_reset(slab_page(sp));
                slob_free_pages(b, 0);
                return;
        }

> 
> > diff --git a/mm/slob.c b/mm/slob.c
> > index d3512bcc3141..cf669f03440f 100644
> > --- a/mm/slob.c
> > +++ b/mm/slob.c
> > @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int a
> > lign, int node,
> >                 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
> >                 if (!b)
> >                         return NULL;
> > +               __SetPageSlab(virt_to_page(b));
> >                 sp = virt_to_slab(b);
> > -               __SetPageSlab(slab_page(sp));
> > 
> >                 spin_lock_irqsave(&slob_lock, flags);
> >                 sp->units = SLOB_UNITS(PAGE_SIZE);
> >  
> >  Thanks,
> >  Hyeonggon.
> > 
> >>  
> >>  		spin_lock_irqsave(&slob_lock, flags);
> >>  		sp->units = SLOB_UNITS(PAGE_SIZE);
> >> @@ -381,7 +381,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
> >>   */
> >>  static void slob_free(void *block, int size)
> >>  {
> >> -	struct page *sp;
> >> +	struct slab *sp;
> >>  	slob_t *prev, *next, *b = (slob_t *)block;
> >>  	slobidx_t units;
> >>  	unsigned long flags;
> >> @@ -391,7 +391,7 @@ static void slob_free(void *block, int size)
> >>  		return;
> >>  	BUG_ON(!size);
> >>  
> >> -	sp = virt_to_page(block);
> >> +	sp = virt_to_slab(block);
> >>  	units = SLOB_UNITS(size);
> >>  
> >>  	spin_lock_irqsave(&slob_lock, flags);
> >> @@ -401,8 +401,8 @@ static void slob_free(void *block, int size)
> >>  		if (slob_page_free(sp))
> >>  			clear_slob_page_free(sp);
> >>  		spin_unlock_irqrestore(&slob_lock, flags);
> >> -		__ClearPageSlab(sp);
> >> -		page_mapcount_reset(sp);
> >> +		__ClearPageSlab(slab_page(sp));
> >> +		page_mapcount_reset(slab_page(sp));
> >>  		slob_free_pages(b, 0);
> >>  		return;
> >>  	}
> >> -- 
> >> 2.33.1
> >> 
> >> 
>
Vlastimil Babka Dec. 10, 2021, 6:09 p.m. UTC | #4
On 12/10/21 16:29, Hyeonggon Yoo wrote:
> On Fri, Dec 10, 2021 at 12:44:32PM +0100, Vlastimil Babka wrote:
>> On 12/10/21 11:44, Hyeonggon Yoo wrote:
>> > On Wed, Dec 01, 2021 at 07:15:01PM +0100, Vlastimil Babka wrote:
>> >> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
>> >> 
>> >> Use struct slab throughout the slob allocator.
>> >> 
>> >> [ vbabka@suse.cz: don't introduce wrappers for PageSlobFree in mm/slab.h just
>> >>   for the single callers being wrappers in mm/slob.c ]
>> >> 
>> >> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
>> >> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
>> >> ---
>> >>  mm/slob.c | 34 +++++++++++++++++-----------------
>> >>  1 file changed, 17 insertions(+), 17 deletions(-)
>> >> 
>> >> diff --git a/mm/slob.c b/mm/slob.c
>> >> index d2d15e7f191c..d3512bcc3141 100644
>> >> --- a/mm/slob.c
>> >> +++ b/mm/slob.c
>> > 
>> > ...
>> > 
>> >>  		/* Enough room on this page? */
>> >> @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
>> >>  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
>> >>  		if (!b)
>> >>  			return NULL;
>> >> -		sp = virt_to_page(b);
>> >> -		__SetPageSlab(sp);
>> >> +		sp = virt_to_slab(b);
>> >> +		__SetPageSlab(slab_page(sp));
>> > 
>> > Hello Vlastimil.
>> > 
>> > I've tested this patch on my machine and it causes NULL pointer dereference.
>> > that's because virt_to_slab returns NULL if folio_test_slab is false.
>> > and __SetPageSlab is called with sp = NULL.
>> 
>> Oops, thanks for catching this.
>> 
>> > diff below fixed bug.
>> 
>> Changed it to use folio and will push an updated branch
>> slab-struct_slab-v3r2
>> 
>> Thanks!
> 
> Tested again on slab-struct_slab-v3r2 and everything works fine!
> And the code overall looks good to me.
> 
> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Thanks!

> And below is not about this patch,
> but what about something like this?
> 
> Because it's not necessary that page->_mapcount and
> slab->units have same offset, so we can remove
> (somewhat confusing) page_mapcount_reset function call.

Yeah this could be done as new patch on top of 21/33. I wouldn't squash it
there so that it remains just a split without functional change.

> I want to remove calling page_mapcount_reset also in
> SLAB, but I have no idea. (maybe we can do that if we remove
> SLAB colouring but that's going too far)
> 
> diff --git a/mm/slab.h b/mm/slab.h
> index 90d7fceba470..dd0480149d38 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -50,8 +50,8 @@ struct slab {
>         struct list_head slab_list;
>         void * __unused_1;
>         void *freelist;         /* first free block */
> -       void * __unused_2;
> -       int units;
> +       long units;
> +       unsigned int __unused_2;
> 
>  #else
>  #error "Unexpected slab allocator configured"
> diff --git a/mm/slob.c b/mm/slob.c
> index 39b651b3e6e7..7b2d2c7d69cc 100644
> --- a/mm/slob.c
> +++ b/mm/slob.c
> @@ -404,7 +404,6 @@ static void slob_free(void *block, int size)
>                         clear_slob_page_free(sp);
>                 spin_unlock_irqrestore(&slob_lock, flags);
>                 __ClearPageSlab(slab_page(sp));
> -               page_mapcount_reset(slab_page(sp));
>                 slob_free_pages(b, 0);
>                 return;
>         }
> 
>> 
>> > diff --git a/mm/slob.c b/mm/slob.c
>> > index d3512bcc3141..cf669f03440f 100644
>> > --- a/mm/slob.c
>> > +++ b/mm/slob.c
>> > @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int a
>> > lign, int node,
>> >                 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
>> >                 if (!b)
>> >                         return NULL;
>> > +               __SetPageSlab(virt_to_page(b));
>> >                 sp = virt_to_slab(b);
>> > -               __SetPageSlab(slab_page(sp));
>> > 
>> >                 spin_lock_irqsave(&slob_lock, flags);
>> >                 sp->units = SLOB_UNITS(PAGE_SIZE);
>> >  
>> >  Thanks,
>> >  Hyeonggon.
>> > 
>> >>  
>> >>  		spin_lock_irqsave(&slob_lock, flags);
>> >>  		sp->units = SLOB_UNITS(PAGE_SIZE);
>> >> @@ -381,7 +381,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
>> >>   */
>> >>  static void slob_free(void *block, int size)
>> >>  {
>> >> -	struct page *sp;
>> >> +	struct slab *sp;
>> >>  	slob_t *prev, *next, *b = (slob_t *)block;
>> >>  	slobidx_t units;
>> >>  	unsigned long flags;
>> >> @@ -391,7 +391,7 @@ static void slob_free(void *block, int size)
>> >>  		return;
>> >>  	BUG_ON(!size);
>> >>  
>> >> -	sp = virt_to_page(block);
>> >> +	sp = virt_to_slab(block);
>> >>  	units = SLOB_UNITS(size);
>> >>  
>> >>  	spin_lock_irqsave(&slob_lock, flags);
>> >> @@ -401,8 +401,8 @@ static void slob_free(void *block, int size)
>> >>  		if (slob_page_free(sp))
>> >>  			clear_slob_page_free(sp);
>> >>  		spin_unlock_irqrestore(&slob_lock, flags);
>> >> -		__ClearPageSlab(sp);
>> >> -		page_mapcount_reset(sp);
>> >> +		__ClearPageSlab(slab_page(sp));
>> >> +		page_mapcount_reset(slab_page(sp));
>> >>  		slob_free_pages(b, 0);
>> >>  		return;
>> >>  	}
>> >> -- 
>> >> 2.33.1
>> >> 
>> >> 
>>
Hyeonggon Yoo Dec. 11, 2021, 10:54 a.m. UTC | #5
On Fri, Dec 10, 2021 at 07:09:56PM +0100, Vlastimil Babka wrote:
> On 12/10/21 16:29, Hyeonggon Yoo wrote:
> > On Fri, Dec 10, 2021 at 12:44:32PM +0100, Vlastimil Babka wrote:
> >> On 12/10/21 11:44, Hyeonggon Yoo wrote:
> >> > On Wed, Dec 01, 2021 at 07:15:01PM +0100, Vlastimil Babka wrote:
> >> >> From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> >> >> 
> >> >> Use struct slab throughout the slob allocator.
> >> >> 
> >> >> [ vbabka@suse.cz: don't introduce wrappers for PageSlobFree in mm/slab.h just
> >> >>   for the single callers being wrappers in mm/slob.c ]
> >> >> 
> >> >> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> >> >> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> >> >> ---
> >> >>  mm/slob.c | 34 +++++++++++++++++-----------------
> >> >>  1 file changed, 17 insertions(+), 17 deletions(-)
> >> >> 
> >> >> diff --git a/mm/slob.c b/mm/slob.c
> >> >> index d2d15e7f191c..d3512bcc3141 100644
> >> >> --- a/mm/slob.c
> >> >> +++ b/mm/slob.c
> >> > 
> >> > ...
> >> > 
> >> >>  		/* Enough room on this page? */
> >> >> @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
> >> >>  		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
> >> >>  		if (!b)
> >> >>  			return NULL;
> >> >> -		sp = virt_to_page(b);
> >> >> -		__SetPageSlab(sp);
> >> >> +		sp = virt_to_slab(b);
> >> >> +		__SetPageSlab(slab_page(sp));
> >> > 
> >> > Hello Vlastimil.
> >> > 
> >> > I've tested this patch on my machine and it causes NULL pointer dereference.
> >> > that's because virt_to_slab returns NULL if folio_test_slab is false.
> >> > and __SetPageSlab is called with sp = NULL.
> >> 
> >> Oops, thanks for catching this.
> >> 
> >> > diff below fixed bug.
> >> 
> >> Changed it to use folio and will push an updated branch
> >> slab-struct_slab-v3r2
> >> 
> >> Thanks!
> > 
> > Tested again on slab-struct_slab-v3r2 and everything works fine!
> > And the code overall looks good to me.
> > 
> > Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> > Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> 
> Thanks!
>

You're welcome! :)

> > And below is not about this patch,
> > but what about something like this?
> > 
> > Because it's not necessary that page->_mapcount and
> > slab->units have same offset, so we can remove
> > (somewhat confusing) page_mapcount_reset function call.
> 
> Yeah this could be done as new patch on top of 21/33. I wouldn't squash it
> there so that it remains just a split without functional change.
>

Yeah, It is better to be done as separate patch.
I'll send it as a patch.

Thanks,
Hyeonggon

> > I want to remove calling page_mapcount_reset also in
> > SLAB, but I have no idea. (maybe we can do that if we remove
> > SLAB colouring but that's going too far)
> > 
> > diff --git a/mm/slab.h b/mm/slab.h
> > index 90d7fceba470..dd0480149d38 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -50,8 +50,8 @@ struct slab {
> >         struct list_head slab_list;
> >         void * __unused_1;
> >         void *freelist;         /* first free block */
> > -       void * __unused_2;
> > -       int units;
> > +       long units;
> > +       unsigned int __unused_2;
> > 
> >  #else
> >  #error "Unexpected slab allocator configured"
> > diff --git a/mm/slob.c b/mm/slob.c
> > index 39b651b3e6e7..7b2d2c7d69cc 100644
> > --- a/mm/slob.c
> > +++ b/mm/slob.c
> > @@ -404,7 +404,6 @@ static void slob_free(void *block, int size)
> >                         clear_slob_page_free(sp);
> >                 spin_unlock_irqrestore(&slob_lock, flags);
> >                 __ClearPageSlab(slab_page(sp));
> > -               page_mapcount_reset(slab_page(sp));
> >                 slob_free_pages(b, 0);
> >                 return;
> >         }
> > 
> >> 
> >> > diff --git a/mm/slob.c b/mm/slob.c
> >> > index d3512bcc3141..cf669f03440f 100644
> >> > --- a/mm/slob.c
> >> > +++ b/mm/slob.c
> >> > @@ -358,8 +358,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int a
> >> > lign, int node,
> >> >                 b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
> >> >                 if (!b)
> >> >                         return NULL;
> >> > +               __SetPageSlab(virt_to_page(b));
> >> >                 sp = virt_to_slab(b);
> >> > -               __SetPageSlab(slab_page(sp));
> >> > 
> >> >                 spin_lock_irqsave(&slob_lock, flags);
> >> >                 sp->units = SLOB_UNITS(PAGE_SIZE);
> >> >  
> >> >  Thanks,
> >> >  Hyeonggon.
> >> > 
> >> >>  
> >> >>  		spin_lock_irqsave(&slob_lock, flags);
> >> >>  		sp->units = SLOB_UNITS(PAGE_SIZE);
> >> >> @@ -381,7 +381,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
> >> >>   */
> >> >>  static void slob_free(void *block, int size)
> >> >>  {
> >> >> -	struct page *sp;
> >> >> +	struct slab *sp;
> >> >>  	slob_t *prev, *next, *b = (slob_t *)block;
> >> >>  	slobidx_t units;
> >> >>  	unsigned long flags;
> >> >> @@ -391,7 +391,7 @@ static void slob_free(void *block, int size)
> >> >>  		return;
> >> >>  	BUG_ON(!size);
> >> >>  
> >> >> -	sp = virt_to_page(block);
> >> >> +	sp = virt_to_slab(block);
> >> >>  	units = SLOB_UNITS(size);
> >> >>  
> >> >>  	spin_lock_irqsave(&slob_lock, flags);
> >> >> @@ -401,8 +401,8 @@ static void slob_free(void *block, int size)
> >> >>  		if (slob_page_free(sp))
> >> >>  			clear_slob_page_free(sp);
> >> >>  		spin_unlock_irqrestore(&slob_lock, flags);
> >> >> -		__ClearPageSlab(sp);
> >> >> -		page_mapcount_reset(sp);
> >> >> +		__ClearPageSlab(slab_page(sp));
> >> >> +		page_mapcount_reset(slab_page(sp));
> >> >>  		slob_free_pages(b, 0);
> >> >>  		return;
> >> >>  	}
> >> >> -- 
> >> >> 2.33.1
> >> >> 
> >> >> 
> >> 
>
diff mbox series

Patch

diff --git a/mm/slob.c b/mm/slob.c
index d2d15e7f191c..d3512bcc3141 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -105,21 +105,21 @@  static LIST_HEAD(free_slob_large);
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
-static inline int slob_page_free(struct page *sp)
+static inline int slob_page_free(struct slab *slab)
 {
-	return PageSlobFree(sp);
+	return PageSlobFree(slab_page(slab));
 }
 
-static void set_slob_page_free(struct page *sp, struct list_head *list)
+static void set_slob_page_free(struct slab *slab, struct list_head *list)
 {
-	list_add(&sp->slab_list, list);
-	__SetPageSlobFree(sp);
+	list_add(&slab->slab_list, list);
+	__SetPageSlobFree(slab_page(slab));
 }
 
-static inline void clear_slob_page_free(struct page *sp)
+static inline void clear_slob_page_free(struct slab *slab)
 {
-	list_del(&sp->slab_list);
-	__ClearPageSlobFree(sp);
+	list_del(&slab->slab_list);
+	__ClearPageSlobFree(slab_page(slab));
 }
 
 #define SLOB_UNIT sizeof(slob_t)
@@ -234,7 +234,7 @@  static void slob_free_pages(void *b, int order)
  *         freelist, in this case @page_removed_from_list will be set to
  *         true (set to false otherwise).
  */
-static void *slob_page_alloc(struct page *sp, size_t size, int align,
+static void *slob_page_alloc(struct slab *sp, size_t size, int align,
 			      int align_offset, bool *page_removed_from_list)
 {
 	slob_t *prev, *cur, *aligned = NULL;
@@ -301,7 +301,7 @@  static void *slob_page_alloc(struct page *sp, size_t size, int align,
 static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 							int align_offset)
 {
-	struct page *sp;
+	struct slab *sp;
 	struct list_head *slob_list;
 	slob_t *b = NULL;
 	unsigned long flags;
@@ -323,7 +323,7 @@  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		 * If there's a node specification, search for a partial
 		 * page with a matching node id in the freelist.
 		 */
-		if (node != NUMA_NO_NODE && page_to_nid(sp) != node)
+		if (node != NUMA_NO_NODE && slab_nid(sp) != node)
 			continue;
 #endif
 		/* Enough room on this page? */
@@ -358,8 +358,8 @@  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
 		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
 			return NULL;
-		sp = virt_to_page(b);
-		__SetPageSlab(sp);
+		sp = virt_to_slab(b);
+		__SetPageSlab(slab_page(sp));
 
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
@@ -381,7 +381,7 @@  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
  */
 static void slob_free(void *block, int size)
 {
-	struct page *sp;
+	struct slab *sp;
 	slob_t *prev, *next, *b = (slob_t *)block;
 	slobidx_t units;
 	unsigned long flags;
@@ -391,7 +391,7 @@  static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = virt_to_page(block);
+	sp = virt_to_slab(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -401,8 +401,8 @@  static void slob_free(void *block, int size)
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
 		spin_unlock_irqrestore(&slob_lock, flags);
-		__ClearPageSlab(sp);
-		page_mapcount_reset(sp);
+		__ClearPageSlab(slab_page(sp));
+		page_mapcount_reset(slab_page(sp));
 		slob_free_pages(b, 0);
 		return;
 	}