
[v3,3/7] slob: Use slab_list instead of lru

Message ID 20190314053135.1541-4-tobin@kernel.org (mailing list archive)
State New, archived
Series mm: Use slab_list list_head instead of lru

Commit Message

Tobin C. Harding March 14, 2019, 5:31 a.m. UTC
Currently we use the page->lru list for maintaining lists of slabs.  We
have a list_head in the page structure (slab_list) that can be used for
this purpose.  Doing so makes the code cleaner since we are not
overloading the lru list.

The slab_list is part of a union within the page struct (included here
stripped down):

	union {
		struct {	/* Page cache and anonymous pages */
			struct list_head lru;
			...
		};
		struct {
			dma_addr_t dma_addr;
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;
				struct {	/* Partial pages */
					struct page *next;
					int pages;	/* Nr of pages left */
					int pobjects;	/* Approximate count */
				};
			};
		...

Here we see that slab_list and lru occupy the same bits.  We can verify
that this change is safe to make by comparing the object file produced
from slob.c before and after this patch is applied.
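
As an aside (this check is not part of the patch), the layout
equivalence can also be asserted at compile time.  A minimal sketch,
assuming the usual kernel helpers BUILD_BUG_ON() and offsetof() are
available (the function name below is made up for illustration):

	/* Hypothetical compile-time check: slab_list aliases lru. */
	static inline void check_slab_list_aliases_lru(void)
	{
		BUILD_BUG_ON(offsetof(struct page, slab_list) !=
			     offsetof(struct page, lru));
	}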

Steps taken to verify:

 1. checkout current tip of Linus' tree

    commit a667cb7a94d4 ("Merge branch 'akpm' (patches from Andrew)")

 2. configure and build (select SLOB allocator)

    CONFIG_SLOB=y
    CONFIG_SLAB_MERGE_DEFAULT=y

 3. disassemble object file `objdump -dr mm/slob.o > before.s`
 4. apply patch
 5. build
 6. disassemble object file `objdump -dr mm/slob.o > after.s`
 7. diff before.s after.s (an empty diff confirms the generated code is unchanged)

Use slab_list list_head instead of the lru list_head for maintaining
lists of slabs.

Reviewed-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
---
 mm/slob.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

Comments

Roman Gushchin March 14, 2019, 6:52 p.m. UTC | #1
On Thu, Mar 14, 2019 at 04:31:31PM +1100, Tobin C. Harding wrote:
> [...]

Hi Tobin!

How about list_rotate_to_front(&next->lru, slob_list) from the previous patch?
Shouldn't it use slab_list instead of lru too?

Thanks!
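
For context, the call Roman points at comes from the previous patch in
the series; presumably the fix is the same one-line conversion, along
these lines:

	/* hypothetical corrected form of the call in the previous patch */
	list_rotate_to_front(&next->slab_list, slob_list);

rather than list_rotate_to_front(&next->lru, slob_list).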
Tobin Harding March 14, 2019, 8:38 p.m. UTC | #2
On Thu, Mar 14, 2019 at 06:52:25PM +0000, Roman Gushchin wrote:
> On Thu, Mar 14, 2019 at 04:31:31PM +1100, Tobin C. Harding wrote:
> > [...]
>
> Hi Tobin!
> 
> How about list_rotate_to_front(&next->lru, slob_list) from the previous patch?
> Shouldn't it use slab_list instead of lru too?

Thanks Roman, my mistake - one too many rebases.  I hate when I drop the
ball like this.

Tobin.
Tobin Harding March 14, 2019, 8:42 p.m. UTC | #3
On Fri, Mar 15, 2019 at 07:38:09AM +1100, Tobin C. Harding wrote:
> On Thu, Mar 14, 2019 at 06:52:25PM +0000, Roman Gushchin wrote:
> > On Thu, Mar 14, 2019 at 04:31:31PM +1100, Tobin C. Harding wrote:
> > > [...]
> >
> > Hi Tobin!
> > 
> > How about list_rotate_to_front(&next->lru, slob_list) from the previous patch?
> > Shouldn't it use slab_list instead of lru too?
> 
> Thanks Roman, my mistake - one too many rebases.  I hate when I drop the
> ball like this.

Oh that's right, it's a union so it still builds and boots - I was
thinking that I had rebased and not built.  I guess that's just a fumble
instead of a complete ball drop.

Thanks for the careful review all the same.

	Tobin
Tobin Harding March 14, 2019, 8:47 p.m. UTC | #4
On Thu, Mar 14, 2019 at 06:52:25PM +0000, Roman Gushchin wrote:
> On Thu, Mar 14, 2019 at 04:31:31PM +1100, Tobin C. Harding wrote:
> > [...]
>
> Hi Tobin!
> 
> How about list_rotate_to_front(&next->lru, slob_list) from the previous patch?
> Shouldn't it use slab_list instead of lru too?

I'll let this sit for a day or two in case we get any more comments on
the list.h stuff, then do another version ready for US Monday morning.

Thanks again,
Tobin.

Patch

diff --git a/mm/slob.c b/mm/slob.c
index 39ad9217ffea..94486c32e0ff 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -112,13 +112,13 @@  static inline int slob_page_free(struct page *sp)
 
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->lru, list);
+	list_add(&sp->slab_list, list);
 	__SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->lru);
+	list_del(&sp->slab_list);
 	__ClearPageSlobFree(sp);
 }
 
@@ -282,7 +282,7 @@  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, lru) {
+	list_for_each_entry(sp, slob_list, slab_list) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -331,7 +331,7 @@  static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
 		sp->freelist = b;
-		INIT_LIST_HEAD(&sp->lru);
+		INIT_LIST_HEAD(&sp->slab_list);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
 		b = slob_page_alloc(sp, size, align);