[11/12] mm, slub: don't aggressively inline with CONFIG_SLUB_TINY

Message ID 20221121171202.22080-12-vbabka@suse.cz (mailing list archive)
State New
Series Introduce CONFIG_SLUB_TINY and deprecate SLOB

Commit Message

Vlastimil Babka Nov. 21, 2022, 5:12 p.m. UTC
SLUB fastpaths use __always_inline to avoid function calls. With
CONFIG_SLUB_TINY we would rather save memory, so add a
__fastpath_inline macro that expands to __always_inline normally, but
to nothing with CONFIG_SLUB_TINY.
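
For reference, this is the pattern the patch introduces near the top of
mm/slub.c (a condensed excerpt of the hunks below, with comments added here):

#ifndef CONFIG_SLUB_TINY
/* Normal build: force-inline the fast paths to avoid function call overhead. */
#define __fastpath_inline __always_inline
#else
/* CONFIG_SLUB_TINY: expand to nothing, so the compiler is free to keep a
 * single out-of-line copy of each helper and save text size instead. */
#define __fastpath_inline
#endif

/* The fast-path helpers are then annotated with it, e.g. (from the patch): */
static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size);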

bloat-o-meter results on x86_64 mm/slub.o:

add/remove: 3/1 grow/shrink: 1/8 up/down: 865/-1784 (-919)
Function                                     old     new   delta
kmem_cache_free                               20     281    +261
slab_alloc_node.isra                           -     245    +245
slab_free.constprop.isra                       -     231    +231
__kmem_cache_alloc_lru.isra                    -     128    +128
__kmem_cache_release                          88      83      -5
__kmem_cache_create                         1446    1436     -10
__kmem_cache_free                            271     142    -129
kmem_cache_alloc_node                        330     127    -203
kmem_cache_free_bulk.part                    826     613    -213
__kmem_cache_alloc_node                      230      10    -220
kmem_cache_alloc_lru                         325      12    -313
kmem_cache_alloc                             325      10    -315
kmem_cache_free.part                         376       -    -376
Total: Before=26103, After=25184, chg -3.52%

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

Comments

Hyeonggon Yoo Nov. 28, 2022, 1:19 p.m. UTC | #1
On Mon, Nov 21, 2022 at 06:12:01PM +0100, Vlastimil Babka wrote:
> SLUB fastpaths use __always_inline to avoid function calls. With
> CONFIG_SLUB_TINY we would rather save memory, so add a
> __fastpath_inline macro that expands to __always_inline normally, but
> to nothing with CONFIG_SLUB_TINY.
> 
> bloat-o-meter results on x86_64 mm/slub.o:
> 
> add/remove: 3/1 grow/shrink: 1/8 up/down: 865/-1784 (-919)
> Function                                     old     new   delta
> kmem_cache_free                               20     281    +261
> slab_alloc_node.isra                           -     245    +245
> slab_free.constprop.isra                       -     231    +231
> __kmem_cache_alloc_lru.isra                    -     128    +128
> __kmem_cache_release                          88      83      -5
> __kmem_cache_create                         1446    1436     -10
> __kmem_cache_free                            271     142    -129
> kmem_cache_alloc_node                        330     127    -203
> kmem_cache_free_bulk.part                    826     613    -213
> __kmem_cache_alloc_node                      230      10    -220
> kmem_cache_alloc_lru                         325      12    -313
> kmem_cache_alloc                             325      10    -315
> kmem_cache_free.part                         376       -    -376
> Total: Before=26103, After=25184, chg -3.52%
> 
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
>  mm/slub.c | 14 ++++++++++----
>  1 file changed, 10 insertions(+), 4 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 7f1cd702c3b4..d54466e76503 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -187,6 +187,12 @@ do {					\
>  #define USE_LOCKLESS_FAST_PATH()	(false)
>  #endif
>  
> +#ifndef CONFIG_SLUB_TINY
> +#define __fastpath_inline __always_inline
> +#else
> +#define __fastpath_inline
> +#endif
> +
>  #ifdef CONFIG_SLUB_DEBUG
>  #ifdef CONFIG_SLUB_DEBUG_ON
>  DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
> @@ -3386,7 +3392,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
>   *
>   * Otherwise we can simply pick the next object from the lockless free list.
>   */
> -static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
> +static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
>  		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
>  {
>  	void *object;
> @@ -3412,13 +3418,13 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
>  	return object;
>  }
>  
> -static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
> +static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
>  		gfp_t gfpflags, unsigned long addr, size_t orig_size)
>  {
>  	return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
>  }
>  
> -static __always_inline
> +static __fastpath_inline
>  void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
>  			     gfp_t gfpflags)
>  {
> @@ -3733,7 +3739,7 @@ static void do_slab_free(struct kmem_cache *s,
>  }
>  #endif /* CONFIG_SLUB_TINY */
>  
> -static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
> +static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab,
>  				      void *head, void *tail, void **p, int cnt,
>  				      unsigned long addr)
>  {
> -- 
> 2.38.1

Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 7f1cd702c3b4..d54466e76503 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -187,6 +187,12 @@  do {					\
 #define USE_LOCKLESS_FAST_PATH()	(false)
 #endif
 
+#ifndef CONFIG_SLUB_TINY
+#define __fastpath_inline __always_inline
+#else
+#define __fastpath_inline
+#endif
+
 #ifdef CONFIG_SLUB_DEBUG
 #ifdef CONFIG_SLUB_DEBUG_ON
 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
@@ -3386,7 +3392,7 @@  static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
+static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
 	void *object;
@@ -3412,13 +3418,13 @@  static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
 	return object;
 }
 
-static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
+static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
 		gfp_t gfpflags, unsigned long addr, size_t orig_size)
 {
 	return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
 }
 
-static __always_inline
+static __fastpath_inline
 void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 			     gfp_t gfpflags)
 {
@@ -3733,7 +3739,7 @@  static void do_slab_free(struct kmem_cache *s,
 }
 #endif /* CONFIG_SLUB_TINY */
 
-static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
+static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab,
 				      void *head, void *tail, void **p, int cnt,
 				      unsigned long addr)
 {