Message ID | 20221121171202.22080-7-vbabka@suse.cz (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | Introduce CONFIG_SLUB_TINY and deprecate SLOB | expand |
On 11/21/22 18:11, Vlastimil Babka wrote: > Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation > by grouping pages by mobility, but on tiny systems the extra memory > overhead of separate set of kmalloc-rcl caches will probably be worse, > and mobility grouping likely disabled anyway. > > Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the > regular ones. > > Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Fixed up in response to lkp report for a MEMCG_KMEM+SLUB_TINY combo: ---8<--- From c1ec0b924850a2863d061f316615d596176f15bb Mon Sep 17 00:00:00 2001 From: Vlastimil Babka <vbabka@suse.cz> Date: Tue, 15 Nov 2022 18:19:28 +0100 Subject: [PATCH 06/12] mm, slub: don't create kmalloc-rcl caches with CONFIG_SLUB_TINY Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation by grouping pages by mobility, but on tiny systems the extra memory overhead of separate set of kmalloc-rcl caches will probably be worse, and mobility grouping likely disabled anyway. Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the regular ones. 
Signed-off-by: Vlastimil Babka <vbabka@suse.cz> --- include/linux/slab.h | 9 +++++++-- mm/slab_common.c | 10 ++++++++-- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 45efc6c553b8..ae2d19ec8467 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -336,12 +336,17 @@ enum kmalloc_cache_type { #endif #ifndef CONFIG_MEMCG_KMEM KMALLOC_CGROUP = KMALLOC_NORMAL, -#else - KMALLOC_CGROUP, #endif +#ifdef CONFIG_SLUB_TINY + KMALLOC_RECLAIM = KMALLOC_NORMAL, +#else KMALLOC_RECLAIM, +#endif #ifdef CONFIG_ZONE_DMA KMALLOC_DMA, +#endif +#ifdef CONFIG_MEMCG_KMEM + KMALLOC_CGROUP, #endif NR_KMALLOC_TYPES }; diff --git a/mm/slab_common.c b/mm/slab_common.c index a8cb5de255fc..907d52963806 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -770,10 +770,16 @@ EXPORT_SYMBOL(kmalloc_size_roundup); #define KMALLOC_CGROUP_NAME(sz) #endif +#ifndef CONFIG_SLUB_TINY +#define KMALLOC_RCL_NAME(sz) .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz, +#else +#define KMALLOC_RCL_NAME(sz) +#endif + #define INIT_KMALLOC_INFO(__size, __short_size) \ { \ .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \ - .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \ + KMALLOC_RCL_NAME(__short_size) \ KMALLOC_CGROUP_NAME(__short_size) \ KMALLOC_DMA_NAME(__short_size) \ .size = __size, \ @@ -859,7 +865,7 @@ void __init setup_kmalloc_cache_index_table(void) static void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) { - if (type == KMALLOC_RECLAIM) { + if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { flags |= SLAB_RECLAIM_ACCOUNT; } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) { if (mem_cgroup_kmem_disabled()) {
On Wed, Nov 23, 2022 at 02:53:43PM +0100, Vlastimil Babka wrote: > On 11/21/22 18:11, Vlastimil Babka wrote: > > Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation > > by grouping pages by mobility, but on tiny systems the extra memory > > overhead of separate set of kmalloc-rcl caches will probably be worse, > > and mobility grouping likely disabled anyway. > > > > Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the > > regular ones. > > > > Signed-off-by: Vlastimil Babka <vbabka@suse.cz> > > Fixed up in response to lkp report for a MEMCG_KMEM+SLUB_TINY combo: > ---8<--- > From c1ec0b924850a2863d061f316615d596176f15bb Mon Sep 17 00:00:00 2001 > From: Vlastimil Babka <vbabka@suse.cz> > Date: Tue, 15 Nov 2022 18:19:28 +0100 > Subject: [PATCH 06/12] mm, slub: don't create kmalloc-rcl caches with > CONFIG_SLUB_TINY > > Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation > by grouping pages by mobility, but on tiny systems the extra memory > overhead of separate set of kmalloc-rcl caches will probably be worse, > and mobility grouping likely disabled anyway. > > Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the > regular ones. 
> > Signed-off-by: Vlastimil Babka <vbabka@suse.cz> > --- > include/linux/slab.h | 9 +++++++-- > mm/slab_common.c | 10 ++++++++-- > 2 files changed, 15 insertions(+), 4 deletions(-) > > diff --git a/include/linux/slab.h b/include/linux/slab.h > index 45efc6c553b8..ae2d19ec8467 100644 > --- a/include/linux/slab.h > +++ b/include/linux/slab.h > @@ -336,12 +336,17 @@ enum kmalloc_cache_type { > #endif > #ifndef CONFIG_MEMCG_KMEM > KMALLOC_CGROUP = KMALLOC_NORMAL, > -#else > - KMALLOC_CGROUP, > #endif > +#ifdef CONFIG_SLUB_TINY > + KMALLOC_RECLAIM = KMALLOC_NORMAL, > +#else > KMALLOC_RECLAIM, > +#endif > #ifdef CONFIG_ZONE_DMA > KMALLOC_DMA, > +#endif > +#ifdef CONFIG_MEMCG_KMEM > + KMALLOC_CGROUP, > #endif > NR_KMALLOC_TYPES > }; Can you please elaborate what the lkp report was about and how you fixed it? I'm not getting what the problem of previous version is. > diff --git a/mm/slab_common.c b/mm/slab_common.c > index a8cb5de255fc..907d52963806 100644 > --- a/mm/slab_common.c > +++ b/mm/slab_common.c > @@ -770,10 +770,16 @@ EXPORT_SYMBOL(kmalloc_size_roundup); > #define KMALLOC_CGROUP_NAME(sz) > #endif > > +#ifndef CONFIG_SLUB_TINY > +#define KMALLOC_RCL_NAME(sz) .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz, > +#else > +#define KMALLOC_RCL_NAME(sz) > +#endif > + > #define INIT_KMALLOC_INFO(__size, __short_size) \ > { \ > .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \ > - .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \ > + KMALLOC_RCL_NAME(__short_size) \ > KMALLOC_CGROUP_NAME(__short_size) \ > KMALLOC_DMA_NAME(__short_size) \ > .size = __size, \ > @@ -859,7 +865,7 @@ void __init setup_kmalloc_cache_index_table(void) > static void __init > new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) > { > - if (type == KMALLOC_RECLAIM) { > + if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { > flags |= SLAB_RECLAIM_ACCOUNT; > } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) { > if 
(mem_cgroup_kmem_disabled()) { > -- > 2.38.1 > Otherwise looks fine to me.
On 11/24/22 13:06, Hyeonggon Yoo wrote: > On Wed, Nov 23, 2022 at 02:53:43PM +0100, Vlastimil Babka wrote: >> On 11/21/22 18:11, Vlastimil Babka wrote: >> > Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation >> > by grouping pages by mobility, but on tiny systems the extra memory >> > overhead of separate set of kmalloc-rcl caches will probably be worse, >> > and mobility grouping likely disabled anyway. >> > >> > Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the >> > regular ones. >> > >> > Signed-off-by: Vlastimil Babka <vbabka@suse.cz> >> >> Fixed up in response to lkp report for a MEMCG_KMEM+SLUB_TINY combo: >> ---8<--- >> From c1ec0b924850a2863d061f316615d596176f15bb Mon Sep 17 00:00:00 2001 >> From: Vlastimil Babka <vbabka@suse.cz> >> Date: Tue, 15 Nov 2022 18:19:28 +0100 >> Subject: [PATCH 06/12] mm, slub: don't create kmalloc-rcl caches with >> CONFIG_SLUB_TINY >> >> Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation >> by grouping pages by mobility, but on tiny systems the extra memory >> overhead of separate set of kmalloc-rcl caches will probably be worse, >> and mobility grouping likely disabled anyway. >> >> Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the >> regular ones. 
>> >> Signed-off-by: Vlastimil Babka <vbabka@suse.cz> >> --- >> include/linux/slab.h | 9 +++++++-- >> mm/slab_common.c | 10 ++++++++-- >> 2 files changed, 15 insertions(+), 4 deletions(-) >> >> diff --git a/include/linux/slab.h b/include/linux/slab.h >> index 45efc6c553b8..ae2d19ec8467 100644 >> --- a/include/linux/slab.h >> +++ b/include/linux/slab.h >> @@ -336,12 +336,17 @@ enum kmalloc_cache_type { >> #endif >> #ifndef CONFIG_MEMCG_KMEM >> KMALLOC_CGROUP = KMALLOC_NORMAL, >> -#else >> - KMALLOC_CGROUP, >> #endif >> +#ifdef CONFIG_SLUB_TINY >> + KMALLOC_RECLAIM = KMALLOC_NORMAL, >> +#else >> KMALLOC_RECLAIM, >> +#endif >> #ifdef CONFIG_ZONE_DMA >> KMALLOC_DMA, >> +#endif >> +#ifdef CONFIG_MEMCG_KMEM >> + KMALLOC_CGROUP, >> #endif >> NR_KMALLOC_TYPES >> }; > > Can you please elaborate what the lkp report was about > and how you fixed it? I'm not getting what the problem of previous > version is. Report here: https://lore.kernel.org/all/202211231949.nIyAWKam-lkp@intel.com/ Problem is that if the preprocessing results in e.g. KMALLOC_NORMAL = 0, KMALLOC_DMA = KMALLOC_NORMAL, KMALLOC_CGROUP, KMALLOC_RECLAIM = KMALLOC_NORMAL, NR_KMALLOC_TYPES then NR_KMALLOC_TYPES is not 2, but 1, because the enum's internal counter got reset to 0 by KMALLOC_RECLAIM = KMALLOC_NORMAL. A common gotcha :/
On Thu, Nov 24, 2022 at 01:12:13PM +0100, Vlastimil Babka wrote: > On 11/24/22 13:06, Hyeonggon Yoo wrote: > > On Wed, Nov 23, 2022 at 02:53:43PM +0100, Vlastimil Babka wrote: > >> On 11/21/22 18:11, Vlastimil Babka wrote: > >> > Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation > >> > by grouping pages by mobility, but on tiny systems the extra memory > >> > overhead of separate set of kmalloc-rcl caches will probably be worse, > >> > and mobility grouping likely disabled anyway. > >> > > >> > Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the > >> > regular ones. > >> > > >> > Signed-off-by: Vlastimil Babka <vbabka@suse.cz> > >> > >> Fixed up in response to lkp report for a MEMCG_KMEM+SLUB_TINY combo: > >> ---8<--- > >> From c1ec0b924850a2863d061f316615d596176f15bb Mon Sep 17 00:00:00 2001 > >> From: Vlastimil Babka <vbabka@suse.cz> > >> Date: Tue, 15 Nov 2022 18:19:28 +0100 > >> Subject: [PATCH 06/12] mm, slub: don't create kmalloc-rcl caches with > >> CONFIG_SLUB_TINY > >> > >> Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation > >> by grouping pages by mobility, but on tiny systems the extra memory > >> overhead of separate set of kmalloc-rcl caches will probably be worse, > >> and mobility grouping likely disabled anyway. > >> > >> Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the > >> regular ones. 
> >> > >> Signed-off-by: Vlastimil Babka <vbabka@suse.cz> > >> --- > >> include/linux/slab.h | 9 +++++++-- > >> mm/slab_common.c | 10 ++++++++-- > >> 2 files changed, 15 insertions(+), 4 deletions(-) > >> > >> diff --git a/include/linux/slab.h b/include/linux/slab.h > >> index 45efc6c553b8..ae2d19ec8467 100644 > >> --- a/include/linux/slab.h > >> +++ b/include/linux/slab.h > >> @@ -336,12 +336,17 @@ enum kmalloc_cache_type { > >> #endif > >> #ifndef CONFIG_MEMCG_KMEM > >> KMALLOC_CGROUP = KMALLOC_NORMAL, > >> -#else > >> - KMALLOC_CGROUP, > >> #endif > >> +#ifdef CONFIG_SLUB_TINY > >> + KMALLOC_RECLAIM = KMALLOC_NORMAL, > >> +#else > >> KMALLOC_RECLAIM, > >> +#endif > >> #ifdef CONFIG_ZONE_DMA > >> KMALLOC_DMA, > >> +#endif > >> +#ifdef CONFIG_MEMCG_KMEM > >> + KMALLOC_CGROUP, > >> #endif > >> NR_KMALLOC_TYPES > >> }; > > > > Can you please elaborate what the lkp report was about > > and how you fixed it? I'm not getting what the problem of previous > > version is. > > Report here: > https://lore.kernel.org/all/202211231949.nIyAWKam-lkp@intel.com/ > > Problem is that if the preprocessing results in e.g. > KMALLOC_NORMAL = 0, > KMALLOC_DMA = KMALLOC_NORMAL, > KMALLOC_CGROUP, > KMALLOC_RECLAIM = KMALLOC_NORMAL, > NR_KMALLOC_TYPES > > then NR_KMALLOC_TYPES is not 2, but 1, because the enum's internal counter > got reset to 0 by KMALLOC_RECLAIM = KMALLOC_NORMAL. A common gotcha :/ Thanks for quick and kind explanation :) That was easy to miss.
On Wed, Nov 23, 2022 at 02:53:43PM +0100, Vlastimil Babka wrote: > On 11/21/22 18:11, Vlastimil Babka wrote: > > Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation > > by grouping pages by mobility, but on tiny systems the extra memory > > overhead of separate set of kmalloc-rcl caches will probably be worse, > > and mobility grouping likely disabled anyway. > > > > Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the > > regular ones. > > > > Signed-off-by: Vlastimil Babka <vbabka@suse.cz> > > Fixed up in response to lkp report for a MEMCG_KMEM+SLUB_TINY combo: > ---8<--- > From c1ec0b924850a2863d061f316615d596176f15bb Mon Sep 17 00:00:00 2001 > From: Vlastimil Babka <vbabka@suse.cz> > Date: Tue, 15 Nov 2022 18:19:28 +0100 > Subject: [PATCH 06/12] mm, slub: don't create kmalloc-rcl caches with > CONFIG_SLUB_TINY > > Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation > by grouping pages by mobility, but on tiny systems the extra memory > overhead of separate set of kmalloc-rcl caches will probably be worse, > and mobility grouping likely disabled anyway. > > Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the > regular ones. 
> > Signed-off-by: Vlastimil Babka <vbabka@suse.cz> > --- > include/linux/slab.h | 9 +++++++-- > mm/slab_common.c | 10 ++++++++-- > 2 files changed, 15 insertions(+), 4 deletions(-) > > diff --git a/include/linux/slab.h b/include/linux/slab.h > index 45efc6c553b8..ae2d19ec8467 100644 > --- a/include/linux/slab.h > +++ b/include/linux/slab.h > @@ -336,12 +336,17 @@ enum kmalloc_cache_type { > #endif > #ifndef CONFIG_MEMCG_KMEM > KMALLOC_CGROUP = KMALLOC_NORMAL, > -#else > - KMALLOC_CGROUP, > #endif > +#ifdef CONFIG_SLUB_TINY > + KMALLOC_RECLAIM = KMALLOC_NORMAL, > +#else > KMALLOC_RECLAIM, > +#endif > #ifdef CONFIG_ZONE_DMA > KMALLOC_DMA, > +#endif > +#ifdef CONFIG_MEMCG_KMEM > + KMALLOC_CGROUP, > #endif > NR_KMALLOC_TYPES > }; > diff --git a/mm/slab_common.c b/mm/slab_common.c > index a8cb5de255fc..907d52963806 100644 > --- a/mm/slab_common.c > +++ b/mm/slab_common.c > @@ -770,10 +770,16 @@ EXPORT_SYMBOL(kmalloc_size_roundup); > #define KMALLOC_CGROUP_NAME(sz) > #endif > > +#ifndef CONFIG_SLUB_TINY > +#define KMALLOC_RCL_NAME(sz) .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz, > +#else > +#define KMALLOC_RCL_NAME(sz) > +#endif > + > #define INIT_KMALLOC_INFO(__size, __short_size) \ > { \ > .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \ > - .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \ > + KMALLOC_RCL_NAME(__short_size) \ > KMALLOC_CGROUP_NAME(__short_size) \ > KMALLOC_DMA_NAME(__short_size) \ > .size = __size, \ > @@ -859,7 +865,7 @@ void __init setup_kmalloc_cache_index_table(void) > static void __init > new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) > { > - if (type == KMALLOC_RECLAIM) { > + if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { for consistency this can be: if (IS_ENABLED(CONFIG_SLUB_TINY) && (type == KMALLOC_RECLAIM)) { But yeah, it's not a big deal. 
> flags |= SLAB_RECLAIM_ACCOUNT; > } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) { > if (mem_cgroup_kmem_disabled()) { > -- > 2.38.1 > For either case: Looks good to me. Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
On Thu, Nov 24, 2022 at 10:23:51PM +0900, Hyeonggon Yoo wrote: > > @@ -859,7 +865,7 @@ void __init setup_kmalloc_cache_index_table(void) > > static void __init > > new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) > > { > > - if (type == KMALLOC_RECLAIM) { > > + if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { > > for consistency this can be: > if (IS_ENABLED(CONFIG_SLUB_TINY) && (type == KMALLOC_RECLAIM)) { > My finger slipped :) I mean: if (!IS_ENABLED(CONFIG_SLUB_TINY) && (type == KMALLOC_RECLAIM)) {
diff --git a/include/linux/slab.h b/include/linux/slab.h index 45efc6c553b8..3ce9474c90ab 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -339,7 +339,11 @@ enum kmalloc_cache_type { #else KMALLOC_CGROUP, #endif +#ifndef CONFIG_SLUB_TINY KMALLOC_RECLAIM, +#else + KMALLOC_RECLAIM = KMALLOC_NORMAL, +#endif #ifdef CONFIG_ZONE_DMA KMALLOC_DMA, #endif diff --git a/mm/slab_common.c b/mm/slab_common.c index a8cb5de255fc..907d52963806 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -770,10 +770,16 @@ EXPORT_SYMBOL(kmalloc_size_roundup); #define KMALLOC_CGROUP_NAME(sz) #endif +#ifndef CONFIG_SLUB_TINY +#define KMALLOC_RCL_NAME(sz) .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz, +#else +#define KMALLOC_RCL_NAME(sz) +#endif + #define INIT_KMALLOC_INFO(__size, __short_size) \ { \ .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \ - .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \ + KMALLOC_RCL_NAME(__short_size) \ KMALLOC_CGROUP_NAME(__short_size) \ KMALLOC_DMA_NAME(__short_size) \ .size = __size, \ @@ -859,7 +865,7 @@ void __init setup_kmalloc_cache_index_table(void) static void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) { - if (type == KMALLOC_RECLAIM) { + if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { flags |= SLAB_RECLAIM_ACCOUNT; } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) { if (mem_cgroup_kmem_disabled()) {
Distinguishing kmalloc(__GFP_RECLAIMABLE) can help against fragmentation by grouping pages by mobility, but on tiny systems the extra memory overhead of separate set of kmalloc-rcl caches will probably be worse, and mobility grouping likely disabled anyway. Thus with CONFIG_SLUB_TINY, don't create kmalloc-rcl caches and use the regular ones. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> --- include/linux/slab.h | 4 ++++ mm/slab_common.c | 10 ++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-)