| Message ID | 20200610163135.17364-9-vbabka@suse.cz (mailing list archive) |
|---|---|
| State | New, archived |
| Series | slub_debug fixes and improvements |
```
On Wed, Jun 10, 2020 at 06:31:34PM +0200, Vlastimil Babka wrote:
> There are a few more places in SLUB that could benefit from the reduced
> overhead of the static key introduced by a previous patch:
>
> - setup_object_debug() called on each object in a newly allocated slab page
> - setup_page_debug() called on a newly allocated slab page
> - __free_slab() called on a freed slab page
>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

Acked-by: Roman Gushchin <guro@fb.com>

Thanks!

> ---
>  mm/slub.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/mm/slub.c b/mm/slub.c
> index c8e8b4ae2451..efb08f2e9c66 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -1130,7 +1130,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
>  static void setup_object_debug(struct kmem_cache *s, struct page *page,
>  					void *object)
>  {
> -	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
> +	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
>  		return;

This part is nice! It might bring some perf improvements.

>
>  	init_object(s, object, SLUB_RED_INACTIVE);
> @@ -1140,7 +1140,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
>  static
>  void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
>  {
> -	if (!(s->flags & SLAB_POISON))
> +	if (!kmem_cache_debug_flags(s, SLAB_POISON))
>  		return;
>
>  	metadata_access_enable();
> @@ -1857,7 +1857,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
>  	int order = compound_order(page);
>  	int pages = 1 << order;
>
> -	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
> +	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
>  		void *p;
>
>  		slab_pad_check(s, page);
> --
> 2.26.2
>
```
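For readers coming from the archive: the perf improvement Roman refers to comes from the helper introduced earlier in this series, which wraps the flag test in a static key, so the common no-debug case becomes a runtime-patched jump instead of a load and test of `s->flags`. A rough sketch of its shape, reconstructed from context (not the verbatim series code; details may differ):

```c
/* Sketch reconstructed from the series context; not verbatim. */
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);

static inline bool kmem_cache_debug_flags(struct kmem_cache *s,
					  slab_flags_t flags)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * With the key disabled (the default), static_branch_unlikely()
	 * compiles to a patched-out jump: s->flags is never even loaded.
	 */
	if (static_branch_unlikely(&slub_debug_enabled))
		return s->flags & flags;
#endif
	return false;
}
```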
```diff
diff --git a/mm/slub.c b/mm/slub.c
index c8e8b4ae2451..efb08f2e9c66 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1130,7 +1130,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 static void setup_object_debug(struct kmem_cache *s, struct page *page,
 					void *object)
 {
-	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
+	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
 		return;
 
 	init_object(s, object, SLUB_RED_INACTIVE);
@@ -1140,7 +1140,7 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page,
 static
 void setup_page_debug(struct kmem_cache *s, struct page *page, void *addr)
 {
-	if (!(s->flags & SLAB_POISON))
+	if (!kmem_cache_debug_flags(s, SLAB_POISON))
 		return;
 
 	metadata_access_enable();
@@ -1857,7 +1857,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	int order = compound_order(page);
 	int pages = 1 << order;
 
-	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
+	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
 		void *p;
 
 		slab_pad_check(s, page);
```
There are a few more places in SLUB that could benefit from the reduced
overhead of the static key introduced by a previous patch:

- setup_object_debug() called on each object in a newly allocated slab page
- setup_page_debug() called on a newly allocated slab page
- __free_slab() called on a freed slab page

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/slub.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
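Not visible in this patch, but relevant to the claim: the static key is only flipped on when debugging is actually requested at boot, so production kernels keep the fast path in all three places above. A hedged sketch of the enabling side (setup_slub_debug() and the __setup() hook exist in mm/slub.c; parse_slub_debug_flags() is a hypothetical stand-in for the real flag parsing in the series):

```c
/*
 * Sketch, not the verbatim series code. The slub_debug= boot parameter
 * handler enables the static key only when some debug flag was requested;
 * parse_slub_debug_flags() is a hypothetical placeholder for the actual
 * parsing logic.
 */
static int __init setup_slub_debug(char *str)
{
	slub_debug = parse_slub_debug_flags(str);

	if (slub_debug != 0)
		static_branch_enable(&slub_debug_enabled);

	return 1;
}
__setup("slub_debug", setup_slub_debug);
```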