[1/1] mm, slab: move allocation tagging code in the alloc path into a hook

Message ID 20240703015354.3370503-1-surenb@google.com (mailing list archive)
State New
Series [1/1] mm, slab: move allocation tagging code in the alloc path into a hook

Commit Message

Suren Baghdasaryan July 3, 2024, 1:53 a.m. UTC
Move the allocation-tagging-specific code in the allocation path into
alloc_tagging_slab_alloc_hook, similar to how the freeing path uses
alloc_tagging_slab_free_hook. No functional changes, just code
cleanup.

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 mm/slub.c | 34 +++++++++++++++++++++++++++++-----
 1 file changed, 29 insertions(+), 5 deletions(-)


base-commit: e9d22f7a6655941fc8b2b942ed354ec780936b3e

Comments

Vlastimil Babka July 3, 2024, 10:47 a.m. UTC | #1
On 7/3/24 3:53 AM, Suren Baghdasaryan wrote:
> Move the allocation-tagging-specific code in the allocation path into
> alloc_tagging_slab_alloc_hook, similar to how the freeing path uses
> alloc_tagging_slab_free_hook. No functional changes, just code
> cleanup.
> 
> Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  mm/slub.c | 34 +++++++++++++++++++++++++++++-----
>  1 file changed, 29 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 4927edec6a8c..99d53190cfcf 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2033,11 +2033,18 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
>  	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
>  }
>  
> +#ifdef CONFIG_MEM_ALLOC_PROFILING

I think if you extract this whole #ifdef CONFIG_MEM_ALLOC_PROFILING section
so that it sits outside of the CONFIG_SLAB_OBJ_EXT sections, i.e. below the final

#endif /* CONFIG_SLAB_OBJ_EXT */

then it wouldn't be necessary to have two instances of the empty hooks?
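
A minimal sketch of that layout (my illustration, not part of the patch;
it assumes CONFIG_MEM_ALLOC_PROFILING selects CONFIG_SLAB_OBJ_EXT, so the
real hooks can still rely on struct slabobj_ext being available):

#ifdef CONFIG_SLAB_OBJ_EXT
/* alloc_slab_obj_exts(), prepare_slab_obj_exts_hook(), ... */
#else /* CONFIG_SLAB_OBJ_EXT */
/* empty versions of the obj_exts helpers */
#endif /* CONFIG_SLAB_OBJ_EXT */

#ifdef CONFIG_MEM_ALLOC_PROFILING

static inline void
alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
{
	alloc_tag_add(&obj_exts->ref, current->alloc_tag, size);
}

/* alloc_tagging_slab_free_hook() moves here unchanged */

#else /* CONFIG_MEM_ALLOC_PROFILING */

static inline void
alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
{
}

static inline void
alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
			     int objects)
{
}

#endif /* CONFIG_MEM_ALLOC_PROFILING */

That way the empty stubs exist exactly once.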

> +
> +static inline void
> +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> +{
> +	alloc_tag_add(&obj_exts->ref, current->alloc_tag, size);
> +}
> +
>  static inline void
>  alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>  			     int objects)
>  {
> -#ifdef CONFIG_MEM_ALLOC_PROFILING
>  	struct slabobj_ext *obj_exts;
>  	int i;
>  
> @@ -2053,9 +2060,23 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>  
>  		alloc_tag_sub(&obj_exts[off].ref, s->size);
>  	}
> -#endif
>  }
>  
> +#else /* CONFIG_MEM_ALLOC_PROFILING */
> +
> +static inline void
> +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> +{
> +}
> +
> +static inline void
> +alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
> +			     int objects)
> +{
> +}
> +
> +#endif /* CONFIG_MEM_ALLOC_PROFILING */
> +
>  #else /* CONFIG_SLAB_OBJ_EXT */
>  
>  static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> @@ -2079,6 +2100,11 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
>  	return NULL;
>  }
>  
> +static inline void
> +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> +{
> +}
> +
>  static inline void
>  alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
>  			     int objects)
> @@ -3944,7 +3970,6 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
>  		kmemleak_alloc_recursive(p[i], s->object_size, 1,
>  					 s->flags, init_flags);
>  		kmsan_slab_alloc(s, p[i], init_flags);
> -#ifdef CONFIG_MEM_ALLOC_PROFILING
>  		if (need_slab_obj_ext()) {
>  			struct slabobj_ext *obj_exts;
>  
> @@ -3955,9 +3980,8 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
>  			 * check should be added before alloc_tag_add().
>  			 */
>  			if (likely(obj_exts))
> -				alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
> +				alloc_tagging_slab_alloc_hook(obj_exts, s->size);
>  		}

Could this whole "if (need_slab_obj_ext())" block be part of
alloc_tagging_slab_alloc_hook()? That would match
__memcg_slab_post_alloc_hook, which also takes care of calling
alloc_slab_obj_exts on its own. Maybe then we wouldn't even need the empty
versions of prepare_slab_obj_exts_hook().
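
Something like this, roughly (a hypothetical sketch only; the new
signature taking the cache, gfp flags and object pointer is my guess at
how the check would fold in):

static inline void
alloc_tagging_slab_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
{
	if (need_slab_obj_ext()) {
		struct slabobj_ext *obj_exts;

		obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
		/*
		 * Currently obj_exts is used only for allocation profiling;
		 * if other users appear, an obj_exts->flags check should be
		 * added before alloc_tag_add().
		 */
		if (likely(obj_exts))
			alloc_tag_add(&obj_exts->ref, current->alloc_tag,
				      s->size);
	}
}

slab_post_alloc_hook() would then call the hook unconditionally, and the
empty !CONFIG_MEM_ALLOC_PROFILING version would swallow the whole check.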

> -#endif
>  	}
>  
>  	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
> 
> base-commit: e9d22f7a6655941fc8b2b942ed354ec780936b3e
Suren Baghdasaryan July 3, 2024, 4:16 p.m. UTC | #2
On Wed, Jul 3, 2024 at 3:47 AM Vlastimil Babka <vbabka@suse.cz> wrote:
>
> On 7/3/24 3:53 AM, Suren Baghdasaryan wrote:
> > Move the allocation-tagging-specific code in the allocation path into
> > alloc_tagging_slab_alloc_hook, similar to how the freeing path uses
> > alloc_tagging_slab_free_hook. No functional changes, just code
> > cleanup.
> >
> > Suggested-by: Vlastimil Babka <vbabka@suse.cz>
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> >  mm/slub.c | 34 +++++++++++++++++++++++++++++-----
> >  1 file changed, 29 insertions(+), 5 deletions(-)
> >
> > diff --git a/mm/slub.c b/mm/slub.c
> > index 4927edec6a8c..99d53190cfcf 100644
> > --- a/mm/slub.c
> > +++ b/mm/slub.c
> > @@ -2033,11 +2033,18 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> >       return slab_obj_exts(slab) + obj_to_index(s, slab, p);
> >  }
> >
> > +#ifdef CONFIG_MEM_ALLOC_PROFILING
>
> I think if you extract this whole #ifdef CONFIG_MEM_ALLOC_PROFILING section
> so that it sits outside of the CONFIG_SLAB_OBJ_EXT sections, i.e. below the final
>
> #endif /* CONFIG_SLAB_OBJ_EXT */
>
> then it wouldn't be necessary to have two instances of the empty hooks?
>
> > +
> > +static inline void
> > +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> > +{
> > +     alloc_tag_add(&obj_exts->ref, current->alloc_tag, size);
> > +}
> > +
> >  static inline void
> >  alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
> >                            int objects)
> >  {
> > -#ifdef CONFIG_MEM_ALLOC_PROFILING
> >       struct slabobj_ext *obj_exts;
> >       int i;
> >
> > @@ -2053,9 +2060,23 @@ alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
> >
> >               alloc_tag_sub(&obj_exts[off].ref, s->size);
> >       }
> > -#endif
> >  }
> >
> > +#else /* CONFIG_MEM_ALLOC_PROFILING */
> > +
> > +static inline void
> > +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> > +{
> > +}
> > +
> > +static inline void
> > +alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
> > +                          int objects)
> > +{
> > +}
> > +
> > +#endif /* CONFIG_MEM_ALLOC_PROFILING */
> > +
> >  #else /* CONFIG_SLAB_OBJ_EXT */
> >
> >  static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> > @@ -2079,6 +2100,11 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
> >       return NULL;
> >  }
> >
> > +static inline void
> > +alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
> > +{
> > +}
> > +
> >  static inline void
> >  alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
> >                            int objects)
> > @@ -3944,7 +3970,6 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
> >               kmemleak_alloc_recursive(p[i], s->object_size, 1,
> >                                        s->flags, init_flags);
> >               kmsan_slab_alloc(s, p[i], init_flags);
> > -#ifdef CONFIG_MEM_ALLOC_PROFILING
> >               if (need_slab_obj_ext()) {
> >                       struct slabobj_ext *obj_exts;
> >
> > @@ -3955,9 +3980,8 @@ bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
> >                        * check should be added before alloc_tag_add().
> >                        */
> >                       if (likely(obj_exts))
> > -                             alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
> > +                             alloc_tagging_slab_alloc_hook(obj_exts, s->size);
> >               }
>
> Could this whole "if (need_slab_obj_ext())" block be part of
> alloc_tagging_slab_alloc_hook()? That would match
> __memcg_slab_post_alloc_hook, which also takes care of calling
> alloc_slab_obj_exts on its own. Maybe then we wouldn't even need the empty
> versions of prepare_slab_obj_exts_hook().

Yeah, since we don't have other slab_obj_ext users yet, we can
simplify the code that way. Originally I wanted to keep it more
generic and make it easy to add new slab_obj_ext users, but I'm fine
with this simplification. When there are other users, we can change
the code back to be more generic. I'll post v2 shortly with your
suggestions.
Thanks,
Suren.

>
> > -#endif
> >       }
> >
> >       return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
> >
> > base-commit: e9d22f7a6655941fc8b2b942ed354ec780936b3e
>

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 4927edec6a8c..99d53190cfcf 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2033,11 +2033,18 @@  prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 	return slab_obj_exts(slab) + obj_to_index(s, slab, p);
 }
 
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+
+static inline void
+alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
+{
+	alloc_tag_add(&obj_exts->ref, current->alloc_tag, size);
+}
+
 static inline void
 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			     int objects)
 {
-#ifdef CONFIG_MEM_ALLOC_PROFILING
 	struct slabobj_ext *obj_exts;
 	int i;
 
@@ -2053,9 +2060,23 @@  alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 
 		alloc_tag_sub(&obj_exts[off].ref, s->size);
 	}
-#endif
 }
 
+#else /* CONFIG_MEM_ALLOC_PROFILING */
+
+static inline void
+alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
+{
+}
+
+static inline void
+alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
+			     int objects)
+{
+}
+
+#endif /* CONFIG_MEM_ALLOC_PROFILING */
+
 #else /* CONFIG_SLAB_OBJ_EXT */
 
 static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
@@ -2079,6 +2100,11 @@  prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
 	return NULL;
 }
 
+static inline void
+alloc_tagging_slab_alloc_hook(struct slabobj_ext *obj_exts, unsigned int size)
+{
+}
+
 static inline void
 alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
 			     int objects)
@@ -3944,7 +3970,6 @@  bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 		kmemleak_alloc_recursive(p[i], s->object_size, 1,
 					 s->flags, init_flags);
 		kmsan_slab_alloc(s, p[i], init_flags);
-#ifdef CONFIG_MEM_ALLOC_PROFILING
 		if (need_slab_obj_ext()) {
 			struct slabobj_ext *obj_exts;
 
@@ -3955,9 +3980,8 @@  bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
 			 * check should be added before alloc_tag_add().
 			 */
 			if (likely(obj_exts))
-				alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
+				alloc_tagging_slab_alloc_hook(obj_exts, s->size);
 		}
-#endif
 	}
 
 	return memcg_slab_post_alloc_hook(s, lru, flags, size, p);