
[v3,34/35] codetag: debug: introduce OBJEXTS_ALLOC_FAIL to mark failed slab_ext allocations

Message ID: 20240212213922.783301-35-surenb@google.com (mailing list archive)
State: New, archived
Series: Memory allocation profiling

Commit Message

Suren Baghdasaryan Feb. 12, 2024, 9:39 p.m. UTC
If slabobj_ext vector allocation for a slab object fails and later on it
succeeds for another object in the same slab, the slabobj_ext reference for
the original object will remain NULL and will be flagged when
CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled.
Mark failed slabobj_ext vector allocations using a new objext_flags flag
stored in the lower bits of slab->obj_exts. When a later allocation succeeds,
mark all tag references in the same slabobj_ext vector as empty to avoid
the warnings issued by the CONFIG_MEM_ALLOC_PROFILING_DEBUG checks.

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
 include/linux/memcontrol.h |  4 +++-
 mm/slab.h                  | 25 +++++++++++++++++++++++++
 mm/slab_common.c           | 22 +++++++++++++++-------
 3 files changed, 43 insertions(+), 8 deletions(-)
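
An illustrative aside, not part of the patch: the mechanism relies on the
allocated slabobj_ext vector being sufficiently aligned that the low bits of
slab->obj_exts are always zero and can therefore carry small flags such as
OBJEXTS_ALLOC_FAIL, with OBJEXTS_FLAGS_MASK separating the flags from the
pointer. Below is a minimal standalone userspace C sketch of that packing;
the flag values are placeholders chosen for the sketch (the real
__FIRST_OBJEXT_FLAG is derived from the memcg data flags defined just above
it in memcontrol.h).

#include <stdio.h>
#include <stdlib.h>

#define __FIRST_OBJEXT_FLAG     (1UL << 0)      /* placeholder for the sketch */
#define OBJEXTS_ALLOC_FAIL      __FIRST_OBJEXT_FLAG
#define __NR_OBJEXTS_FLAGS      (__FIRST_OBJEXT_FLAG << 1)
#define OBJEXTS_FLAGS_MASK      (__NR_OBJEXTS_FLAGS - 1)

int main(void)
{
        unsigned long obj_exts;
        void *vec;

        /* Vector allocation failed: only the flag is stored, the pointer part is NULL. */
        obj_exts = OBJEXTS_ALLOC_FAIL;
        printf("failed:  flags=%#lx vec=%p\n",
               obj_exts & OBJEXTS_FLAGS_MASK,
               (void *)(obj_exts & ~OBJEXTS_FLAGS_MASK));

        /* Vector allocation succeeded: the aligned pointer occupies the upper bits. */
        vec = aligned_alloc(64, 64);
        obj_exts = (unsigned long)vec;
        printf("success: flags=%#lx vec=%p\n",
               obj_exts & OBJEXTS_FLAGS_MASK,
               (void *)(obj_exts & ~OBJEXTS_FLAGS_MASK));

        free(vec);
        return 0;
}

Running the sketch prints a NULL vector with the flag set for the failed
case, and the aligned pointer with zero flags for the successful one.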

Comments

Kees Cook Feb. 12, 2024, 10:49 p.m. UTC | #1
On Mon, Feb 12, 2024 at 01:39:20PM -0800, Suren Baghdasaryan wrote:
> If slabobj_ext vector allocation for a slab object fails and later on it
> succeeds for another object in the same slab, the slabobj_ext reference for
> the original object will remain NULL and will be flagged when
> CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled.
> Mark failed slabobj_ext vector allocations using a new objext_flags flag
> stored in the lower bits of slab->obj_exts. When a later allocation succeeds,
> mark all tag references in the same slabobj_ext vector as empty to avoid
> the warnings issued by the CONFIG_MEM_ALLOC_PROFILING_DEBUG checks.
> 
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
>  include/linux/memcontrol.h |  4 +++-
>  mm/slab.h                  | 25 +++++++++++++++++++++++++
>  mm/slab_common.c           | 22 +++++++++++++++-------
>  3 files changed, 43 insertions(+), 8 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 2b010316016c..f95241ca9052 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -365,8 +365,10 @@ enum page_memcg_data_flags {
>  #endif /* CONFIG_MEMCG */
>  
>  enum objext_flags {
> +	/* slabobj_ext vector failed to allocate */
> +	OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
>  	/* the next bit after the last actual flag */
> -	__NR_OBJEXTS_FLAGS  = __FIRST_OBJEXT_FLAG,
> +	__NR_OBJEXTS_FLAGS  = (__FIRST_OBJEXT_FLAG << 1),
>  };
>  
>  #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
> diff --git a/mm/slab.h b/mm/slab.h
> index cf332a839bf4..7bb3900f83ef 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -586,9 +586,34 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
>  	}
>  }
>  
> +static inline void mark_failed_objexts_alloc(struct slab *slab)
> +{
> +	slab->obj_exts = OBJEXTS_ALLOC_FAIL;

Uh, does this mean slab->obj_exts is suddenly non-NULL? Is everything
that accesses obj_exts expecting this?

-Kees

> +}
> +
> +static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
> +			struct slabobj_ext *vec, unsigned int objects)
> +{
> +	/*
> +	 * If vector previously failed to allocate then we have live
> +	 * objects with no tag reference. Mark all references in this
> +	 * vector as empty to avoid warnings later on.
> +	 */
> +	if (obj_exts & OBJEXTS_ALLOC_FAIL) {
> +		unsigned int i;
> +
> +		for (i = 0; i < objects; i++)
> +			set_codetag_empty(&vec[i].ref);
> +	}
> +}
> +
> +
>  #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
>  
>  static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
> +static inline void mark_failed_objexts_alloc(struct slab *slab) {}
> +static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
> +			struct slabobj_ext *vec, unsigned int objects) {}
>  
>  #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
>  
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index d5f75d04ced2..489c7a8ba8f1 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -214,29 +214,37 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
>  			gfp_t gfp, bool new_slab)
>  {
>  	unsigned int objects = objs_per_slab(s, slab);
> -	unsigned long obj_exts;
> -	void *vec;
> +	unsigned long new_exts;
> +	unsigned long old_exts;
> +	struct slabobj_ext *vec;
>  
>  	gfp &= ~OBJCGS_CLEAR_MASK;
>  	/* Prevent recursive extension vector allocation */
>  	gfp |= __GFP_NO_OBJ_EXT;
>  	vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
>  			   slab_nid(slab));
> -	if (!vec)
> +	if (!vec) {
> +		/* Mark vectors which failed to allocate */
> +		if (new_slab)
> +			mark_failed_objexts_alloc(slab);
> +
>  		return -ENOMEM;
> +	}
>  
> -	obj_exts = (unsigned long)vec;
> +	new_exts = (unsigned long)vec;
>  #ifdef CONFIG_MEMCG
> -	obj_exts |= MEMCG_DATA_OBJEXTS;
> +	new_exts |= MEMCG_DATA_OBJEXTS;
>  #endif
> +	old_exts = slab->obj_exts;
> +	handle_failed_objexts_alloc(old_exts, vec, objects);
>  	if (new_slab) {
>  		/*
>  		 * If the slab is brand new and nobody can yet access its
>  		 * obj_exts, no synchronization is required and obj_exts can
>  		 * be simply assigned.
>  		 */
> -		slab->obj_exts = obj_exts;
> -	} else if (cmpxchg(&slab->obj_exts, 0, obj_exts)) {
> +		slab->obj_exts = new_exts;
> +	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
>  		/*
>  		 * If the slab is already in use, somebody can allocate and
>  		 * assign slabobj_exts in parallel. In this case the existing
> -- 
> 2.43.0.687.g38aa6559b0-goog
>
Suren Baghdasaryan Feb. 13, 2024, 12:09 a.m. UTC | #2
On Mon, Feb 12, 2024 at 2:49 PM Kees Cook <keescook@chromium.org> wrote:
>
> On Mon, Feb 12, 2024 at 01:39:20PM -0800, Suren Baghdasaryan wrote:
> > If slabobj_ext vector allocation for a slab object fails and later on it
> > succeeds for another object in the same slab, the slabobj_ext reference for
> > the original object will remain NULL and will be flagged when
> > CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled.
> > Mark failed slabobj_ext vector allocations using a new objext_flags flag
> > stored in the lower bits of slab->obj_exts. When a later allocation succeeds,
> > mark all tag references in the same slabobj_ext vector as empty to avoid
> > the warnings issued by the CONFIG_MEM_ALLOC_PROFILING_DEBUG checks.
> >
> > Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> > ---
> >  include/linux/memcontrol.h |  4 +++-
> >  mm/slab.h                  | 25 +++++++++++++++++++++++++
> >  mm/slab_common.c           | 22 +++++++++++++++-------
> >  3 files changed, 43 insertions(+), 8 deletions(-)
> >
> > diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> > index 2b010316016c..f95241ca9052 100644
> > --- a/include/linux/memcontrol.h
> > +++ b/include/linux/memcontrol.h
> > @@ -365,8 +365,10 @@ enum page_memcg_data_flags {
> >  #endif /* CONFIG_MEMCG */
> >
> >  enum objext_flags {
> > +     /* slabobj_ext vector failed to allocate */
> > +     OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
> >       /* the next bit after the last actual flag */
> > -     __NR_OBJEXTS_FLAGS  = __FIRST_OBJEXT_FLAG,
> > +     __NR_OBJEXTS_FLAGS  = (__FIRST_OBJEXT_FLAG << 1),
> >  };
> >
> >  #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
> > diff --git a/mm/slab.h b/mm/slab.h
> > index cf332a839bf4..7bb3900f83ef 100644
> > --- a/mm/slab.h
> > +++ b/mm/slab.h
> > @@ -586,9 +586,34 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
> >       }
> >  }
> >
> > +static inline void mark_failed_objexts_alloc(struct slab *slab)
> > +{
> > +     slab->obj_exts = OBJEXTS_ALLOC_FAIL;
>
> Uh, does this mean slab->obj_exts is suddenly non-NULL? Is everything
> that accesses obj_exts expecting this?

Hi Kees,
Thank you for the reviews!
Yes, I believe everything that accesses slab->obj_exts directly
(currently alloc_slab_obj_exts() and free_slab_obj_exts()) handles this
special non-NULL case. kfence_init_pool() initializes slab->obj_exts
directly, but since it only sets the field and does not read it, it does
not need to handle OBJEXTS_ALLOC_FAIL. All other slab->obj_exts users go
through slab_obj_exts(), which applies OBJEXTS_FLAGS_MASK and masks out
any special bits.
Thanks,
Suren.
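
For illustration only, a minimal standalone sketch of that masking; the
flag value and the helper name here are assumptions of the sketch rather
than the kernel's actual slab_obj_exts():

#include <assert.h>
#include <stddef.h>

#define OBJEXTS_ALLOC_FAIL      (1UL << 0)              /* assumed placeholder */
#define OBJEXTS_FLAGS_MASK      ((1UL << 1) - 1)

struct slabobj_ext;                                     /* opaque here */

/* Accessor in the spirit of slab_obj_exts(): strip the flag bits. */
static struct slabobj_ext *vector_of(unsigned long obj_exts)
{
        return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}

int main(void)
{
        unsigned long obj_exts = OBJEXTS_ALLOC_FAIL;    /* failed allocation */

        assert(obj_exts != 0);                  /* raw word is non-NULL ... */
        assert(vector_of(obj_exts) == NULL);    /* ... but the vector is NULL */
        return 0;
}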

>
> -Kees
>
> > +}
> > +
> > +static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
> > +                     struct slabobj_ext *vec, unsigned int objects)
> > +{
> > +     /*
> > +      * If vector previously failed to allocate then we have live
> > +      * objects with no tag reference. Mark all references in this
> > +      * vector as empty to avoid warnings later on.
> > +      */
> > +     if (obj_exts & OBJEXTS_ALLOC_FAIL) {
> > +             unsigned int i;
> > +
> > +             for (i = 0; i < objects; i++)
> > +                     set_codetag_empty(&vec[i].ref);
> > +     }
> > +}
> > +
> > +
> >  #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
> >
> >  static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
> > +static inline void mark_failed_objexts_alloc(struct slab *slab) {}
> > +static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
> > +                     struct slabobj_ext *vec, unsigned int objects) {}
> >
> >  #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
> >
> > diff --git a/mm/slab_common.c b/mm/slab_common.c
> > index d5f75d04ced2..489c7a8ba8f1 100644
> > --- a/mm/slab_common.c
> > +++ b/mm/slab_common.c
> > @@ -214,29 +214,37 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
> >                       gfp_t gfp, bool new_slab)
> >  {
> >       unsigned int objects = objs_per_slab(s, slab);
> > -     unsigned long obj_exts;
> > -     void *vec;
> > +     unsigned long new_exts;
> > +     unsigned long old_exts;
> > +     struct slabobj_ext *vec;
> >
> >       gfp &= ~OBJCGS_CLEAR_MASK;
> >       /* Prevent recursive extension vector allocation */
> >       gfp |= __GFP_NO_OBJ_EXT;
> >       vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
> >                          slab_nid(slab));
> > -     if (!vec)
> > +     if (!vec) {
> > +             /* Mark vectors which failed to allocate */
> > +             if (new_slab)
> > +                     mark_failed_objexts_alloc(slab);
> > +
> >               return -ENOMEM;
> > +     }
> >
> > -     obj_exts = (unsigned long)vec;
> > +     new_exts = (unsigned long)vec;
> >  #ifdef CONFIG_MEMCG
> > -     obj_exts |= MEMCG_DATA_OBJEXTS;
> > +     new_exts |= MEMCG_DATA_OBJEXTS;
> >  #endif
> > +     old_exts = slab->obj_exts;
> > +     handle_failed_objexts_alloc(old_exts, vec, objects);
> >       if (new_slab) {
> >               /*
> >                * If the slab is brand new and nobody can yet access its
> >                * obj_exts, no synchronization is required and obj_exts can
> >                * be simply assigned.
> >                */
> > -             slab->obj_exts = obj_exts;
> > -     } else if (cmpxchg(&slab->obj_exts, 0, obj_exts)) {
> > +             slab->obj_exts = new_exts;
> > +     } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
> >               /*
> >                * If the slab is already in use, somebody can allocate and
> >                * assign slabobj_exts in parallel. In this case the existing
> > --
> > 2.43.0.687.g38aa6559b0-goog
> >
>
> --
> Kees Cook
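
As a side note on the cmpxchg() change in the hunk below: the pattern is
that only one of several racing allocators installs its vector, and the
losers observe the winner's value. A standalone sketch using C11 atomics
follows; the names are assumptions of the sketch, and the discard-on-loss
handling merely approximates what the truncated comment in the hunk
describes, not the kernel code itself.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic unsigned long obj_exts;          /* stand-in for slab->obj_exts */

/* Try to install vec; if another thread won the race, keep its value instead. */
static unsigned long install_vector(unsigned long old_exts, void *vec)
{
        unsigned long expected = old_exts;
        unsigned long new_exts = (unsigned long)vec;

        if (!atomic_compare_exchange_strong(&obj_exts, &expected, new_exts)) {
                free(vec);              /* lost the race: discard our copy */
                return expected;        /* the value installed by the winner */
        }
        return new_exts;
}

int main(void)
{
        unsigned long first  = install_vector(0, malloc(64));  /* wins */
        unsigned long second = install_vector(0, malloc(64));  /* loses */

        printf("first=%#lx second=%#lx reused=%d\n",
               first, second, first == second);
        free((void *)first);
        return 0;
}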

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2b010316016c..f95241ca9052 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -365,8 +365,10 @@  enum page_memcg_data_flags {
 #endif /* CONFIG_MEMCG */
 
 enum objext_flags {
+	/* slabobj_ext vector failed to allocate */
+	OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG,
 	/* the next bit after the last actual flag */
-	__NR_OBJEXTS_FLAGS  = __FIRST_OBJEXT_FLAG,
+	__NR_OBJEXTS_FLAGS  = (__FIRST_OBJEXT_FLAG << 1),
 };
 
 #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1)
diff --git a/mm/slab.h b/mm/slab.h
index cf332a839bf4..7bb3900f83ef 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -586,9 +586,34 @@  static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
 	}
 }
 
+static inline void mark_failed_objexts_alloc(struct slab *slab)
+{
+	slab->obj_exts = OBJEXTS_ALLOC_FAIL;
+}
+
+static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
+			struct slabobj_ext *vec, unsigned int objects)
+{
+	/*
+	 * If vector previously failed to allocate then we have live
+	 * objects with no tag reference. Mark all references in this
+	 * vector as empty to avoid warnings later on.
+	 */
+	if (obj_exts & OBJEXTS_ALLOC_FAIL) {
+		unsigned int i;
+
+		for (i = 0; i < objects; i++)
+			set_codetag_empty(&vec[i].ref);
+	}
+}
+
+
 #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
+static inline void mark_failed_objexts_alloc(struct slab *slab) {}
+static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
+			struct slabobj_ext *vec, unsigned int objects) {}
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d5f75d04ced2..489c7a8ba8f1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -214,29 +214,37 @@  int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
 			gfp_t gfp, bool new_slab)
 {
 	unsigned int objects = objs_per_slab(s, slab);
-	unsigned long obj_exts;
-	void *vec;
+	unsigned long new_exts;
+	unsigned long old_exts;
+	struct slabobj_ext *vec;
 
 	gfp &= ~OBJCGS_CLEAR_MASK;
 	/* Prevent recursive extension vector allocation */
 	gfp |= __GFP_NO_OBJ_EXT;
 	vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
 			   slab_nid(slab));
-	if (!vec)
+	if (!vec) {
+		/* Mark vectors which failed to allocate */
+		if (new_slab)
+			mark_failed_objexts_alloc(slab);
+
 		return -ENOMEM;
+	}
 
-	obj_exts = (unsigned long)vec;
+	new_exts = (unsigned long)vec;
 #ifdef CONFIG_MEMCG
-	obj_exts |= MEMCG_DATA_OBJEXTS;
+	new_exts |= MEMCG_DATA_OBJEXTS;
 #endif
+	old_exts = slab->obj_exts;
+	handle_failed_objexts_alloc(old_exts, vec, objects);
 	if (new_slab) {
 		/*
 		 * If the slab is brand new and nobody can yet access its
 		 * obj_exts, no synchronization is required and obj_exts can
 		 * be simply assigned.
 		 */
-		slab->obj_exts = obj_exts;
-	} else if (cmpxchg(&slab->obj_exts, 0, obj_exts)) {
+		slab->obj_exts = new_exts;
+	} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
 		/*
 		 * If the slab is already in use, somebody can allocate and
 		 * assign slabobj_exts in parallel. In this case the existing