[v2,13/21] mm/slab: move pre/post-alloc hooks from slab.h to slub.c

Message ID 20231120-slab-remove-slab-v2-13-9c9c70177183@suse.cz (mailing list archive)
State: New
Series: remove the SLAB allocator

Commit Message

Vlastimil Babka Nov. 20, 2023, 6:34 p.m. UTC
We no longer share the hooks between the two slab implementations, so
they can be moved away from the header. As part of the move, also move
should_failslab() from slab_common.c as the pre_alloc hook uses it.
This means slab.h can stop including fault-inject.h and kmemleak.h.
Fix up some files that were depending on the includes transitively.

Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/kasan/report.c |  1 +
 mm/memcontrol.c   |  1 +
 mm/slab.h         | 72 -------------------------------------------------
 mm/slab_common.c  |  8 +-----
 mm/slub.c         | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 84 insertions(+), 79 deletions(-)
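
For orientation, here is roughly where the moved hooks sit on the slub.c side
of the allocation fastpath (a simplified, paraphrased sketch for this series'
timeframe, not part of the diff itself; kfence and freepointer wiping are
omitted):

  /* Simplified sketch of mm/slub.c's fastpath around the moved hooks. */
  static __always_inline void *slab_alloc_node(struct kmem_cache *s,
                  struct list_lru *lru, gfp_t gfpflags, int node,
                  unsigned long addr, size_t orig_size)
  {
          struct obj_cgroup *objcg = NULL;
          void *object;
          bool init;

          /* Pre-alloc hook: gfp mask, might_alloc(), failslab, memcg charge. */
          s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags);
          if (unlikely(!s))
                  return NULL;

          object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
          init = slab_want_init_on_alloc(gfpflags, s);

          /* Post-alloc hook: KASAN/KMSAN/kmemleak, zero-init, memcg commit. */
          slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size);
          return object;
  }

Both hooks were static inline in slab.h and stay static inline in slub.c, so
the move is organizational; codegen in the fastpath should be unchanged.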
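
Note also the explicit noinline that should_failslab() gains in its new home:
now that it sits in the same translation unit as its only caller, the compiler
could otherwise inline it into slab_pre_alloc_hook(), and error injection
(which overrides the return value at function entry) would silently stop
working. As an illustration of what ALLOW_ERROR_INJECTION() keeps possible, a
minimal, untested libbpf-style sketch (the program name is made up, and this
assumes CONFIG_FUNCTION_ERROR_INJECTION and CONFIG_BPF_KPROBE_OVERRIDE):

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char LICENSE[] SEC("license") = "GPL";

  SEC("kprobe/should_failslab")
  int BPF_KPROBE(force_failslab)
  {
          /* Make the pre-alloc hook see a failure; -12 is -ENOMEM.
           * Attached unfiltered this fails every slab allocation, so
           * real tools gate it on pid, cache name or a sampling rule. */
          bpf_override_return(ctx, -12);
          return 0;
  }

should_failslab() stays a non-static symbol after the move, which is what lets
the kprobe above attach to it by name.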

Comments

Hyeonggon Yoo Dec. 7, 2023, 12:43 a.m. UTC | #1
On Mon, Nov 20, 2023 at 07:34:24PM +0100, Vlastimil Babka wrote:
> We no longer share the hooks between the two slab implementations, so
> they can be moved away from the header. As part of the move, also move
> should_failslab() from slab_common.c as the pre_alloc hook uses it.
> This means slab.h can stop including fault-inject.h and kmemleak.h.
> Fix up some files that were depending on the includes transitively.
> 
> Reviewed-by: Kees Cook <keescook@chromium.org>
> Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
> ---
> [ ... diffstat and diff snipped; identical to the patch shown below ... ]

Looks good to me,
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>


Patch

diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index e77facb62900..011f727bfaff 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -23,6 +23,7 @@ 
 #include <linux/stacktrace.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/vmalloc.h>
 #include <linux/kasan.h>
 #include <linux/module.h>
 #include <linux/sched/task_stack.h>
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 947fb50eba31..8a0603517065 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -64,6 +64,7 @@ 
 #include <linux/psi.h>
 #include <linux/seq_buf.h>
 #include <linux/sched/isolation.h>
+#include <linux/kmemleak.h>
 #include "internal.h"
 #include <net/sock.h>
 #include <net/ip.h>
diff --git a/mm/slab.h b/mm/slab.h
index 1ac3a2f8d4c0..65ebf86b3fe9 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -9,8 +9,6 @@ 
 #include <linux/kobject.h>
 #include <linux/sched/mm.h>
 #include <linux/memcontrol.h>
-#include <linux/fault-inject.h>
-#include <linux/kmemleak.h>
 #include <linux/kfence.h>
 #include <linux/kasan.h>
 
@@ -796,76 +794,6 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	return s->size;
 }
 
-static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
-						     struct list_lru *lru,
-						     struct obj_cgroup **objcgp,
-						     size_t size, gfp_t flags)
-{
-	flags &= gfp_allowed_mask;
-
-	might_alloc(flags);
-
-	if (should_failslab(s, flags))
-		return NULL;
-
-	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
-		return NULL;
-
-	return s;
-}
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					struct obj_cgroup *objcg, gfp_t flags,
-					size_t size, void **p, bool init,
-					unsigned int orig_size)
-{
-	unsigned int zero_size = s->object_size;
-	bool kasan_init = init;
-	size_t i;
-
-	flags &= gfp_allowed_mask;
-
-	/*
-	 * For kmalloc object, the allocated memory size(object_size) is likely
-	 * larger than the requested size(orig_size). If redzone check is
-	 * enabled for the extra space, don't zero it, as it will be redzoned
-	 * soon. The redzone operation for this extra space could be seen as a
-	 * replacement of current poisoning under certain debug option, and
-	 * won't break other sanity checks.
-	 */
-	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
-	    (s->flags & SLAB_KMALLOC))
-		zero_size = orig_size;
-
-	/*
-	 * When slub_debug is enabled, avoid memory initialization integrated
-	 * into KASAN and instead zero out the memory via the memset below with
-	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
-	 * cause false-positive reports. This does not lead to a performance
-	 * penalty on production builds, as slub_debug is not intended to be
-	 * enabled there.
-	 */
-	if (__slub_debug_enabled())
-		kasan_init = false;
-
-	/*
-	 * As memory initialization might be integrated into KASAN,
-	 * kasan_slab_alloc and initialization memset must be
-	 * kept together to avoid discrepancies in behavior.
-	 *
-	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
-	 */
-	for (i = 0; i < size; i++) {
-		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
-		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
-			memset(p[i], 0, zero_size);
-		kmemleak_alloc_recursive(p[i], s->object_size, 1,
-					 s->flags, flags);
-		kmsan_slab_alloc(s, p[i], flags);
-	}
-
-	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
-}
 
 /*
  * The slab lists for all objects.
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 63b8411db7ce..bbc2e3f061f1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -21,6 +21,7 @@ 
 #include <linux/swiotlb.h>
 #include <linux/proc_fs.h>
 #include <linux/debugfs.h>
+#include <linux/kmemleak.h>
 #include <linux/kasan.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -1470,10 +1471,3 @@ EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
 EXPORT_TRACEPOINT_SYMBOL(kfree);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
 
-int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
-{
-	if (__should_failslab(s, gfpflags))
-		return -ENOMEM;
-	return 0;
-}
-ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
diff --git a/mm/slub.c b/mm/slub.c
index 979932d046fd..9eb6508152c2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -34,6 +34,7 @@ 
 #include <linux/memory.h>
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
+#include <linux/kmemleak.h>
 #include <linux/stacktrace.h>
 #include <linux/prefetch.h>
 #include <linux/memcontrol.h>
@@ -3494,6 +3495,86 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
 			0, sizeof(void *));
 }
 
+noinline int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
+{
+	if (__should_failslab(s, gfpflags))
+		return -ENOMEM;
+	return 0;
+}
+ALLOW_ERROR_INJECTION(should_failslab, ERRNO);
+
+static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
+						     struct list_lru *lru,
+						     struct obj_cgroup **objcgp,
+						     size_t size, gfp_t flags)
+{
+	flags &= gfp_allowed_mask;
+
+	might_alloc(flags);
+
+	if (should_failslab(s, flags))
+		return NULL;
+
+	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
+		return NULL;
+
+	return s;
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					struct obj_cgroup *objcg, gfp_t flags,
+					size_t size, void **p, bool init,
+					unsigned int orig_size)
+{
+	unsigned int zero_size = s->object_size;
+	bool kasan_init = init;
+	size_t i;
+
+	flags &= gfp_allowed_mask;
+
+	/*
+	 * For kmalloc object, the allocated memory size(object_size) is likely
+	 * larger than the requested size(orig_size). If redzone check is
+	 * enabled for the extra space, don't zero it, as it will be redzoned
+	 * soon. The redzone operation for this extra space could be seen as a
+	 * replacement of current poisoning under certain debug option, and
+	 * won't break other sanity checks.
+	 */
+	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
+	    (s->flags & SLAB_KMALLOC))
+		zero_size = orig_size;
+
+	/*
+	 * When slub_debug is enabled, avoid memory initialization integrated
+	 * into KASAN and instead zero out the memory via the memset below with
+	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
+	 * cause false-positive reports. This does not lead to a performance
+	 * penalty on production builds, as slub_debug is not intended to be
+	 * enabled there.
+	 */
+	if (__slub_debug_enabled())
+		kasan_init = false;
+
+	/*
+	 * As memory initialization might be integrated into KASAN,
+	 * kasan_slab_alloc and initialization memset must be
+	 * kept together to avoid discrepancies in behavior.
+	 *
+	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
+	 */
+	for (i = 0; i < size; i++) {
+		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
+		if (p[i] && init && (!kasan_init ||
+				     !kasan_has_integrated_init()))
+			memset(p[i], 0, zero_size);
+		kmemleak_alloc_recursive(p[i], s->object_size, 1,
+					 s->flags, flags);
+		kmsan_slab_alloc(s, p[i], flags);
+	}
+
+	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call