
[v2,2/9] slub: Plumb kmem_buckets into __do_kmalloc_node()

Message ID 20240305101026.694758-2-keescook@chromium.org (mailing list archive)
State Superseded
Series slab: Introduce dedicated bucket allocator

Commit Message

Kees Cook March 5, 2024, 10:10 a.m. UTC
To allow the caller to choose which set of buckets to allocate from, make
the buckets available to the lower-level kmalloc interfaces.

Signed-off-by: Kees Cook <keescook@chromium.org>
---
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: linux-mm@kvack.org
Cc: linux-hardening@vger.kernel.org
---
 include/linux/slab.h |  8 ++++----
 lib/fortify_kunit.c  |  2 +-
 mm/slab.h            |  6 ++++--
 mm/slab_common.c     |  2 +-
 mm/slub.c            | 12 ++++++------
 5 files changed, 16 insertions(+), 14 deletions(-)
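
This patch threads a new first parameter through the low-level kmalloc
paths. For context, a sketch of the type involved, introduced by patch
1/9 of this series (see that patch for the authoritative definition):

    /* Sketch, per patch 1/9: one cache per kmalloc size class. */
    typedef struct kmem_cache *kmem_buckets[KMALLOC_SHIFT_HIGH + 1];

    /* Convention established here: a NULL bucket pointer selects the
     * default kmalloc_caches set, preserving existing behavior. */
    void *p = __kmalloc_node(NULL, size, GFP_KERNEL, NUMA_NO_NODE);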

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 55059faf166c..1cc1a7637b56 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -508,8 +508,8 @@  static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
-							 __alloc_size(1);
+void *__kmalloc_node(kmem_buckets *b, size_t size, gfp_t flags, int node)
+		     __assume_kmalloc_alignment __alloc_size(2);
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
 									 __malloc;
 
@@ -608,7 +608,7 @@  static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, node, size);
 	}
-	return __kmalloc_node(size, flags, node);
+	return __kmalloc_node(NULL, size, flags, node);
 }
 
 /**
@@ -686,7 +686,7 @@  static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size,
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
 		return kmalloc_node(bytes, flags, node);
-	return __kmalloc_node(bytes, flags, node);
+	return __kmalloc_node(NULL, bytes, flags, node);
 }
 
 static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
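
Note the __alloc_size() index moving from 1 to 2 in the prototype above:
the attribute names the parameter that carries the allocation size, and
`size` is now the second parameter. A minimal illustration of what the
attribute feeds (hedged; demo_alloc() is hypothetical, not kernel API):

    /* __alloc_size(N) lets the compiler and FORTIFY_SOURCE track the
     * allocated size via __builtin_dynamic_object_size(). */
    void *demo_alloc(void *ctx, size_t size) __alloc_size(2);

    void demo(void *ctx)
    {
            char *p = demo_alloc(ctx, 16);
            /* __builtin_dynamic_object_size(p, 0) evaluates to 16 here. */
    }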
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 2e4fedc81621..c44400b577f3 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -182,7 +182,7 @@  static void alloc_size_##allocator##_dynamic_test(struct kunit *test)	\
 	checker(expected_size, __kmalloc(alloc_size, gfp),		\
 		kfree(p));						\
 	checker(expected_size,						\
-		__kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
+		__kmalloc_node(NULL, alloc_size, gfp, NUMA_NO_NODE),	\
 		kfree(p));						\
 									\
 	orig = kmalloc(alloc_size, gfp);				\
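
The fortify_kunit change is mechanical, but it is what exercises the
renumbered __alloc_size() annotation above. A simplified sketch of what
the checker macro asserts (hedged; the real macro compares several
__builtin_*_object_size() variants against the expected size):

    void *p = __kmalloc_node(NULL, alloc_size, gfp, NUMA_NO_NODE);
    KUNIT_EXPECT_EQ(test, __builtin_dynamic_object_size(p, 1),
                    expected_size);
    kfree(p);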
diff --git a/mm/slab.h b/mm/slab.h
index 54deeb0428c6..931f261bde48 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -404,16 +404,18 @@  static inline unsigned int size_index_elem(unsigned int bytes)
  * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
  */
 static inline struct kmem_cache *
-kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
+kmalloc_slab(kmem_buckets *b, size_t size, gfp_t flags, unsigned long caller)
 {
 	unsigned int index;
 
+	if (!b)
+		b = &kmalloc_caches[kmalloc_type(flags, caller)];
 	if (size <= 192)
 		index = kmalloc_size_index[size_index_elem(size)];
 	else
 		index = fls(size - 1);
 
-	return kmalloc_caches[kmalloc_type(flags, caller)][index];
+	return (*b)[index];
 }
 
 gfp_t kmalloc_fix_flags(gfp_t flags);
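
The NULL fallback in kmalloc_slab() works because each row of the global
kmalloc_caches table is itself a kmem_buckets, so taking the address of a
row yields exactly the `kmem_buckets *` the new parameter expects. A
sketch, using the declaration patch 1/9 switches the table to (hedged):

    extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];

    kmem_buckets *b = &kmalloc_caches[KMALLOC_NORMAL];
    /* (*b)[index] == kmalloc_caches[KMALLOC_NORMAL][index] */
    struct kmem_cache *s = (*b)[index];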
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 8787cf17d6e4..1d0f25b6ae91 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -698,7 +698,7 @@  size_t kmalloc_size_roundup(size_t size)
 		 * The flags don't matter since size_index is common to all.
 		 * Neither does the caller for just getting ->object_size.
 		 */
-		return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+		return kmalloc_slab(NULL, size, GFP_KERNEL, 0)->object_size;
 	}
 
 	/* Above the smaller buckets, size is a multiple of page size. */
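
Passing NULL here is sufficient because every bucket set shares the same
size classes, so ->object_size for a given index does not depend on which
set is consulted. For example:

    /* Illustrative result on a typical config: 100 rounds up past the
     * 96-byte bucket to the 128-byte one. */
    size_t n = kmalloc_size_roundup(100);   /* n == 128 */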
diff --git a/mm/slub.c b/mm/slub.c
index 2ef88bbf56a3..71220b4b1f79 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3959,7 +3959,7 @@  void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(kmalloc_large_node);
 
 static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
+void *__do_kmalloc_node(kmem_buckets *b, size_t size, gfp_t flags, int node,
 			unsigned long caller)
 {
 	struct kmem_cache *s;
@@ -3975,7 +3975,7 @@  void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	if (unlikely(!size))
 		return ZERO_SIZE_PTR;
 
-	s = kmalloc_slab(size, flags, caller);
+	s = kmalloc_slab(b, size, flags, caller);
 
 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
 	ret = kasan_kmalloc(s, ret, size, flags);
@@ -3983,22 +3983,22 @@  void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+void *__kmalloc_node(kmem_buckets *b, size_t size, gfp_t flags, int node)
 {
-	return __do_kmalloc_node(size, flags, node, _RET_IP_);
+	return __do_kmalloc_node(b, size, flags, node, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc(size_t size, gfp_t flags)
 {
-	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
+	return __do_kmalloc_node(NULL, size, flags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 				  int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(NULL, size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
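
Behavior is unchanged by this patch itself, since every caller converted
here passes NULL; the point is to let later patches in the series hand in
a dedicated set. A hypothetical future caller (illustrative only:
msg_buckets is not part of this patch, and the helper that would populate
it, kmem_buckets_create(), arrives later in the series with a possibly
different shape):

    /* Hypothetical caller once a dedicated bucket set exists. */
    static kmem_buckets *msg_buckets;       /* populated by later-series API */

    static void *alloc_msg(size_t len)
    {
            return __kmalloc_node(msg_buckets, len, GFP_KERNEL, NUMA_NO_NODE);
    }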