--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -508,8 +508,8 @@ static __always_inline void kfree_bulk(size_t size, void **p)
kmem_cache_free_bulk(NULL, size, p);
}
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
- __alloc_size(1);
+void *__kmalloc_node(kmem_buckets *b, size_t size, gfp_t flags, int node)
+ __assume_kmalloc_alignment __alloc_size(2);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
__malloc;
@@ -608,7 +608,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, node, size);
}
- return __kmalloc_node(size, flags, node);
+ return __kmalloc_node(NULL, size, flags, node);
}
/**
@@ -686,7 +686,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size,
return NULL;
if (__builtin_constant_p(n) && __builtin_constant_p(size))
return kmalloc_node(bytes, flags, node);
- return __kmalloc_node(bytes, flags, node);
+ return __kmalloc_node(NULL, bytes, flags, node);
}
static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -182,7 +182,7 @@ static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
checker(expected_size, __kmalloc(alloc_size, gfp), \
kfree(p)); \
checker(expected_size, \
- __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ __kmalloc_node(NULL, alloc_size, gfp, NUMA_NO_NODE), \
kfree(p)); \
\
orig = kmalloc(alloc_size, gfp); \
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -404,16 +404,18 @@ static inline unsigned int size_index_elem(unsigned int bytes)
* KMALLOC_MAX_CACHE_SIZE and the caller must check that.
*/
static inline struct kmem_cache *
-kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
+kmalloc_slab(kmem_buckets *b, size_t size, gfp_t flags, unsigned long caller)
{
unsigned int index;
+ if (!b)
+ b = &kmalloc_caches[kmalloc_type(flags, caller)];
if (size <= 192)
index = kmalloc_size_index[size_index_elem(size)];
else
index = fls(size - 1);
- return kmalloc_caches[kmalloc_type(flags, caller)][index];
+ return (*b)[index];
}
gfp_t kmalloc_fix_flags(gfp_t flags);
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -698,7 +698,7 @@ size_t kmalloc_size_roundup(size_t size)
* The flags don't matter since size_index is common to all.
* Neither does the caller for just getting ->object_size.
*/
- return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+ return kmalloc_slab(NULL, size, GFP_KERNEL, 0)->object_size;
}
/* Above the smaller buckets, size is a multiple of page size. */
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3959,7 +3959,7 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(kmalloc_large_node);
static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
+void *__do_kmalloc_node(kmem_buckets *b, size_t size, gfp_t flags, int node,
unsigned long caller)
{
struct kmem_cache *s;
@@ -3975,7 +3975,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
if (unlikely(!size))
return ZERO_SIZE_PTR;
- s = kmalloc_slab(size, flags, caller);
+ s = kmalloc_slab(b, size, flags, caller);
ret = slab_alloc_node(s, NULL, flags, node, caller, size);
ret = kasan_kmalloc(s, ret, size, flags);
@@ -3983,22 +3983,22 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
return ret;
}
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+void *__kmalloc_node(kmem_buckets *b, size_t size, gfp_t flags, int node)
{
- return __do_kmalloc_node(size, flags, node, _RET_IP_);
+ return __do_kmalloc_node(b, size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);
void *__kmalloc(size_t size, gfp_t flags)
{
- return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
+ return __do_kmalloc_node(NULL, size, flags, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);
void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
int node, unsigned long caller)
{
- return __do_kmalloc_node(size, flags, node, caller);
+ return __do_kmalloc_node(NULL, size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
To be able to choose which buckets to allocate from, make the buckets
available to the lower level kmalloc interfaces.

Signed-off-by: Kees Cook <keescook@chromium.org>
---
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: linux-mm@kvack.org
Cc: linux-hardening@vger.kernel.org
---
 include/linux/slab.h | 8 ++++----
 lib/fortify_kunit.c  | 2 +-
 mm/slab.h            | 6 ++++--
 mm/slab_common.c     | 2 +-
 mm/slub.c            | 12 ++++++------
 5 files changed, 16 insertions(+), 14 deletions(-)
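
For illustration only, not part of the patch: a minimal sketch of the new
calling convention, assuming (as elsewhere in this series) that kmem_buckets
names an array of kmalloc caches and that a caller obtains a dedicated
bucket set by some means outside this patch. Passing NULL preserves today's
behavior; a non-NULL pointer makes kmalloc_slab() pick the size-class cache
from the caller's set.

#include <linux/slab.h>

/* Hypothetical helper, shown only to illustrate the new parameter. */
static void *bucket_alloc_example(kmem_buckets *b, size_t size)
{
	/* NULL: fall back to kmalloc_caches[kmalloc_type(...)] as before. */
	void *p = __kmalloc_node(NULL, size, GFP_KERNEL, NUMA_NO_NODE);

	kfree(p);

	/* Non-NULL: the allocation is served from the caller's buckets. */
	return __kmalloc_node(b, size, GFP_KERNEL, NUMA_NO_NODE);
}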