
[v4,2/6] mm/slab: Plumb kmem_buckets into __do_kmalloc_node()

Message ID 20240531191458.987345-2-kees@kernel.org (mailing list archive)
State Superseded
Series slab: Introduce dedicated bucket allocator

Commit Message

Kees Cook May 31, 2024, 7:14 p.m. UTC
Introduce CONFIG_SLAB_BUCKETS, which provides the infrastructure to
support separated kmalloc buckets (in the following kmem_buckets_create()
patches and future codetag-based separation). Since this will provide
a mitigation for a very common case of exploits, enable it by default.

To be able to choose which buckets to allocate from, make the buckets
available to the internal kmalloc interfaces by adding them as the
first argument, rather than depending on the buckets being chosen from
the fixed set of global buckets. Where the bucket is not available,
pass NULL, which means "use the default system kmalloc bucket set"
(the prior existing behavior), as implemented in kmalloc_slab().

To avoid adding the extra argument when !CONFIG_SLAB_BUCKETS, only the
top-level macros and static inlines use the buckets argument (where
they are stripped out and compiled out respectively). The actual extern
functions can then be built without the argument, and the internals
fall back to the global kmalloc buckets unconditionally.
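
For illustration, the CONFIG_SLAB_BUCKETS=y/n split looks roughly like this
(a condensed sketch of the include/linux/slab.h hunk below, with the alignment
and __alloc_size() attributes omitted; the diff itself is authoritative):

#ifdef CONFIG_SLAB_BUCKETS
void *__kmalloc_buckets_node_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node);
# define __kmalloc_node_noprof(b, size, flags, node)	\
	__kmalloc_buckets_node_noprof(b, size, flags, node)
#else
void *__kmalloc_buckets_node_noprof(size_t size, gfp_t flags, int node);
# define __kmalloc_node_noprof(b, size, flags, node)	\
	__kmalloc_buckets_node_noprof(size, flags, node)	/* "b" is dropped */
#endif

Callers without a dedicated bucket set simply pass NULL, e.g.:

	return __kmalloc_node_noprof(NULL, size, flags, node);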

Signed-off-by: Kees Cook <kees@kernel.org>
---
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: jvoisin <julien.voisin@dustri.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: linux-mm@kvack.org
Cc: linux-hardening@vger.kernel.org
---
 include/linux/slab.h | 34 ++++++++++++++++++++++++++--------
 mm/Kconfig           | 15 +++++++++++++++
 mm/slab.h            |  6 ++++--
 mm/slab_common.c     |  4 ++--
 mm/slub.c            | 34 ++++++++++++++++++++++++----------
 mm/util.c            |  2 +-
 6 files changed, 72 insertions(+), 23 deletions(-)

Comments

Vlastimil Babka June 3, 2024, 5:06 p.m. UTC | #1
On 5/31/24 9:14 PM, Kees Cook wrote:
> Introduce CONFIG_SLAB_BUCKETS which provides the infrastructure to
> support separated kmalloc buckets (in the following kmem_buckets_create()
> patches and future codetag-based separation). Since this will provide
> a mitigation for a very common case of exploits, enable it by default.

Are you sure? I thought there was a policy that nobody is special enough
to have stuff enabled by default. Is it worth risking Linus shouting? :)
 
> To be able to choose which buckets to allocate from, make the buckets
> available to the internal kmalloc interfaces by adding them as the
> first argument, rather than depending on the buckets being chosen from
> the fixed set of global buckets. Where the bucket is not available,
> pass NULL, which means "use the default system kmalloc bucket set"
> (the prior existing behavior), as implemented in kmalloc_slab().
> 
> To avoid adding the extra argument when !CONFIG_SLAB_BUCKETS, only the
> top-level macros and static inlines use the buckets argument (where
> they are stripped out and compiled out respectively). The actual extern
> functions can then be built without the argument, and the internals
> fall back to the global kmalloc buckets unconditionally.
> 
> Signed-off-by: Kees Cook <kees@kernel.org>
> ---
> Cc: Vlastimil Babka <vbabka@suse.cz>
> Cc: Christoph Lameter <cl@linux.com>
> Cc: Pekka Enberg <penberg@kernel.org>
> Cc: David Rientjes <rientjes@google.com>
> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
> Cc: jvoisin <julien.voisin@dustri.org>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Roman Gushchin <roman.gushchin@linux.dev>
> Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
> Cc: linux-mm@kvack.org
> Cc: linux-hardening@vger.kernel.org
> ---
>  include/linux/slab.h | 34 ++++++++++++++++++++++++++--------
>  mm/Kconfig           | 15 +++++++++++++++
>  mm/slab.h            |  6 ++++--
>  mm/slab_common.c     |  4 ++--
>  mm/slub.c            | 34 ++++++++++++++++++++++++----------
>  mm/util.c            |  2 +-
>  6 files changed, 72 insertions(+), 23 deletions(-)
> 
> diff --git a/include/linux/slab.h b/include/linux/slab.h
> index de2b7209cd05..b1165b22cc6f 100644
> --- a/include/linux/slab.h
> +++ b/include/linux/slab.h
> @@ -569,8 +569,17 @@ static __always_inline void kfree_bulk(size_t size, void **p)
>  	kmem_cache_free_bulk(NULL, size, p);
>  }
>  
> -void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
> -							 __alloc_size(1);
> +#ifdef CONFIG_SLAB_BUCKETS
> +void *__kmalloc_buckets_node_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node)
> +				__assume_kmalloc_alignment __alloc_size(2);
> +# define __kmalloc_node_noprof(b, size, flags, node)	\
> +	__kmalloc_buckets_node_noprof(b, size, flags, node)
> +#else
> +void *__kmalloc_buckets_node_noprof(size_t size, gfp_t flags, int node)
> +				__assume_kmalloc_alignment __alloc_size(1);
> +# define __kmalloc_node_noprof(b, size, flags, node)	\
> +	__kmalloc_buckets_node_noprof(size, flags, node)
> +#endif
>  #define __kmalloc_node(...)			alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))

I found this too verbose and tried a different approach; in the end I rewrote
everything to verify the idea works. So I'll just link to the result in git:

https://git.kernel.org/pub/scm/linux/kernel/git/vbabka/linux.git/log/?h=slab-buckets-v4-rewrite

It's also rebased on slab.git:slab/for-6.11/cleanups with some alloc_hooks()
cleanups that would cause conflicts otherwise.

But the crux of that approach is:

/*
 * These macros allow declaring a kmem_buckets * parameter alongside size, which
 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
 * sites don't have to pass NULL.
 */
#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET_PARAMS(_size, _b)   size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b)   (_size), (_b)
#define PASS_BUCKET_PARAM(_b)           (_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b)   size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b)   (_size)
#define PASS_BUCKET_PARAM(_b)           NULL
#endif

Then we have declaration e.g.

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
                                __assume_kmalloc_alignment __alloc_size(1);

and the function is called like (from code not using buckets)
return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);

or (from code using buckets)
#define kmem_buckets_alloc(_b, _size, _flags)   \
        alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

And implementation looks like:

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
        return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
}

The size param is always the first, so the __alloc_size(1) doesn't need tweaking.
size is also used in the macros even if it's never mangled, because it's easy
to pass one param instead of two, but not zero params instead of one, if we want
the ending comma not to be part of the macro (which would look awkward).
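
To illustrate the comma problem, a hypothetical bucket-only macro (not what the
branch does) would have to hide the separator comma inside the macro:

#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET(_b)		, kmem_buckets *(_b)	/* leading comma inside the macro */
#else
#define DECL_BUCKET(_b)					/* expands to nothing */
#endif

void *__kmalloc_node_noprof(size_t size DECL_BUCKET(b), gfp_t flags, int node);

i.e. there is no visible comma after "size" at the declaration site, which looks
awkward; bundling size into DECL_BUCKET_PARAMS() avoids that.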

Does it look ok to you? Of course, the names of the macros could be tweaked. Anyway,
feel free to use the branch for the followup. Hopefully this way is also compatible
with the planned codetag-based followup.

>  
>  void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
> @@ -679,7 +688,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gf
>  				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
>  				flags, node, size);
>  	}
> -	return __kmalloc_node_noprof(size, flags, node);
> +	return __kmalloc_node_noprof(NULL, size, flags, node);
>  }
>  #define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
>  
> @@ -730,10 +739,19 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(voi
>   */
>  #define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)
>  
> -void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
> -				  unsigned long caller) __alloc_size(1);
> +#ifdef CONFIG_SLAB_BUCKETS
> +void *__kmalloc_node_track_caller_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node,
> +					 unsigned long caller) __alloc_size(2);
> +# define kmalloc_node_track_caller_noprof(b, size, flags, node, caller)	\
> +	__kmalloc_node_track_caller_noprof(b, size, flags, node, caller)
> +#else
> +void *__kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
> +					 unsigned long caller) __alloc_size(1);
> +# define kmalloc_node_track_caller_noprof(b, size, flags, node, caller)	\
> +	__kmalloc_node_track_caller_noprof(size, flags, node, caller)
> +#endif
>  #define kmalloc_node_track_caller(...)		\
> -	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
> +	alloc_hooks(kmalloc_node_track_caller_noprof(NULL, __VA_ARGS__, _RET_IP_))
>  
>  /*
>   * kmalloc_track_caller is a special version of kmalloc that records the
> @@ -746,7 +764,7 @@ void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
>  #define kmalloc_track_caller(...)		kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
>  
>  #define kmalloc_track_caller_noprof(...)	\
> -		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
> +		kmalloc_node_track_caller_noprof(NULL, __VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
>  
>  static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
>  							  int node)
> @@ -757,7 +775,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_
>  		return NULL;
>  	if (__builtin_constant_p(n) && __builtin_constant_p(size))
>  		return kmalloc_node_noprof(bytes, flags, node);
> -	return __kmalloc_node_noprof(bytes, flags, node);
> +	return __kmalloc_node_noprof(NULL, bytes, flags, node);
>  }
>  #define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
>  
> diff --git a/mm/Kconfig b/mm/Kconfig
> index b4cb45255a54..8c29af7835cc 100644
> --- a/mm/Kconfig
> +++ b/mm/Kconfig
> @@ -273,6 +273,21 @@ config SLAB_FREELIST_HARDENED
>  	  sacrifices to harden the kernel slab allocator against common
>  	  freelist exploit methods.
>  
> +config SLAB_BUCKETS
> +	bool "Support allocation from separate kmalloc buckets"
> +	default y
> +	depends on !SLUB_TINY
> +	help
> +	  Kernel heap attacks frequently depend on being able to create
> +	  specifically-sized allocations with user-controlled contents
> +	  that will be allocated into the same kmalloc bucket as a
> +	  target object. To avoid sharing these allocation buckets,
> +	  provide an explicitly separated set of buckets to be used for
> +	  user-controlled allocations. This may very slightly increase
> +	  memory fragmentation, though in practice it's only a handful
> +	  of extra pages since the bulk of user-controlled allocations
> +	  are relatively long-lived.
> +
>  config SLUB_STATS
>  	default n
>  	bool "Enable performance statistics"
> diff --git a/mm/slab.h b/mm/slab.h
> index 5f8f47c5bee0..f459cd338852 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -403,16 +403,18 @@ static inline unsigned int size_index_elem(unsigned int bytes)
>   * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
>   */
>  static inline struct kmem_cache *
> -kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
> +kmalloc_slab(kmem_buckets *b, size_t size, gfp_t flags, unsigned long caller)
>  {
>  	unsigned int index;
>  
> +	if (!b)
> +		b = &kmalloc_caches[kmalloc_type(flags, caller)];
>  	if (size <= 192)
>  		index = kmalloc_size_index[size_index_elem(size)];
>  	else
>  		index = fls(size - 1);
>  
> -	return kmalloc_caches[kmalloc_type(flags, caller)][index];
> +	return (*b)[index];
>  }
>  
>  gfp_t kmalloc_fix_flags(gfp_t flags);
> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index e0b1c109bed2..b5c879fa66bc 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -702,7 +702,7 @@ size_t kmalloc_size_roundup(size_t size)
>  		 * The flags don't matter since size_index is common to all.
>  		 * Neither does the caller for just getting ->object_size.
>  		 */
> -		return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
> +		return kmalloc_slab(NULL, size, GFP_KERNEL, 0)->object_size;
>  	}
>  
>  	/* Above the smaller buckets, size is a multiple of page size. */
> @@ -1179,7 +1179,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
>  		return (void *)p;
>  	}
>  
> -	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
> +	ret = kmalloc_node_track_caller_noprof(NULL, new_size, flags, NUMA_NO_NODE, _RET_IP_);
>  	if (ret && p) {
>  		/* Disable KASAN checks as the object's redzone is accessed. */
>  		kasan_disable_current();
> diff --git a/mm/slub.c b/mm/slub.c
> index 0809760cf789..ec682a325abe 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -4099,7 +4099,7 @@ void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
>  EXPORT_SYMBOL(kmalloc_large_node_noprof);
>  
>  static __always_inline
> -void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
> +void *__do_kmalloc_node(kmem_buckets *b, size_t size, gfp_t flags, int node,
>  			unsigned long caller)
>  {
>  	struct kmem_cache *s;
> @@ -4115,7 +4115,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
>  	if (unlikely(!size))
>  		return ZERO_SIZE_PTR;
>  
> -	s = kmalloc_slab(size, flags, caller);
> +	s = kmalloc_slab(b, size, flags, caller);
>  
>  	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
>  	ret = kasan_kmalloc(s, ret, size, flags);
> @@ -4123,24 +4123,38 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
>  	return ret;
>  }
>  
> -void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
> +#ifdef CONFIG_SLAB_BUCKETS
> +# define __do_kmalloc_buckets_node(b, size, flags, node, caller)	\
> +	__do_kmalloc_node(b, size, flags, node, caller)
> +void *__kmalloc_buckets_node_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node)
> +#else
> +# define __do_kmalloc_buckets_node(b, size, flags, node, caller)	\
> +	__do_kmalloc_node(NULL, size, flags, node, caller)
> +void *__kmalloc_buckets_node_noprof(size_t size, gfp_t flags, int node)
> +#endif
>  {
> -	return __do_kmalloc_node(size, flags, node, _RET_IP_);
> +	return __do_kmalloc_buckets_node(b, size, flags, node, _RET_IP_);
>  }
> -EXPORT_SYMBOL(__kmalloc_node_noprof);
> +EXPORT_SYMBOL(__kmalloc_buckets_node_noprof);
>  
>  void *__kmalloc_noprof(size_t size, gfp_t flags)
>  {
> -	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
> +	return __do_kmalloc_buckets_node(NULL, size, flags, NUMA_NO_NODE, _RET_IP_);
>  }
>  EXPORT_SYMBOL(__kmalloc_noprof);
>  
> -void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
> -				       int node, unsigned long caller)
> +#ifdef CONFIG_SLAB_BUCKETS
> +void *__kmalloc_node_track_caller_noprof(kmem_buckets *b, size_t size, gfp_t flags,
> +					 int node, unsigned long caller)
> +#else
> +void *__kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
> +					 int node, unsigned long caller)
> +#endif
>  {
> -	return __do_kmalloc_node(size, flags, node, caller);
> +	return __do_kmalloc_buckets_node(b, size, flags, node, caller);
> +
>  }
> -EXPORT_SYMBOL(kmalloc_node_track_caller_noprof);
> +EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
>  
>  void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
>  {
> diff --git a/mm/util.c b/mm/util.c
> index c9e519e6811f..80430e5ba981 100644
> --- a/mm/util.c
> +++ b/mm/util.c
> @@ -128,7 +128,7 @@ void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
>  {
>  	void *p;
>  
> -	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
> +	p = kmalloc_node_track_caller_noprof(NULL, len, gfp, NUMA_NO_NODE, _RET_IP_);
>  	if (p)
>  		memcpy(p, src, len);
>  	return p;
Kees Cook June 3, 2024, 10:44 p.m. UTC | #2
On Mon, Jun 03, 2024 at 07:06:15PM +0200, Vlastimil Babka wrote:
> On 5/31/24 9:14 PM, Kees Cook wrote:
> > Introduce CONFIG_SLAB_BUCKETS which provides the infrastructure to
> > support separated kmalloc buckets (in the following kmem_buckets_create()
> > patches and future codetag-based separation). Since this will provide
> > a mitigation for a very common case of exploits, enable it by default.
> 
> Are you sure? I thought there was a policy that nobody is special enough
> to have stuff enabled by default. Is it worth risking Linus shouting? :)

I think it's important to have this enabled given how common the
exploitation methodology is and how cheap this solution is. Regardless,
if you want it "default n", I can change it.

> I found this too verbose and tried a different approach, in the end rewrote
> everything to verify the idea works. So I'll just link to the result in git:
> 
> https://git.kernel.org/pub/scm/linux/kernel/git/vbabka/linux.git/log/?h=slab-buckets-v4-rewrite
> 
> It's also rebased on slab.git:slab/for-6.11/cleanups with some alloc_hooks()
> cleanups that would cause conflicts otherwise.
> 
> But the crux of that approach is:
> 
> /*
>  * These macros allow declaring a kmem_buckets * parameter alongside size, which
>  * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
>  * sites don't have to pass NULL.
>  */
> #ifdef CONFIG_SLAB_BUCKETS
> #define DECL_BUCKET_PARAMS(_size, _b)   size_t (_size), kmem_buckets *(_b)
> #define PASS_BUCKET_PARAMS(_size, _b)   (_size), (_b)
> #define PASS_BUCKET_PARAM(_b)           (_b)
> #else
> #define DECL_BUCKET_PARAMS(_size, _b)   size_t (_size)
> #define PASS_BUCKET_PARAMS(_size, _b)   (_size)
> #define PASS_BUCKET_PARAM(_b)           NULL
> #endif
> 
> Then we have declaration e.g.
> 
> void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
>                                 __assume_kmalloc_alignment __alloc_size(1);
> 
> and the function is called like (from code not using buckets)
> return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
> 
> or (from code using buckets)
> #define kmem_buckets_alloc(_b, _size, _flags)   \
>         alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
> 
> And implementation looks like:
> 
> void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
> {
>         return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
> }
> 
> The size param is always the first, so the __alloc_size(1) doesn't need tweaking.
> size is also used in the macros even if it's never mangled, because it's easy
> to pass one param instead of two, but not zero params instead of one, if we want
> the ending comma not to be part of the macro (which would look awkward).
> 
> Does it look ok to you? Of course names of the macros could be tweaked. Anyway feel
> free to use the branch for the followup. Hopefully this way is also compatible with
> the planned codetag based followup.

This looks really nice, thank you! This is well aligned with the codetag
followup, which also needs to have "size" be very easy to find (so the
macros can check whether it is a compile-time constant or not).
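
For example, a codetag-style wrapper could key off __builtin_constant_p(); the
helper names below are purely made up, just to show the shape of the check:

/* kmalloc_const_bucket()/kmalloc_dyn_bucket() are hypothetical placeholders */
#define kmalloc_codetag(size, flags)				\
	(__builtin_constant_p(size) ?				\
	 kmalloc_const_bucket(size, flags) :			\
	 kmalloc_dyn_bucket(size, flags))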

I will go work from your branch...

Thanks!

-Kees
Vlastimil Babka June 4, 2024, 12:30 p.m. UTC | #3
On 6/4/24 12:44 AM, Kees Cook wrote:
> On Mon, Jun 03, 2024 at 07:06:15PM +0200, Vlastimil Babka wrote:
>> On 5/31/24 9:14 PM, Kees Cook wrote:
>> > Introduce CONFIG_SLAB_BUCKETS which provides the infrastructure to
>> > support separated kmalloc buckets (in the following kmem_buckets_create()
>> > patches and future codetag-based separation). Since this will provide
>> > a mitigation for a very common case of exploits, enable it by default.
>> 
>> Are you sure? I thought there was a policy that nobody is special enough
>> to have stuff enabled by default. Is it worth risking Linus shouting? :)
> 
> I think it's important to have this enabled given how common the
> exploitation methodology is and how cheap this solution is. Regardless,
> if you want it "default n", I can change it.

Yeah, I'd just recommend it in the help, noting it has a bit of memory
overhead. Defaults are not that important anyway IMHO; either it's the distro
doing the config, or individually security-conscious people who should know
what they are doing.

> 
> This looks really nice, thank you! This is well aligned with the codetag
> followup, which also needs to have "size" be very easy to find (so the
> macros can check whether it is a compile-time constant or not).
> 
> I will go work from your branch...

Great!

> Thanks!
> 
> -Kees
>

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index de2b7209cd05..b1165b22cc6f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -569,8 +569,17 @@  static __always_inline void kfree_bulk(size_t size, void **p)
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
-							 __alloc_size(1);
+#ifdef CONFIG_SLAB_BUCKETS
+void *__kmalloc_buckets_node_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node)
+				__assume_kmalloc_alignment __alloc_size(2);
+# define __kmalloc_node_noprof(b, size, flags, node)	\
+	__kmalloc_buckets_node_noprof(b, size, flags, node)
+#else
+void *__kmalloc_buckets_node_noprof(size_t size, gfp_t flags, int node)
+				__assume_kmalloc_alignment __alloc_size(1);
+# define __kmalloc_node_noprof(b, size, flags, node)	\
+	__kmalloc_buckets_node_noprof(size, flags, node)
+#endif
 #define __kmalloc_node(...)			alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))
 
 void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
@@ -679,7 +688,7 @@  static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gf
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, node, size);
 	}
-	return __kmalloc_node_noprof(size, flags, node);
+	return __kmalloc_node_noprof(NULL, size, flags, node);
 }
 #define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
 
@@ -730,10 +739,19 @@  static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(voi
  */
 #define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)
 
-void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
-				  unsigned long caller) __alloc_size(1);
+#ifdef CONFIG_SLAB_BUCKETS
+void *__kmalloc_node_track_caller_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node,
+					 unsigned long caller) __alloc_size(2);
+# define kmalloc_node_track_caller_noprof(b, size, flags, node, caller)	\
+	__kmalloc_node_track_caller_noprof(b, size, flags, node, caller)
+#else
+void *__kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
+					 unsigned long caller) __alloc_size(1);
+# define kmalloc_node_track_caller_noprof(b, size, flags, node, caller)	\
+	__kmalloc_node_track_caller_noprof(size, flags, node, caller)
+#endif
 #define kmalloc_node_track_caller(...)		\
-	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
+	alloc_hooks(kmalloc_node_track_caller_noprof(NULL, __VA_ARGS__, _RET_IP_))
 
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
@@ -746,7 +764,7 @@  void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
 #define kmalloc_track_caller(...)		kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
 
 #define kmalloc_track_caller_noprof(...)	\
-		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
+		kmalloc_node_track_caller_noprof(NULL, __VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
 
 static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
 							  int node)
@@ -757,7 +775,7 @@  static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
 		return kmalloc_node_noprof(bytes, flags, node);
-	return __kmalloc_node_noprof(bytes, flags, node);
+	return __kmalloc_node_noprof(NULL, bytes, flags, node);
 }
 #define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
 
diff --git a/mm/Kconfig b/mm/Kconfig
index b4cb45255a54..8c29af7835cc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -273,6 +273,21 @@  config SLAB_FREELIST_HARDENED
 	  sacrifices to harden the kernel slab allocator against common
 	  freelist exploit methods.
 
+config SLAB_BUCKETS
+	bool "Support allocation from separate kmalloc buckets"
+	default y
+	depends on !SLUB_TINY
+	help
+	  Kernel heap attacks frequently depend on being able to create
+	  specifically-sized allocations with user-controlled contents
+	  that will be allocated into the same kmalloc bucket as a
+	  target object. To avoid sharing these allocation buckets,
+	  provide an explicitly separated set of buckets to be used for
+	  user-controlled allocations. This may very slightly increase
+	  memory fragmentation, though in practice it's only a handful
+	  of extra pages since the bulk of user-controlled allocations
+	  are relatively long-lived.
+
 config SLUB_STATS
 	default n
 	bool "Enable performance statistics"
diff --git a/mm/slab.h b/mm/slab.h
index 5f8f47c5bee0..f459cd338852 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -403,16 +403,18 @@  static inline unsigned int size_index_elem(unsigned int bytes)
  * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
  */
 static inline struct kmem_cache *
-kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
+kmalloc_slab(kmem_buckets *b, size_t size, gfp_t flags, unsigned long caller)
 {
 	unsigned int index;
 
+	if (!b)
+		b = &kmalloc_caches[kmalloc_type(flags, caller)];
 	if (size <= 192)
 		index = kmalloc_size_index[size_index_elem(size)];
 	else
 		index = fls(size - 1);
 
-	return kmalloc_caches[kmalloc_type(flags, caller)][index];
+	return (*b)[index];
 }
 
 gfp_t kmalloc_fix_flags(gfp_t flags);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index e0b1c109bed2..b5c879fa66bc 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -702,7 +702,7 @@  size_t kmalloc_size_roundup(size_t size)
 		 * The flags don't matter since size_index is common to all.
 		 * Neither does the caller for just getting ->object_size.
 		 */
-		return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+		return kmalloc_slab(NULL, size, GFP_KERNEL, 0)->object_size;
 	}
 
 	/* Above the smaller buckets, size is a multiple of page size. */
@@ -1179,7 +1179,7 @@  __do_krealloc(const void *p, size_t new_size, gfp_t flags)
 		return (void *)p;
 	}
 
-	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
+	ret = kmalloc_node_track_caller_noprof(NULL, new_size, flags, NUMA_NO_NODE, _RET_IP_);
 	if (ret && p) {
 		/* Disable KASAN checks as the object's redzone is accessed. */
 		kasan_disable_current();
diff --git a/mm/slub.c b/mm/slub.c
index 0809760cf789..ec682a325abe 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4099,7 +4099,7 @@  void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(kmalloc_large_node_noprof);
 
 static __always_inline
-void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
+void *__do_kmalloc_node(kmem_buckets *b, size_t size, gfp_t flags, int node,
 			unsigned long caller)
 {
 	struct kmem_cache *s;
@@ -4115,7 +4115,7 @@  void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	if (unlikely(!size))
 		return ZERO_SIZE_PTR;
 
-	s = kmalloc_slab(size, flags, caller);
+	s = kmalloc_slab(b, size, flags, caller);
 
 	ret = slab_alloc_node(s, NULL, flags, node, caller, size);
 	ret = kasan_kmalloc(s, ret, size, flags);
@@ -4123,24 +4123,38 @@  void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	return ret;
 }
 
-void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
+#ifdef CONFIG_SLAB_BUCKETS
+# define __do_kmalloc_buckets_node(b, size, flags, node, caller)	\
+	__do_kmalloc_node(b, size, flags, node, caller)
+void *__kmalloc_buckets_node_noprof(kmem_buckets *b, size_t size, gfp_t flags, int node)
+#else
+# define __do_kmalloc_buckets_node(b, size, flags, node, caller)	\
+	__do_kmalloc_node(NULL, size, flags, node, caller)
+void *__kmalloc_buckets_node_noprof(size_t size, gfp_t flags, int node)
+#endif
 {
-	return __do_kmalloc_node(size, flags, node, _RET_IP_);
+	return __do_kmalloc_buckets_node(b, size, flags, node, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc_node_noprof);
+EXPORT_SYMBOL(__kmalloc_buckets_node_noprof);
 
 void *__kmalloc_noprof(size_t size, gfp_t flags)
 {
-	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
+	return __do_kmalloc_buckets_node(NULL, size, flags, NUMA_NO_NODE, _RET_IP_);
 }
 EXPORT_SYMBOL(__kmalloc_noprof);
 
-void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
-				       int node, unsigned long caller)
+#ifdef CONFIG_SLAB_BUCKETS
+void *__kmalloc_node_track_caller_noprof(kmem_buckets *b, size_t size, gfp_t flags,
+					 int node, unsigned long caller)
+#else
+void *__kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
+					 int node, unsigned long caller)
+#endif
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_buckets_node(b, size, flags, node, caller);
+
 }
-EXPORT_SYMBOL(kmalloc_node_track_caller_noprof);
+EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
 
 void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
diff --git a/mm/util.c b/mm/util.c
index c9e519e6811f..80430e5ba981 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -128,7 +128,7 @@  void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
 {
 	void *p;
 
-	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
+	p = kmalloc_node_track_caller_noprof(NULL, len, gfp, NUMA_NO_NODE, _RET_IP_);
 	if (p)
 		memcpy(p, src, len);
 	return p;