
[v4,4/6] mm/slab: Introduce kmem_buckets_create() and family

Message ID 20240531191458.987345-4-kees@kernel.org (mailing list archive)
State Superseded
Series slab: Introduce dedicated bucket allocator

Commit Message

Kees Cook May 31, 2024, 7:14 p.m. UTC
Dedicated caches are available for fixed size allocations via
kmem_cache_alloc(), but for dynamically sized allocations there is only
the global kmalloc API's set of buckets available. This means it isn't
possible to separate specific sets of dynamically sized allocations into
their own collection of caches.
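
As an illustration (not part of this patch; "struct foo" and "user_len"
are hypothetical names), a fixed-size object can already get its own
cache, while a dynamically sized allocation cannot:

	/* Fixed-size objects can be isolated in a dedicated cache ... */
	struct kmem_cache *foo_cache =
		kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);
	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

	/* ... but a dynamically sized allocation has nowhere to go but
	 * the shared kmalloc-<size> buckets used by the whole kernel. */
	void *buf = kmalloc(user_len, GFP_KERNEL);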

This leads to a use-after-free exploitation weakness in the Linux
kernel since many heap memory spraying/grooming attacks depend on using
userspace-controllable dynamically sized allocations to collide with
fixed size allocations that end up in the same cache.

While CONFIG_RANDOM_KMALLOC_CACHES provides a probabilistic defense
against these kinds of "type confusion" attacks, including for fixed
same-size heap objects, we can create a complementary deterministic
defense for dynamically sized allocations that are directly user
controlled. Addressing these cases is limited in scope, so isolating these
kinds of interfaces will not become an unbounded game of whack-a-mole. For
example, many pass through memdup_user(), making isolation there very
effective.

In order to isolate user-controllable dynamically-sized
allocations from the common system kmalloc allocations, introduce
kmem_buckets_create(), which behaves like kmem_cache_create(). Introduce
kmem_buckets_alloc(), which behaves like kmem_cache_alloc(). Introduce
kmem_buckets_alloc_track_caller() for where caller tracking is
needed. Introduce kmem_buckets_valloc() for cases where vmalloc fallback
is needed.
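
As a usage sketch (not part of this patch; the "example" name, the
example_* identifiers, and the chosen useroffset/usersize values are
illustrative assumptions -- only the kmem_buckets_create() and
kmem_buckets_alloc() signatures come from the patch below), a subsystem
wanting its user-controllable allocations isolated might do:

	#include <linux/slab.h>
	#include <linux/uaccess.h>
	#include <linux/limits.h>
	#include <linux/init.h>
	#include <linux/err.h>

	static kmem_buckets *example_buckets __ro_after_init;

	static int __init example_init(void)
	{
		/* Creates a dedicated set of "example-<size>" caches. */
		example_buckets = kmem_buckets_create("example", 0, 0,
						      0, UINT_MAX, NULL);
		return example_buckets ? 0 : -ENOMEM;
	}

	static void *example_copy_from_user(const void __user *src, size_t len)
	{
		/* Lands in the dedicated buckets, not the global kmalloc ones. */
		void *buf = kmem_buckets_alloc(example_buckets, len, GFP_KERNEL);

		if (!buf)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(buf, src, len)) {
			kfree(buf);
			return ERR_PTR(-EFAULT);
		}
		return buf;
	}

Objects allocated this way are still freed with kfree(); a caller that
needs the vmalloc fallback for large sizes would presumably use
kmem_buckets_valloc(example_buckets, len, GFP_KERNEL) and free with
kvfree() instead.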

This can also be used in the future to extend allocation profiling's use
of code tagging to implement per-caller allocation cache isolation[1]
even for dynamic allocations.

Memory allocation pinning[2] is still needed to plug the use-after-free
cross-allocator weakness, but that is an existing and separate issue
which is complementary to this improvement. Development continues for
that feature via the SLAB_VIRTUAL[3] series (which could also provide
guard pages -- another complementary improvement).

Link: https://lore.kernel.org/lkml/202402211449.401382D2AF@keescook [1]
Link: https://googleprojectzero.blogspot.com/2021/10/how-simple-linux-kernel-memory.html [2]
Link: https://lore.kernel.org/lkml/20230915105933.495735-1-matteorizzo@google.com/ [3]
Signed-off-by: Kees Cook <kees@kernel.org>
---
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: jvoisin <julien.voisin@dustri.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: linux-mm@kvack.org
---
 include/linux/slab.h | 12 +++++++
 mm/slab_common.c     | 80 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 92 insertions(+)

Comments

Simon Horman June 4, 2024, 3:02 p.m. UTC | #1
On Fri, May 31, 2024 at 12:14:56PM -0700, Kees Cook wrote:

...

> diff --git a/mm/slab_common.c b/mm/slab_common.c
> index b5c879fa66bc..f42a98d368a9 100644
> --- a/mm/slab_common.c
> +++ b/mm/slab_common.c
> @@ -392,6 +392,82 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
>  }
>  EXPORT_SYMBOL(kmem_cache_create);
>  
> +static struct kmem_cache *kmem_buckets_cache __ro_after_init;
> +
> +kmem_buckets *kmem_buckets_create(const char *name, unsigned int align,
> +				  slab_flags_t flags,
> +				  unsigned int useroffset,
> +				  unsigned int usersize,
> +				  void (*ctor)(void *))
> +{
> +	kmem_buckets *b;
> +	int idx;
> +
> +	/*
> +	 * When the separate buckets API is not built in, just return
> +	 * a non-NULL value for the kmem_buckets pointer, which will be
> +	 * unused when performing allocations.
> +	 */
> +	if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
> +		return ZERO_SIZE_PTR;
> +
> +	if (WARN_ON(!kmem_buckets_cache))
> +		return NULL;
> +
> +	b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
> +	if (WARN_ON(!b))
> +		return NULL;
> +
> +	flags |= SLAB_NO_MERGE;
> +
> +	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> +		char *short_size, *cache_name;
> +		unsigned int cache_useroffset, cache_usersize;
> +		unsigned int size;
> +
> +		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
> +			continue;
> +
> +		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
> +		if (!size)
> +			continue;
> +
> +		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
> +		if (WARN_ON(!short_size))
> +			goto fail;
> +
> +		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> +		if (WARN_ON(!cache_name))
> +			goto fail;
> +
> +		if (useroffset >= size) {
> +			cache_useroffset = 0;
> +			cache_usersize = 0;
> +		} else {
> +			cache_useroffset = useroffset;
> +			cache_usersize = min(size - cache_useroffset, usersize);
> +		}
> +		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
> +					align, flags, cache_useroffset,
> +					cache_usersize, ctor);
> +		kfree(cache_name);
> +		if (WARN_ON(!(*b)[idx]))
> +			goto fail;
> +	}
> +
> +	return b;
> +
> +fail:
> +	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> +		if ((*b)[idx])
> +			kmem_cache_destroy((*b)[idx]);

nit: I don't think it is necessary to guard this with a check for NULL.

> +	}
> +	kfree(b);
> +
> +	return NULL;
> +}
> +EXPORT_SYMBOL(kmem_buckets_create);

Tycho Andersen June 4, 2024, 10:13 p.m. UTC | #2
On Tue, Jun 04, 2024 at 04:02:28PM +0100, Simon Horman wrote:
> On Fri, May 31, 2024 at 12:14:56PM -0700, Kees Cook wrote:
> > +	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> > +		char *short_size, *cache_name;
> > +		unsigned int cache_useroffset, cache_usersize;
> > +		unsigned int size;
> > +
> > +		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
> > +			continue;
> > +
> > +		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
> > +		if (!size)
> > +			continue;
> > +
> > +		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
> > +		if (WARN_ON(!short_size))
> > +			goto fail;
> > +
> > +		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> > +		if (WARN_ON(!cache_name))
> > +			goto fail;
> > +
> > +		if (useroffset >= size) {
> > +			cache_useroffset = 0;
> > +			cache_usersize = 0;
> > +		} else {
> > +			cache_useroffset = useroffset;
> > +			cache_usersize = min(size - cache_useroffset, usersize);
> > +		}
> > +		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
> > +					align, flags, cache_useroffset,
> > +					cache_usersize, ctor);
> > +		kfree(cache_name);
> > +		if (WARN_ON(!(*b)[idx]))
> > +			goto fail;
> > +	}
> > +
> > +	return b;
> > +
> > +fail:
> > +	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> > +		if ((*b)[idx])
> > +			kmem_cache_destroy((*b)[idx]);
> 
> nit: I don't think it is necessary to guard this with a check for NULL.

Isn't it? What if a kasprintf() fails halfway through the loop?

Tycho

Kees Cook June 5, 2024, 12:49 a.m. UTC | #3
On Tue, Jun 04, 2024 at 04:13:32PM -0600, Tycho Andersen wrote:
> On Tue, Jun 04, 2024 at 04:02:28PM +0100, Simon Horman wrote:
> > On Fri, May 31, 2024 at 12:14:56PM -0700, Kees Cook wrote:
> > > +	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> > > +		char *short_size, *cache_name;
> > > +		unsigned int cache_useroffset, cache_usersize;
> > > +		unsigned int size;
> > > +
> > > +		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
> > > +			continue;
> > > +
> > > +		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
> > > +		if (!size)
> > > +			continue;
> > > +
> > > +		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
> > > +		if (WARN_ON(!short_size))
> > > +			goto fail;
> > > +
> > > +		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> > > +		if (WARN_ON(!cache_name))
> > > +			goto fail;
> > > +
> > > +		if (useroffset >= size) {
> > > +			cache_useroffset = 0;
> > > +			cache_usersize = 0;
> > > +		} else {
> > > +			cache_useroffset = useroffset;
> > > +			cache_usersize = min(size - cache_useroffset, usersize);
> > > +		}
> > > +		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
> > > +					align, flags, cache_useroffset,
> > > +					cache_usersize, ctor);
> > > +		kfree(cache_name);
> > > +		if (WARN_ON(!(*b)[idx]))
> > > +			goto fail;
> > > +	}
> > > +
> > > +	return b;
> > > +
> > > +fail:
> > > +	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> > > +		if ((*b)[idx])
> > > +			kmem_cache_destroy((*b)[idx]);
> > 
> > nit: I don't think it is necessary to guard this with a check for NULL.
> 
> Isn't it? What if a kasprintf() fails halfway through the loop?

He means that kmem_cache_destroy() already checks for NULL. Quite right!

void kmem_cache_destroy(struct kmem_cache *s)
{
        int err = -EBUSY;
        bool rcu_set;

        if (unlikely(!s) || !kasan_check_byte(s))
                return;

Simon Horman June 5, 2024, 7:54 p.m. UTC | #4
On Tue, Jun 04, 2024 at 05:49:20PM -0700, Kees Cook wrote:
> On Tue, Jun 04, 2024 at 04:13:32PM -0600, Tycho Andersen wrote:
> > On Tue, Jun 04, 2024 at 04:02:28PM +0100, Simon Horman wrote:
> > > On Fri, May 31, 2024 at 12:14:56PM -0700, Kees Cook wrote:
> > > > +	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> > > > +		char *short_size, *cache_name;
> > > > +		unsigned int cache_useroffset, cache_usersize;
> > > > +		unsigned int size;
> > > > +
> > > > +		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
> > > > +			continue;
> > > > +
> > > > +		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
> > > > +		if (!size)
> > > > +			continue;
> > > > +
> > > > +		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
> > > > +		if (WARN_ON(!short_size))
> > > > +			goto fail;
> > > > +
> > > > +		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
> > > > +		if (WARN_ON(!cache_name))
> > > > +			goto fail;
> > > > +
> > > > +		if (useroffset >= size) {
> > > > +			cache_useroffset = 0;
> > > > +			cache_usersize = 0;
> > > > +		} else {
> > > > +			cache_useroffset = useroffset;
> > > > +			cache_usersize = min(size - cache_useroffset, usersize);
> > > > +		}
> > > > +		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
> > > > +					align, flags, cache_useroffset,
> > > > +					cache_usersize, ctor);
> > > > +		kfree(cache_name);
> > > > +		if (WARN_ON(!(*b)[idx]))
> > > > +			goto fail;
> > > > +	}
> > > > +
> > > > +	return b;
> > > > +
> > > > +fail:
> > > > +	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
> > > > +		if ((*b)[idx])
> > > > +			kmem_cache_destroy((*b)[idx]);
> > > 
> > > nit: I don't think it is necessary to guard this with a check for NULL.
> > 
> > Isn't it? What if a kasprintf() fails halfway through the loop?
> 
> He means that kmem_cache_destroy() already checks for NULL. Quite right!
> 
> void kmem_cache_destroy(struct kmem_cache *s)
> {
>         int err = -EBUSY;
>         bool rcu_set;
> 
>         if (unlikely(!s) || !kasan_check_byte(s))
>                 return;

Yes, thanks. That is what I was referring to.

Patch

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 8853c6eb20b4..b48c50d90aae 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -552,6 +552,11 @@  void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
 
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
+kmem_buckets *kmem_buckets_create(const char *name, unsigned int align,
+				  slab_flags_t flags,
+				  unsigned int useroffset, unsigned int usersize,
+				  void (*ctor)(void *));
+
 /*
  * Bulk allocation and freeing operations. These are accelerated in an
  * allocator specific way to avoid taking locks repeatedly or building
@@ -675,6 +680,12 @@  static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t f
 }
 #define kmalloc(...)				alloc_hooks(kmalloc_noprof(__VA_ARGS__))
 
+#define kmem_buckets_alloc(_b, _size, _flags)	\
+	alloc_hooks(__kmalloc_node_noprof(_b, _size, _flags, NUMA_NO_NODE))
+
+#define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
+	alloc_hooks(kmalloc_node_track_caller_noprof(_b, _size, _flags, NUMA_NO_NODE, _RET_IP_))
+
 static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
@@ -818,6 +829,7 @@  extern void *kvmalloc_buckets_node_noprof(size_t size, gfp_t flags, int node)
 #define kvzalloc(_size, _flags)			kvmalloc(_size, (_flags)|__GFP_ZERO)
 
 #define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
+#define kmem_buckets_valloc(_b, _size, _flags)	kvmalloc_buckets_node(_b, _size, _flags, NUMA_NO_NODE)
 
 static inline __alloc_size(1, 2) void *
 kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b5c879fa66bc..f42a98d368a9 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -392,6 +392,82 @@  kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+static struct kmem_cache *kmem_buckets_cache __ro_after_init;
+
+kmem_buckets *kmem_buckets_create(const char *name, unsigned int align,
+				  slab_flags_t flags,
+				  unsigned int useroffset,
+				  unsigned int usersize,
+				  void (*ctor)(void *))
+{
+	kmem_buckets *b;
+	int idx;
+
+	/*
+	 * When the separate buckets API is not built in, just return
+	 * a non-NULL value for the kmem_buckets pointer, which will be
+	 * unused when performing allocations.
+	 */
+	if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
+		return ZERO_SIZE_PTR;
+
+	if (WARN_ON(!kmem_buckets_cache))
+		return NULL;
+
+	b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
+	if (WARN_ON(!b))
+		return NULL;
+
+	flags |= SLAB_NO_MERGE;
+
+	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
+		char *short_size, *cache_name;
+		unsigned int cache_useroffset, cache_usersize;
+		unsigned int size;
+
+		if (!kmalloc_caches[KMALLOC_NORMAL][idx])
+			continue;
+
+		size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
+		if (!size)
+			continue;
+
+		short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
+		if (WARN_ON(!short_size))
+			goto fail;
+
+		cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
+		if (WARN_ON(!cache_name))
+			goto fail;
+
+		if (useroffset >= size) {
+			cache_useroffset = 0;
+			cache_usersize = 0;
+		} else {
+			cache_useroffset = useroffset;
+			cache_usersize = min(size - cache_useroffset, usersize);
+		}
+		(*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
+					align, flags, cache_useroffset,
+					cache_usersize, ctor);
+		kfree(cache_name);
+		if (WARN_ON(!(*b)[idx]))
+			goto fail;
+	}
+
+	return b;
+
+fail:
+	for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
+		if ((*b)[idx])
+			kmem_cache_destroy((*b)[idx]);
+	}
+	kfree(b);
+
+	return NULL;
+}
+EXPORT_SYMBOL(kmem_buckets_create);
+
 #ifdef SLAB_SUPPORTS_SYSFS
 /*
  * For a given kmem_cache, kmem_cache_destroy() should only be called
@@ -931,6 +1007,10 @@  void __init create_kmalloc_caches(void)
 
 	/* Kmalloc array is now usable */
 	slab_state = UP;
+
+	kmem_buckets_cache = kmem_cache_create("kmalloc_buckets",
+					       sizeof(kmem_buckets),
+					       0, 0, NULL);
 }
 
 /**