@@ -166,11 +166,11 @@ documentation. Note that `kvmalloc` may return memory that is not
physically contiguous.
If you need to allocate many identical objects you can use the slab
-cache allocator. The cache should be set up with kmem_cache_create() or
-kmem_cache_create_usercopy() before it can be used. The second function
-should be used if a part of the cache might be copied to the userspace.
-After the cache is created kmem_cache_alloc() and its convenience
-wrappers can allocate memory from that cache.
+cache allocator. The cache should be set up with kmem_cache_setup()
+before it can be used. If a part of the cache might be copied to the
+userspace a usercopy region can be specified through struct
+kmem_cache_args. After the cache is created kmem_cache_alloc() and its
+convenience wrappers can allocate memory from that cache.
When the allocated memory is no longer needed it must be freed.
@@ -240,6 +240,27 @@ struct mem_cgroup;
*/
bool slab_is_available(void);
+/**
+ * struct kmem_cache_args - Arguments for cache creation
+ * @align: The required alignment for the objects.
+ * @useroffset: Usercopy region offset
+ * @usersize: Usercopy region size
+ * @freeptr_offset: Custom offset for the free pointer in RCU caches
+ * @use_freeptr_offset: Whether a @freeptr_offset is used
+ * @ctor: A constructor for the objects.
+ */
+struct kmem_cache_args {
+ unsigned int align;
+ unsigned int useroffset;
+ unsigned int usersize;
+ unsigned int freeptr_offset;
+ bool use_freeptr_offset;
+ void (*ctor)(void *);
+};
+
+struct kmem_cache *kmem_cache_setup(const char *name, unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags);
+
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, slab_flags_t flags,
void (*ctor)(void *));
@@ -202,22 +202,22 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
}
static struct kmem_cache *create_cache(const char *name,
- unsigned int object_size, unsigned int freeptr_offset,
- unsigned int align, slab_flags_t flags,
- unsigned int useroffset, unsigned int usersize,
- void (*ctor)(void *))
+ unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
{
struct kmem_cache *s;
int err;
- if (WARN_ON(useroffset + usersize > object_size))
- useroffset = usersize = 0;
+ if (WARN_ON(args->useroffset + args->usersize > object_size))
+ args->useroffset = args->usersize = 0;
/* If a custom freelist pointer is requested make sure it's sane. */
err = -EINVAL;
- if (freeptr_offset != UINT_MAX &&
- (freeptr_offset >= object_size || !(flags & SLAB_TYPESAFE_BY_RCU) ||
- !IS_ALIGNED(freeptr_offset, sizeof(freeptr_t))))
+ if (args->use_freeptr_offset &&
+ (args->freeptr_offset >= object_size ||
+ !(flags & SLAB_TYPESAFE_BY_RCU) ||
+ !IS_ALIGNED(args->freeptr_offset, sizeof(freeptr_t))))
goto out;
err = -ENOMEM;
@@ -227,12 +227,15 @@ static struct kmem_cache *create_cache(const char *name,
s->name = name;
s->size = s->object_size = object_size;
- s->rcu_freeptr_offset = freeptr_offset;
- s->align = align;
- s->ctor = ctor;
+ if (args->use_freeptr_offset)
+ s->rcu_freeptr_offset = args->freeptr_offset;
+ else
+ s->rcu_freeptr_offset = UINT_MAX;
+ s->align = args->align;
+ s->ctor = args->ctor;
#ifdef CONFIG_HARDENED_USERCOPY
- s->useroffset = useroffset;
- s->usersize = usersize;
+ s->useroffset = args->useroffset;
+ s->usersize = args->usersize;
#endif
err = __kmem_cache_create(s, flags);
if (err)
@@ -248,12 +251,22 @@ static struct kmem_cache *create_cache(const char *name,
return ERR_PTR(err);
}
-static struct kmem_cache *
-do_kmem_cache_create_usercopy(const char *name,
- unsigned int size, unsigned int freeptr_offset,
- unsigned int align, slab_flags_t flags,
- unsigned int useroffset, unsigned int usersize,
- void (*ctor)(void *))
+/**
+ * kmem_cache_setup - Create a kmem cache
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @object_size: The size of objects to be created in this cache.
+ * @args: Arguments for the cache creation (see struct kmem_cache_args).
+ * @flags: See %SLAB_* flags for an explanation of individual @flags.
+ *
+ * Cannot be called within an interrupt, but can be interrupted.
+ * The @args->ctor is run when new pages are allocated by the cache.
+ *
+ * Return: a pointer to the cache on success, NULL on failure.
+ */
+struct kmem_cache *kmem_cache_setup(const char *name, unsigned int object_size,
+ struct kmem_cache_args *args,
+ slab_flags_t flags)
{
struct kmem_cache *s = NULL;
const char *cache_name;
@@ -275,7 +288,7 @@ do_kmem_cache_create_usercopy(const char *name,
mutex_lock(&slab_mutex);
- err = kmem_cache_sanity_check(name, size);
+ err = kmem_cache_sanity_check(name, object_size);
if (err) {
goto out_unlock;
}
@@ -296,12 +309,14 @@ do_kmem_cache_create_usercopy(const char *name,
/* Fail closed on bad usersize of useroffset values. */
if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
- WARN_ON(!usersize && useroffset) ||
- WARN_ON(size < usersize || size - usersize < useroffset))
- usersize = useroffset = 0;
-
- if (!usersize)
- s = __kmem_cache_alias(name, size, align, flags, ctor);
+ WARN_ON(!args->usersize && args->useroffset) ||
+ WARN_ON(object_size < args->usersize ||
+ object_size - args->usersize < args->useroffset))
+ args->usersize = args->useroffset = 0;
+
+ if (!args->usersize)
+ s = __kmem_cache_alias(name, object_size, args->align, flags,
+ args->ctor);
if (s)
goto out_unlock;
@@ -311,9 +326,8 @@ do_kmem_cache_create_usercopy(const char *name,
goto out_unlock;
}
- s = create_cache(cache_name, size, freeptr_offset,
- calculate_alignment(flags, align, size),
- flags, useroffset, usersize, ctor);
+ args->align = calculate_alignment(flags, args->align, object_size);
+ s = create_cache(cache_name, object_size, args, flags);
if (IS_ERR(s)) {
err = PTR_ERR(s);
kfree_const(cache_name);
@@ -335,6 +349,7 @@ do_kmem_cache_create_usercopy(const char *name,
}
return s;
}
+EXPORT_SYMBOL(kmem_cache_setup);
/**
* kmem_cache_create_usercopy - Create a cache with a region suitable
@@ -370,8 +385,14 @@ kmem_cache_create_usercopy(const char *name, unsigned int size,
unsigned int useroffset, unsigned int usersize,
void (*ctor)(void *))
{
- return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
- useroffset, usersize, ctor);
+ return kmem_cache_setup(name, size,
+ &(struct kmem_cache_args){
+ .align = align,
+ .ctor = ctor,
+ .useroffset = useroffset,
+ .usersize = usersize,
+ },
+ flags);
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);
@@ -404,8 +425,12 @@ struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
slab_flags_t flags, void (*ctor)(void *))
{
- return do_kmem_cache_create_usercopy(name, size, UINT_MAX, align, flags,
- 0, 0, ctor);
+ return kmem_cache_setup(name, size,
+ &(struct kmem_cache_args){
+ .align = align,
+ .ctor = ctor,
+ },
+ flags);
}
EXPORT_SYMBOL(kmem_cache_create);
@@ -442,9 +467,12 @@ struct kmem_cache *kmem_cache_create_rcu(const char *name, unsigned int size,
unsigned int freeptr_offset,
slab_flags_t flags)
{
- return do_kmem_cache_create_usercopy(name, size, freeptr_offset, 0,
- flags | SLAB_TYPESAFE_BY_RCU, 0, 0,
- NULL);
+ return kmem_cache_setup(name, size,
+ &(struct kmem_cache_args){
+ .freeptr_offset = freeptr_offset,
+ .use_freeptr_offset = true,
+ },
+ flags | SLAB_TYPESAFE_BY_RCU);
}
EXPORT_SYMBOL(kmem_cache_create_rcu);
Replace the custom kmem_cache_*() functions with a unified kmem_cache_setup() function that is based on struct kmem_cache_args and will replace kmem_cache_create(), kmem_cache_create_usercopy(), and kmem_cache_create_rcu(). The @name, @object_size, and @flags parameters are passed separately as they are nearly universally used. The rest of the arguments move into struct kmem_cache_args. A new "use_freeptr_offset" boolean is added as zero is a valid freelist pointer offset. The boolean allows callers to avoid having to do anything special if they don't care about freelist pointer offsets (most callers don't). Signed-off-by: Christian Brauner <brauner@kernel.org> --- Documentation/core-api/memory-allocation.rst | 10 +-- include/linux/slab.h | 21 ++++++ mm/slab_common.c | 102 +++++++++++++++++---------- 3 files changed, 91 insertions(+), 42 deletions(-)